diff --git a/.gitignore b/.gitignore deleted file mode 100644 index b6e47617..00000000 --- a/.gitignore +++ /dev/null @@ -1,129 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 9247e23f..00000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,62 +0,0 @@ -# How to contribute to Vector AI? - -Everyone is welcome to contribute, and we value everybody's contribution. Code -is thus not the only way to help the community. Answering questions, helping -others, reaching out and improving the documentations are immensely valuable to -the community. - -It also helps us if you spread the word: reference the library from blog posts -on the awesome projects it made possible, shout out on Twitter/Reddit/Kaggle -every time it has helped you, or simply star the repo to say "thank you". - -# How To Add Your Model To Vector Hub - -We have written a simple 7-step guide to help you add your models here if you have trained them! This should take approximately 30 minutes - 1 hour. Let us know at dev@vctr.ai if you need any help. - -1. Fork the project. - -2. Identify the minimum requirements for your model, identify the associated module and then add them to the MODEL_REQUIREMENTS in vectorhub/model_dict. - -3. Write a brief description about what your model involves. - -4. Create a new branch called new_model/____2vec, replace ___ with the model/domain etc. - -5. Identify which directory your model should fall under. Here is a basic directory outline. 
- -|____ encoders -|________ audio -|________ image -|________ text -|____ bi_encoders -|________ qa -If you believe your model falls under a new category than we recommend making a new directory! - -6. Once you identify the requirements, find the associated module or create a new one if required. - -Use the following code as a base for any new models and add an encode and bulk_encode method. Both should return lists. -``` - from ....import_utils import * - # Import dictionary for model requirements - from ....models_dict import MODEL_REQUIREMENTS - # Add dependencies in if-statement to avoid import breaks in the library - if is_all_dependency_installed(MODEL_REQUIREMENTS['text-bi-encoder-tfhub-use-qa']): - # add imports here - import bert - import numpy as np - import tensorflow as tf - import tensorflow_hub as hub - import tensorflow_text - from typing import List - # This decorator returns a default vector in case of an error - from ....base import catch_vector_errors - # Base class that provides basic utilities - from ..base import BaseTextText2Vec - - class USEMultiQA2Vec(BaseTextText2Vec): - ... - # Add decorator in case encoding errors and we need a dummy vector. - @catch_vector_errors - def encode(self, text): - pass -``` -7. Submit a PR! \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index a71830b8..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,3 +0,0 @@ -include LICENSE -include extra_requirements.json -recursive-include vectorhub *.md diff --git a/README.md b/README.md index ba99ec56..cbace019 100644 --- a/README.md +++ b/README.md @@ -1,325 +1,10 @@

-(Vectorhub is depreciated, and no longer maintained. We recommend using Sentence Transformer, TFHub and Huggingface directly. If you are looking to vectorize 100million+ data in a parallelized fashion, check out https://tryrelevance.com ) +This repository is deprecated and no longer maintained. If you are looking to learn about state-of-the-art techniques and models for turning data into vector embeddings, we recommend visiting our friends at VectorHub.

-

- - - -
-

-
-

- - Release - - - Website - - - Hub - - - Discord - -

- -

-Vector Hub is a library for publication, discovery, and consumption of State-of-the-art models to turn data into vectors. (Text2Vec, Image2Vec, Video2Vec, Face2Vec, Bert2Vec, Inception2Vec, Code2Vec, LegalBert2Vec, etc) -

- - ---- -

- - - -

-
- - -There are many ways to extract vectors from data. This library aims to bring in all the state of the art models in a simple manner to vectorise your data easily. - -Vector Hub provides: -- A low barrier of entry for practitioners (using common methods) -- Vectorise rich and complex data types like: text, image, audio, etc in 3 lines of code -- Retrieve and find information about a model -- An easy way to handle dependencies easily for different models -- Universal format of installation and encoding (using a simple `encode` method). - -In order to provide an easy way for practitioners to quickly experiment, research and build new models and feature vectors, we provide a streamlined way to obtain vectors through our universal `encode` API. - -Every model has the following: -- `encode` allows you to turn raw data into a vector -- `bulk_encode` allows you to turn multiple objects into multiple vectors -- `encode_documents` returns a list of dictionaries with with an encoded field - -For bi-modal models: -Question Answering encoders will have: -- `encode_question` -- `encode_answer` -- `bulk_encode_question` -- `bulk_encode_answer` - -Text Image Bi-encoders will have: -- `encode_image` -- `encode_text` -- `bulk_encode_image` -- `bulk_encode_text` - - -There are thousands of _____2Vec models across different use cases/domains. Vectorhub allows people to aggregate their work and share it with the community. - ---- - -## Powered By Relevance AI - Vector Experimentation Platform - -Relevance AI is the vector platform for rapid experimentation. Launch great vector-based applications with flexible developer tools for storing, experimenting and deploying vectors. - -[Check out our Github repository here!](https://github.com/RelevanceAI/RelevanceAI) - - -![Github Banner](https://github.com/RelevanceAI/RelevanceAI/blob/development/assets/github_banner.png) - ---- - -## Quickstart: - -[Intro to Vectors](https://github.com/vector-ai/vectorhub#what-are-vectors) | [Model Hub](https://hub.getvectorai.com/) | [Google Colab Quickstart](https://go.vctr.ai/vectorhub-colab) | [Python Documentation](https://go.vctr.ai/vectorhub-docs) - ---- - -## Installation: -To get started quickly install vectorhub: - -``` -pip install vectorhub -``` - -Alternatively if you require more up-to-date models/features and are okay if it is not fully stable, you can install the nightly version of VectorHub using: -``` -pip install vectorhub-nightly -``` - -After this, our built-in dependency manager will tell you what to install when you instantiate -a model. The main types of installation options can be found here: https://hub.getvectorai.com/ - -To install different types of models: -``` -# To install transformer requirements -pip install vectorhub[text-encoder-transformers] -``` - -To install all models at once (note: this can take a while! We recommend searching for an interesting model on the website such as USE2Vec or BitMedium2Vec and following the installation line or see examples below.) -``` -pip install vectorhub[all] -``` - -We recommend activating a new virtual environment and then installing using the following: - -``` -python3 -m pip install virtualenv -python3 -m virtualenv env -source env/bin/activate -python3 -m pip install --upgrade pip -python3 -m pip install vectorhub[all] -``` - -### Updates - -#### Version 1.4 - -Previous issues with batch-processing: - -If bulk fed in, would cause bulk error when really only 1 in 15 inputs were causing errors. 
Lack of reliability in bulk_encode meant most of the time bulk_encode was just a list comprehension. This meant we lost any speed enhancements we could be getting as we had to feed it through matrices every time. - -The new design now lets us get the most out of multiple tensor inputs. - -

- - - -

-
- - - -[Google's Big Image Transfer model](https://blog.tensorflow.org/2020/05/bigtransfer-bit-state-of-art-transfer-learning-computer-vision.html) - -``` -from vectorhub.encoders.image.tfhub import BitSmall2Vec -image_encoder = BitSmall2Vec() -image_encoder.encode('https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_92x30dp.png') -[0.47, 0.83, 0.148, ...] -``` -[Google's BERT model](https://blog.google/products/search/search-language-understanding-bert/) -``` -from vectorhub.encoders.text.tfhub import Bert2Vec -text_encoder = Bert2Vec() -text_encoder.encode('This is sparta!') -[0.47, 0.83, 0.148, ...] -``` - -[Google's USE QA model](https://tfhub.dev/google/universal-sentence-encoder-qa/3) -``` -from vectorhub.bi_encoders.text.tfhub import UseQA2Vec -text_encoder = UseQA2Vec() -text_encoder.encode_question('Who is sparta!') -[0.47, 0.83, 0.148, ...] -text_encoder.encode_answer('Sparta!') -[0.47, 0.83, 0.148, ...] -``` - -[HuggingFace Transformer's Albert](https://github.com/huggingface/transformers) - -``` -from vectorhub.encoders.text import Transformer2Vec -text_encoder = Transformer2Vec('albert-base-v2') -text_encoder.encode('This is sparta!') -[0.47, 0.83, 0.148, ...] -``` -[Facebook Dense Passage Retrieval](https://github.com/facebookresearch/DPR) -``` -from vectorhub.bi_encoders.qa.torch_transformers import DPR2Vec -text_encoder = DPR2Vec() -text_encoder.encode_question('Who is sparta!') -[0.47, 0.83, 0.148, ...] -text_encoder.encode_answer('Sparta!') -[0.47, 0.83, 0.148, ...] -``` - --- +Additional information: -### Index and search your vectors easily on the cloud using 1 line of code! - -``` -#pip install vectorhub[encoders-text-tfhub] -from vectorhub.encoders.text.tfhub import USE2VEc -encoder = USE2Vec() - -# You can request an api_key simply by using your username and email. -username = '' -email = '' -api_key = encoder.request_api_key(username, email) - -# Index in 1 line of code -items = ['dogs', 'toilet', 'paper', 'enjoy walking'] -encoder.add_documents(user, api_key, items) - -# Search in 1 line of code and get the most similar results. -encoder.search('basin') -``` - -Add metadata to your search (information about your vectors) - -``` -# Add the number of letters of each word -metadata = [7, 6, 5, 12] -encoder.add_documents(user, api_key, items=items, metadata=metadata) -``` - -#### Using a document-orientated-approach instead: - -``` -from vectorhub.encoders.text import Transformer2Vec -encoder = Transformer2Vec('bert-base-uncased') - -from vectorai import ViClient -vi_client = ViClient(username, api_key) -docs = vi_client.create_sample_documents(10) -vi_client.insert_documents('collection_name_here', docs, models={'color': encoder.encode}) - -# Now we can search through our collection -vi_client.search('collection_name_here', field='color_vector_', vector=encoder.encode('purple')) -``` - ---- - -### Easily access information with your model! - -``` -# If you want to additional information about the model, you can access the information below: -text_encoder.definition.repo -text_encoder.definition.description -# If you want all the information in a dictionary, you can call: -text_encoder.definition.create_dict() # returns a dictionary with model id, description, paper, etc. -``` - ---- - -### Turn Off Error-Catching - -By default, if encoding errors, it returns a vector filled with 1e-7 so that if you are encoding and then inserting then it errors out. 
-However, if you want to turn off automatic error-catching in VectorHub, simply run: - -``` -import vectorhub -vectorhub.options.set_option('catch_vector_errors', False) -``` - -If you want to turn it back on again, run: -``` -vectorhub.options.set_option('catch_vector_errors', True) -``` - ---- - -### Instantiate our auto_encoder class as such and use any of the models! - -``` -from vectorhub.auto_encoder import AutoEncoder -encoder = AutoEncoder.from_model('text/bert') -encoder.encode("Hello vectorhub!") -[0.47, 0.83, 0.148, ...] -``` - -You can choose from our list of models: -``` -['text/albert', 'text/bert', 'text/labse', 'text/use', 'text/use-multi', 'text/use-lite', 'text/legal-bert', 'audio/fairseq', 'audio/speech-embedding', 'audio/trill', 'audio/trill-distilled', 'audio/vggish', 'audio/yamnet', 'audio/wav2vec', 'image/bit', 'image/bit-medium', 'image/inception', 'image/inception-v2', 'image/inception-v3', 'image/inception-resnet', 'image/mobilenet', 'image/mobilenet-v2', 'image/resnet', 'image/resnet-v2', 'qa/use-multi-qa', 'qa/use-qa', 'qa/dpr', 'qa/lareqa-qa'] -``` -## What are Vectors? -Common Terminologys when operating with Vectors: -- Vectors (aka. Embeddings, Encodings, Neural Representation) ~ It is a list of numbers to represent a piece of data. - E.g. the vector for the word "king" using a Word2Vec model is [0.47, 0.83, 0.148, ...] -- ____2Vec (aka. Models, Encoders, Embedders) ~ Turns data into vectors e.g. Word2Vec turns words into vector - -

- -
-

- - -### How can I use vectors? - -Vectors have a broad range of applications. The most common use case is to perform semantic vector search and analysing the topics/clusters using vector analytics. - -If you are interested in these applications, take a look at [Vector AI](https://github.com/vector-ai/vectorai). - -### How can I obtain vectors? -- Taking the outputs of layers from deep learning models -- Data cleaning, such as one hot encoding labels -- Converting graph representations to vectors - -### How To Upload Your 2Vec Model - -[Read here if you would like to contribute your model!](https://vector-ai.github.io/vectorhub/how_to_add_a_model.html) - -## Philosophy - -The goal of VectorHub is to provide a flexible yet comprehensive framework that allows people to easily be able to turn their data into vectors in whatever form the data can be in. While our focus is largely on simplicity, customisation should always be an option and the level of abstraction is always up model-uploader as long as the reason is justified. For example - with text, we chose to keep the encoding at the text level as opposed to the token level because selection of text should not be applied at the token level so practitioners are aware of what texts go into the actual vectors (i.e. instead of ignoring a '[next][SEP][wo][##rd]', we are choosing to ignore 'next word' explicitly. We think this will allow practitioners to focus better on what should matter when it comes to encoding. - -Similarly, when we are turning data into vectors, we convert to native Python objects. The decision for this is to attempt to remove as many dependencies as possible once the vectors are created - specifically those of deep learning frameworks such as Tensorflow/PyTorch. This is to allow other frameworks to be built on top of it. - -## Team - -This library is maintained by the Relevance AI - your go-to solution for data science tooling with tvectors. If you are interested in using our API for vector search, visit https://relevance.ai or if you are interested in using API, check out https://relevance.ai its free for public research and open source. - -### Credit: - -This library wouldn't exist if it weren't for the following libraries and the incredible machine learning community that releases their state-of-the-art models: +This repository was first set up by RelevanceAI - a platform to build and deploy AI apps and agents. -1. https://github.com/huggingface/transformers -2. https://github.com/tensorflow/hub -3. https://github.com/pytorch/pytorch -4. Word2Vec image - Alammar, Jay (2018). The Illustrated Transformer [Blog post]. Retrieved from https://jalammar.github.io/illustrated-transformer/ -5. https://github.com/UKPLab/sentence-transformers +VectorHub is a free and open-source educational platform to learn how to build information retrieval and feature engineering powered by vector embeddings. VectorHub is community-led and maintained by Superlinked - an open-source compute framework, focused on turning complex data into vector embeddings. 
diff --git a/assets/monthly_downloads.svg b/assets/monthly_downloads.svg deleted file mode 100644 index 335c456f..00000000 --- a/assets/monthly_downloads.svg +++ /dev/null @@ -1 +0,0 @@ -Monthly DownloadsMonthly Downloads1096510965 \ No newline at end of file diff --git a/assets/total_downloads.svg b/assets/total_downloads.svg deleted file mode 100644 index b686732a..00000000 --- a/assets/total_downloads.svg +++ /dev/null @@ -1 +0,0 @@ -Total DownloadsTotal Downloads9855298552 \ No newline at end of file diff --git a/assets/vectorhub-batch-processing.png b/assets/vectorhub-batch-processing.png deleted file mode 100644 index 05a56c47..00000000 Binary files a/assets/vectorhub-batch-processing.png and /dev/null differ diff --git a/assets/weekly_downloads.svg b/assets/weekly_downloads.svg deleted file mode 100644 index 47776218..00000000 --- a/assets/weekly_downloads.svg +++ /dev/null @@ -1 +0,0 @@ -Weekly DownloadsWeekly Downloads26612661 \ No newline at end of file diff --git a/docs/.buildinfo b/docs/.buildinfo deleted file mode 100644 index 2a6d91c4..00000000 --- a/docs/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 0d1f1d1216174a08c917ab5785c453d8 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/.doctrees/auto_encoder.doctree b/docs/.doctrees/auto_encoder.doctree deleted file mode 100644 index b97d4057..00000000 Binary files a/docs/.doctrees/auto_encoder.doctree and /dev/null differ diff --git a/docs/.doctrees/bi_encoders.text_text.dpr2vec.doctree b/docs/.doctrees/bi_encoders.text_text.dpr2vec.doctree deleted file mode 100644 index 26b76977..00000000 Binary files a/docs/.doctrees/bi_encoders.text_text.dpr2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/bi_encoders.text_text.lareqa_qa2vec.doctree b/docs/.doctrees/bi_encoders.text_text.lareqa_qa2vec.doctree deleted file mode 100644 index 2ef60379..00000000 Binary files a/docs/.doctrees/bi_encoders.text_text.lareqa_qa2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/bi_encoders.text_text.use_qa2vec.doctree b/docs/.doctrees/bi_encoders.text_text.use_qa2vec.doctree deleted file mode 100644 index 7e75eec3..00000000 Binary files a/docs/.doctrees/bi_encoders.text_text.use_qa2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.audio.speech_embedding2vec.doctree b/docs/.doctrees/encoders.audio.speech_embedding2vec.doctree deleted file mode 100644 index 23faf0ee..00000000 Binary files a/docs/.doctrees/encoders.audio.speech_embedding2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.audio.trill2vec.doctree b/docs/.doctrees/encoders.audio.trill2vec.doctree deleted file mode 100644 index f4c97f61..00000000 Binary files a/docs/.doctrees/encoders.audio.trill2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.audio.vectorai2vec.doctree b/docs/.doctrees/encoders.audio.vectorai2vec.doctree deleted file mode 100644 index c1562acd..00000000 Binary files a/docs/.doctrees/encoders.audio.vectorai2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.audio.vggish2vec.doctree b/docs/.doctrees/encoders.audio.vggish2vec.doctree deleted file mode 100644 index 93439942..00000000 Binary files a/docs/.doctrees/encoders.audio.vggish2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.audio.wav2vec.doctree b/docs/.doctrees/encoders.audio.wav2vec.doctree deleted file mode 100644 index 
4e0502dc..00000000 Binary files a/docs/.doctrees/encoders.audio.wav2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.audio.yamnet2vec.doctree b/docs/.doctrees/encoders.audio.yamnet2vec.doctree deleted file mode 100644 index 9d24c71f..00000000 Binary files a/docs/.doctrees/encoders.audio.yamnet2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.image.bit2vec.doctree b/docs/.doctrees/encoders.image.bit2vec.doctree deleted file mode 100644 index a1805947..00000000 Binary files a/docs/.doctrees/encoders.image.bit2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.image.inception2vec.doctree b/docs/.doctrees/encoders.image.inception2vec.doctree deleted file mode 100644 index 93d3a1b3..00000000 Binary files a/docs/.doctrees/encoders.image.inception2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.image.inception_resnet2vec.doctree b/docs/.doctrees/encoders.image.inception_resnet2vec.doctree deleted file mode 100644 index cff82a50..00000000 Binary files a/docs/.doctrees/encoders.image.inception_resnet2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.image.mobilenet2vec.doctree b/docs/.doctrees/encoders.image.mobilenet2vec.doctree deleted file mode 100644 index d167cdd0..00000000 Binary files a/docs/.doctrees/encoders.image.mobilenet2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.image.resnet2vec.doctree b/docs/.doctrees/encoders.image.resnet2vec.doctree deleted file mode 100644 index 7e3e304b..00000000 Binary files a/docs/.doctrees/encoders.image.resnet2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.image.vectorai2vec.doctree b/docs/.doctrees/encoders.image.vectorai2vec.doctree deleted file mode 100644 index 35b3d122..00000000 Binary files a/docs/.doctrees/encoders.image.vectorai2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.albert2vec.doctree b/docs/.doctrees/encoders.text.albert2vec.doctree deleted file mode 100644 index de402b49..00000000 Binary files a/docs/.doctrees/encoders.text.albert2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.bert2vec.doctree b/docs/.doctrees/encoders.text.bert2vec.doctree deleted file mode 100644 index 9d2ef217..00000000 Binary files a/docs/.doctrees/encoders.text.bert2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.labse2vec.doctree b/docs/.doctrees/encoders.text.labse2vec.doctree deleted file mode 100644 index 33fdded6..00000000 Binary files a/docs/.doctrees/encoders.text.labse2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.legalbert2vec.doctree b/docs/.doctrees/encoders.text.legalbert2vec.doctree deleted file mode 100644 index dddc7bc8..00000000 Binary files a/docs/.doctrees/encoders.text.legalbert2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.sentencetransformer2vec.doctree b/docs/.doctrees/encoders.text.sentencetransformer2vec.doctree deleted file mode 100644 index 3787e53d..00000000 Binary files a/docs/.doctrees/encoders.text.sentencetransformer2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.transformer2vec.doctree b/docs/.doctrees/encoders.text.transformer2vec.doctree deleted file mode 100644 index fd9b0e28..00000000 Binary files a/docs/.doctrees/encoders.text.transformer2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.use2vec.doctree b/docs/.doctrees/encoders.text.use2vec.doctree deleted file mode 100644 index 
f3b7b4de..00000000 Binary files a/docs/.doctrees/encoders.text.use2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.use_multi2vec.doctree b/docs/.doctrees/encoders.text.use_multi2vec.doctree deleted file mode 100644 index 82230258..00000000 Binary files a/docs/.doctrees/encoders.text.use_multi2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/encoders.text.vectorai2vec.doctree b/docs/.doctrees/encoders.text.vectorai2vec.doctree deleted file mode 100644 index 02239bff..00000000 Binary files a/docs/.doctrees/encoders.text.vectorai2vec.doctree and /dev/null differ diff --git a/docs/.doctrees/environment.pickle b/docs/.doctrees/environment.pickle deleted file mode 100644 index 8e584adf..00000000 Binary files a/docs/.doctrees/environment.pickle and /dev/null differ diff --git a/docs/.doctrees/how_to_add_a_model.doctree b/docs/.doctrees/how_to_add_a_model.doctree deleted file mode 100644 index 69878d06..00000000 Binary files a/docs/.doctrees/how_to_add_a_model.doctree and /dev/null differ diff --git a/docs/.doctrees/index.doctree b/docs/.doctrees/index.doctree deleted file mode 100644 index f405b6d6..00000000 Binary files a/docs/.doctrees/index.doctree and /dev/null differ diff --git a/docs/.doctrees/intro.doctree b/docs/.doctrees/intro.doctree deleted file mode 100644 index 61325ddf..00000000 Binary files a/docs/.doctrees/intro.doctree and /dev/null differ diff --git a/docs/.doctrees/modules.doctree b/docs/.doctrees/modules.doctree deleted file mode 100644 index 3ff8a36d..00000000 Binary files a/docs/.doctrees/modules.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.doctree b/docs/.doctrees/vectorhub.bi_encoders.doctree deleted file mode 100644 index 279f98d8..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.qa.doctree b/docs/.doctrees/vectorhub.bi_encoders.qa.doctree deleted file mode 100644 index 1d656d64..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.qa.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.qa.sentence_transformers.doctree b/docs/.doctrees/vectorhub.bi_encoders.qa.sentence_transformers.doctree deleted file mode 100644 index 0e97ce99..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.qa.sentence_transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.qa.tfhub.doctree b/docs/.doctrees/vectorhub.bi_encoders.qa.tfhub.doctree deleted file mode 100644 index 42c93627..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.qa.tfhub.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.qa.torch_transformers.doctree b/docs/.doctrees/vectorhub.bi_encoders.qa.torch_transformers.doctree deleted file mode 100644 index c5aa3c8d..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.qa.torch_transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.text_image.doctree b/docs/.doctrees/vectorhub.bi_encoders.text_image.doctree deleted file mode 100644 index f09cdfff..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.text_image.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.text_image.torch.doctree b/docs/.doctrees/vectorhub.bi_encoders.text_image.torch.doctree deleted file mode 100644 index f32b8e2d..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.text_image.torch.doctree and /dev/null differ diff --git 
a/docs/.doctrees/vectorhub.bi_encoders.text_text.doctree b/docs/.doctrees/vectorhub.bi_encoders.text_text.doctree deleted file mode 100644 index 47df1261..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.text_text.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.text_text.sentence_transformers.doctree b/docs/.doctrees/vectorhub.bi_encoders.text_text.sentence_transformers.doctree deleted file mode 100644 index 53c1f604..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.text_text.sentence_transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.text_text.tfhub.doctree b/docs/.doctrees/vectorhub.bi_encoders.text_text.tfhub.doctree deleted file mode 100644 index 57a038ab..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.text_text.tfhub.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.bi_encoders.text_text.torch_transformers.doctree b/docs/.doctrees/vectorhub.bi_encoders.text_text.torch_transformers.doctree deleted file mode 100644 index 351dd2bc..00000000 Binary files a/docs/.doctrees/vectorhub.bi_encoders.text_text.torch_transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.doctree b/docs/.doctrees/vectorhub.doctree deleted file mode 100644 index 6efae69e..00000000 Binary files a/docs/.doctrees/vectorhub.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.audio.doctree b/docs/.doctrees/vectorhub.encoders.audio.doctree deleted file mode 100644 index 64d9b164..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.audio.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.audio.pytorch.doctree b/docs/.doctrees/vectorhub.encoders.audio.pytorch.doctree deleted file mode 100644 index d336a215..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.audio.pytorch.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.audio.tfhub.doctree b/docs/.doctrees/vectorhub.encoders.audio.tfhub.doctree deleted file mode 100644 index 44dfb24b..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.audio.tfhub.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.audio.vectorai.doctree b/docs/.doctrees/vectorhub.encoders.audio.vectorai.doctree deleted file mode 100644 index 0f93c367..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.audio.vectorai.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.code.doctree b/docs/.doctrees/vectorhub.encoders.code.doctree deleted file mode 100644 index 9c300d3e..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.code.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.code.transformers.doctree b/docs/.doctrees/vectorhub.encoders.code.transformers.doctree deleted file mode 100644 index 5fd70c79..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.code.transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.doctree b/docs/.doctrees/vectorhub.encoders.doctree deleted file mode 100644 index 53e285bf..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.face.doctree b/docs/.doctrees/vectorhub.encoders.face.doctree deleted file mode 100644 index e4addf98..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.face.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.face.tf.doctree b/docs/.doctrees/vectorhub.encoders.face.tf.doctree 
deleted file mode 100644 index ad4de2cc..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.face.tf.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.image.doctree b/docs/.doctrees/vectorhub.encoders.image.doctree deleted file mode 100644 index 2f3f7836..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.image.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.image.fastai.doctree b/docs/.doctrees/vectorhub.encoders.image.fastai.doctree deleted file mode 100644 index 9dffb254..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.image.fastai.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.image.tensorflow.doctree b/docs/.doctrees/vectorhub.encoders.image.tensorflow.doctree deleted file mode 100644 index 06896daf..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.image.tensorflow.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.image.tfhub.doctree b/docs/.doctrees/vectorhub.encoders.image.tfhub.doctree deleted file mode 100644 index e60ed70f..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.image.tfhub.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.image.vectorai.doctree b/docs/.doctrees/vectorhub.encoders.image.vectorai.doctree deleted file mode 100644 index 6bf42376..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.image.vectorai.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.text.doctree b/docs/.doctrees/vectorhub.encoders.text.doctree deleted file mode 100644 index ccb7e1d9..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.text.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.text.sentence_transformers.doctree b/docs/.doctrees/vectorhub.encoders.text.sentence_transformers.doctree deleted file mode 100644 index 27b2acc5..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.text.sentence_transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.text.tf_transformers.doctree b/docs/.doctrees/vectorhub.encoders.text.tf_transformers.doctree deleted file mode 100644 index 016f7bb8..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.text.tf_transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.text.tfhub.doctree b/docs/.doctrees/vectorhub.encoders.text.tfhub.doctree deleted file mode 100644 index 4c3fd13e..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.text.tfhub.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.text.torch_transformers.doctree b/docs/.doctrees/vectorhub.encoders.text.torch_transformers.doctree deleted file mode 100644 index ab467165..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.text.torch_transformers.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.text.vectorai.doctree b/docs/.doctrees/vectorhub.encoders.text.vectorai.doctree deleted file mode 100644 index faa22f5a..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.text.vectorai.doctree and /dev/null differ diff --git a/docs/.doctrees/vectorhub.encoders.video.doctree b/docs/.doctrees/vectorhub.encoders.video.doctree deleted file mode 100644 index 7cb728c9..00000000 Binary files a/docs/.doctrees/vectorhub.encoders.video.doctree and /dev/null differ diff --git a/docs/.nojekyll b/docs/.nojekyll deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/_sources/auto_encoder.rst.txt b/docs/_sources/auto_encoder.rst.txt 
deleted file mode 100644 index 37b09863..00000000 --- a/docs/_sources/auto_encoder.rst.txt +++ /dev/null @@ -1,31 +0,0 @@ -Guide to using Auto-Encoder -===================================== - -Inspired by transformers' adoption of the auto-models, we created an -AutoEncoder class that allows you to easily get the relevant models. Not to be confused with the autoencoder architecture. - -The relevant models can be found here: - -.. code-block:: python - - from vectorhub import AutoEncoder - encoder = AutoEncoder('text/bert') - encoder.encode("Hi...") - - -To view the list of available models, you can call: - - -.. code-block:: python - - import vectorhub as vh - vh.list_available_auto_models() - -When you instantiate the autoencoder, you will need to pip install -the relevant module. The requirements here can be given here. - -The list of supported models are: - -.. code-block:: python - - ['text/albert', 'text/bert', 'text/labse', 'text/use', 'text/use-multi', 'text/use-lite', 'text/legal-bert', 'audio/fairseq', 'audio/speech-embedding', 'audio/trill', 'audio/trill-distilled', 'audio/vggish', 'audio/yamnet', 'audio/wav2vec', 'image/bit', 'image/bit-medium', 'image/inception', 'image/inception-v2', 'image/inception-v3', 'image/inception-resnet', 'image/mobilenet', 'image/mobilenet-v2', 'image/resnet', 'image/resnet-v2', 'text_text/use-multi-qa', 'text_text/use-qa', 'text_text/dpr', 'text_text/lareqa-qa] diff --git a/docs/_sources/bi_encoders.text_text.dpr2vec.rst.txt b/docs/_sources/bi_encoders.text_text.dpr2vec.rst.txt deleted file mode 100644 index 5c98ae01..00000000 --- a/docs/_sources/bi_encoders.text_text.dpr2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -DPR2Vec ----------------------------------------- - -Transformers -========================================== - -.. automodule:: vectorhub.bi_encoders.text_text.torch_transformers.dpr - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/bi_encoders.text_text.lareqa_qa2vec.rst.txt b/docs/_sources/bi_encoders.text_text.lareqa_qa2vec.rst.txt deleted file mode 100644 index f34523de..00000000 --- a/docs/_sources/bi_encoders.text_text.lareqa_qa2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -LAReQA2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.lareqa_qa - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/bi_encoders.text_text.use_qa2vec.rst.txt b/docs/_sources/bi_encoders.text_text.use_qa2vec.rst.txt deleted file mode 100644 index cf33504c..00000000 --- a/docs/_sources/bi_encoders.text_text.use_qa2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -USEQA2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.use_qa - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.audio.speech_embedding2vec.rst.txt b/docs/_sources/encoders.audio.speech_embedding2vec.rst.txt deleted file mode 100644 index d29bcfda..00000000 --- a/docs/_sources/encoders.audio.speech_embedding2vec.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -SpeechEmbedding2Vec ----------------------------------------- - - - -TFHub -========================================== - -.. 
automodule:: vectorhub.encoders.audio.tfhub.speech_embedding - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.audio.trill2vec.rst.txt b/docs/_sources/encoders.audio.trill2vec.rst.txt deleted file mode 100644 index b210d44a..00000000 --- a/docs/_sources/encoders.audio.trill2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -Trill2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.audio.tfhub.trill - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.audio.vectorai2vec.rst.txt b/docs/_sources/encoders.audio.vectorai2vec.rst.txt deleted file mode 100644 index 478d456d..00000000 --- a/docs/_sources/encoders.audio.vectorai2vec.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -ViAudio2Vec ----------------------------------------- - -Vector AI -========================================== - -For Vector AI users to access to our deployed vector models and evaluate embeddings. - -.. automodule:: vectorhub.encoders.audio.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.audio.vggish2vec.rst.txt b/docs/_sources/encoders.audio.vggish2vec.rst.txt deleted file mode 100644 index e62d94e0..00000000 --- a/docs/_sources/encoders.audio.vggish2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -Vggish2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.audio.tfhub.vggish - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.audio.wav2vec.rst.txt b/docs/_sources/encoders.audio.wav2vec.rst.txt deleted file mode 100644 index 732092d0..00000000 --- a/docs/_sources/encoders.audio.wav2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -Wav2Vec ----------------------------------------- - -PyTorch -========================================== - -.. automodule:: vectorhub.encoders.audio.pytorch.fairseq - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.audio.yamnet2vec.rst.txt b/docs/_sources/encoders.audio.yamnet2vec.rst.txt deleted file mode 100644 index 19b7a8f1..00000000 --- a/docs/_sources/encoders.audio.yamnet2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -Yamnet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.audio.tfhub.yamnet - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.image.bit2vec.rst.txt b/docs/_sources/encoders.image.bit2vec.rst.txt deleted file mode 100644 index b97fb90f..00000000 --- a/docs/_sources/encoders.image.bit2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -Bit2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.bit - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.image.inception2vec.rst.txt b/docs/_sources/encoders.image.inception2vec.rst.txt deleted file mode 100644 index d35052fd..00000000 --- a/docs/_sources/encoders.image.inception2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -Inception2Vec ----------------------------------------- - -TFHub -========================================== - -.. 
automodule:: vectorhub.encoders.image.tfhub.inception - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.image.inception_resnet2vec.rst.txt b/docs/_sources/encoders.image.inception_resnet2vec.rst.txt deleted file mode 100644 index 7e2b04a7..00000000 --- a/docs/_sources/encoders.image.inception_resnet2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -InceptionResnet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.inception_resnet - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.image.mobilenet2vec.rst.txt b/docs/_sources/encoders.image.mobilenet2vec.rst.txt deleted file mode 100644 index 2f6dc3f0..00000000 --- a/docs/_sources/encoders.image.mobilenet2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -MobileNet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.mobilenet - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.image.resnet2vec.rst.txt b/docs/_sources/encoders.image.resnet2vec.rst.txt deleted file mode 100644 index 6a2302e6..00000000 --- a/docs/_sources/encoders.image.resnet2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -ResNet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.resnet - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.image.vectorai2vec.rst.txt b/docs/_sources/encoders.image.vectorai2vec.rst.txt deleted file mode 100644 index 9ba974a0..00000000 --- a/docs/_sources/encoders.image.vectorai2vec.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -ViImage2Vec ----------------------------------------- - -Vector AI -========================================== - -For Vector AI users to access to our deployed vector models and evaluate embeddings. - -.. automodule:: vectorhub.encoders.image.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.albert2vec.rst.txt b/docs/_sources/encoders.text.albert2vec.rst.txt deleted file mode 100644 index ba44abac..00000000 --- a/docs/_sources/encoders.text.albert2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -AlBert2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.albert - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.bert2vec.rst.txt b/docs/_sources/encoders.text.bert2vec.rst.txt deleted file mode 100644 index da2a9ceb..00000000 --- a/docs/_sources/encoders.text.bert2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -Bert2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.bert - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.labse2vec.rst.txt b/docs/_sources/encoders.text.labse2vec.rst.txt deleted file mode 100644 index 2e09a5a0..00000000 --- a/docs/_sources/encoders.text.labse2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -LaBSE2Vec ----------------------------------------- - -TFHub -========================================== - -.. 
automodule:: vectorhub.encoders.text.tfhub.labse - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.legalbert2vec.rst.txt b/docs/_sources/encoders.text.legalbert2vec.rst.txt deleted file mode 100644 index 85a3906e..00000000 --- a/docs/_sources/encoders.text.legalbert2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -LegalBert2Vec ----------------------------------------- - -Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.torch_transformers.legal_bert - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.sentencetransformer2vec.rst.txt b/docs/_sources/encoders.text.sentencetransformer2vec.rst.txt deleted file mode 100644 index 663a7a20..00000000 --- a/docs/_sources/encoders.text.sentencetransformer2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -SentenceTransformer2Vec ----------------------------------------- - -Sentence-Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.transformer2vec.rst.txt b/docs/_sources/encoders.text.transformer2vec.rst.txt deleted file mode 100644 index d134efb5..00000000 --- a/docs/_sources/encoders.text.transformer2vec.rst.txt +++ /dev/null @@ -1,18 +0,0 @@ -Transformer2Vec ----------------------------------------- - -PyTorch Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.torch_transformers.torch_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -Tensorflow Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.tf_transformers.tf_auto_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.use2vec.rst.txt b/docs/_sources/encoders.text.use2vec.rst.txt deleted file mode 100644 index 8e91f555..00000000 --- a/docs/_sources/encoders.text.use2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -USE2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.use - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.use_multi2vec.rst.txt b/docs/_sources/encoders.text.use_multi2vec.rst.txt deleted file mode 100644 index b241643b..00000000 --- a/docs/_sources/encoders.text.use_multi2vec.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -USEMulti2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.use_multi - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/encoders.text.vectorai2vec.rst.txt b/docs/_sources/encoders.text.vectorai2vec.rst.txt deleted file mode 100644 index 3bb50aab..00000000 --- a/docs/_sources/encoders.text.vectorai2vec.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -ViText2Vec ----------------------------------------- - -Vector AI -========================================== - -For Vector AI users to access to our deployed vector models and evaluate embeddings. - -.. 
automodule:: vectorhub.encoders.text.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/how_to_add_a_model.rst.txt b/docs/_sources/how_to_add_a_model.rst.txt deleted file mode 100644 index 2a1b00ed..00000000 --- a/docs/_sources/how_to_add_a_model.rst.txt +++ /dev/null @@ -1,60 +0,0 @@ - -How To Add Your Model To Vector Hub -===================================== - -We have written a simple 7-step guide to help you add your models here if you have trained them! -This should take approximately 30 minutes - 1 hour. Let us know at dev@vctr.ai if you need any help. - -* 1. Fork the project. - -* 2. Identify the minimum requirements for your model, identify the associated module and then add them to the MODEL_REQUIREMENTS in vectorhub/model_dict. - -* 3. Write a brief description about what your model involves. - -* 4. Create a new branch called new_model/____2vec, replace ___ with the model/domain etc. - -* 5. Identify which directory your model should fall under. Here is a basic directory outline. - -.. code-block:: - - |____ encoders - |________ audio - |________ image - |________ text - |____ bi_encoders - |________ text_text - -If you believe your model falls under a new category then we recommend making a new directory! - -* 6. Once you identify the requirements, find the associated module or create a new one if required. -Use the following code as a base for any new models and add an -`encode` and `bulk_encode` method. Both should return lists. - -.. code-block:: python - - from ....import_utils import * - # Import dictionary for model requirements - from ....models_dict import MODEL_REQUIREMENTS - # Add dependencies in if-statement to avoid import breaks in the library - if is_all_dependency_installed(MODEL_REQUIREMENTS['text-bi-encoder-tfhub-use-qa']): - # add imports here - import bert - import numpy as np - import tensorflow as tf - import tensorflow_hub as hub - import tensorflow_text - - from typing import List - # This decorator returns a default vector in case of an error - from ....base import catch_vector_errors - # Base class that provides basic utilities - from ..base import BaseTextText2Vec - - class USEMultiQA2Vec(BaseTextText2Vec): - ... - # Add decorator in case encoding errors and we need a dummy vector. - @catch_vector_errors - def encode(self, text): - pass - -* 7. Submit a PR! diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt deleted file mode 100644 index 93a3b598..00000000 --- a/docs/_sources/index.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ - -Welcome to VectorHub's documentation! -===================================== - -Vector Hub is your home for ___2Vec models! - - - -The rise of deep learning and encoding has meant that there is now an explosion of -open-source and proprietary models and techniques that have allowed for distributed -representation of entities. This means the rise of new ____2Vec models that are: - -1) Model-specific - New architecture is introduced. -2) Domain-specific - Architecture is trained on new domain. -3) Language-specific - Architecture is trained in new language. -4) Task-specific - Architecture is trained on new task. - -In order to allow people to understand what these models do and mean, we aim to provide -a hub for these __2vec models. - -Our vision is to build a hub that allows people to store these ____2Vec models and provide explanations -for how to best use these encodings while building a flexible framework that allows these -different models to be used easily.
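To make step 6 of the how-to guide above more concrete, here is a minimal, hypothetical sketch of what an `encode` / `bulk_encode` pair could look like once the model-specific pieces are filled in. The class name `MyText2Vec`, the TFHub URL and the call pattern are illustrative assumptions rather than part of the guide; the only requirement taken from the guide is that both methods return plain Python lists.

.. code-block:: python

    from typing import List

    import tensorflow_hub as hub  # assumed dependency for this illustrative encoder


    class MyText2Vec:
        """Hypothetical ____2Vec encoder; the model URL below is a placeholder choice."""

        def __init__(self, model_url: str = "https://tfhub.dev/google/universal-sentence-encoder/4"):
            # Load the underlying TFHub model once, at construction time.
            self.model = hub.load(model_url)

        def encode(self, text: str) -> List[float]:
            # Encode a single string and return a plain Python list of floats.
            return self.model([text]).numpy().tolist()[0]

        def bulk_encode(self, texts: List[str]) -> List[List[float]]:
            # Simplest possible bulk implementation: map encode over the inputs.
            # A production encoder would normally batch the model call instead.
            return [self.encode(t) for t in texts]

In an actual contribution the class would also subclass the relevant base class and wrap `encode` with the `@catch_vector_errors` decorator, as the template in the guide shows.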
- - - - -.. toctree:: - :maxdepth: 2 - :caption: Contents - - intro - how_to_add_a_model - auto_encoder - -.. toctree:: - :maxdepth: 4 - :caption: Text Encoders - - encoders.text.bert2vec - encoders.text.albert2vec - encoders.text.labse2vec - encoders.text.use2vec - encoders.text.use_multi2vec - encoders.text.legalbert2vec - encoders.text.transformer2vec - encoders.text.sentencetransformer2vec - encoders.text.vectorai2vec - - -.. toctree:: - :maxdepth: 2 - :caption: Image Encoders - - encoders.image.bit2vec - encoders.image.inception2vec - encoders.image.resnet2vec - encoders.image.inception_resnet2vec - encoders.image.mobilenet2vec - encoders.image.vectorai2vec - -.. toctree:: - :maxdepth: 2 - :caption: Audio Encoders - - encoders.audio.speech_embedding2vec - encoders.audio.trill2vec - encoders.audio.vggish2vec - encoders.audio.yamnet2vec - encoders.audio.wav2vec - encoders.audio.vectorai2vec - - -.. toctree:: - :maxdepth: 4 - :caption: Text Bi-Encoders - - bi_encoders.text_text.use_qa2vec - bi_encoders.text_text.lareqa_qa2vec - bi_encoders.text_text.dpr2vec - -.. toctree:: - :maxdepth: 4 - :caption: Modules - - modules - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/_sources/intro.rst.txt b/docs/_sources/intro.rst.txt deleted file mode 100644 index a8895187..00000000 --- a/docs/_sources/intro.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -What is Vector Hub? -============================== - -The home of ___2Vec Models. - -The rise of deep learning and encoding has meant that there is now an explosion of -open-source and proprietary models and techniques that have allowed for distributed -representation of entities. This means the rise of new ____2Vec models that are: - -1) Model-specific - New architecture is introduced. -2) Domain-specific - Architecture is trained on new domain. -3) Language-specific - Architecture is trained in new language. -4) Task-specific - Architecture is trained on new task. - -In order to allow people to understand what these models do and mean, we aim to provide -a hub for these __2vec models. - -Our vision is to build a hub that allows people to store these ____2Vec models and provide explanations -for how to best use these encodings while building a flexible framework that allows these -different models to be used easily. diff --git a/docs/_sources/modules.rst.txt b/docs/_sources/modules.rst.txt deleted file mode 100644 index b457194d..00000000 --- a/docs/_sources/modules.rst.txt +++ /dev/null @@ -1,7 +0,0 @@ -vectorhub -========= - -.. toctree:: - :maxdepth: 4 - - vectorhub diff --git a/docs/_sources/vectorhub.bi_encoders.qa.rst.txt b/docs/_sources/vectorhub.bi_encoders.qa.rst.txt deleted file mode 100644 index 6730e43d..00000000 --- a/docs/_sources/vectorhub.bi_encoders.qa.rst.txt +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.bi\_encoders.qa package -================================= - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.qa.sentence_transformers - vectorhub.bi_encoders.qa.tfhub - vectorhub.bi_encoders.qa.torch_transformers - -Submodules ----------- - -vectorhub.bi\_encoders.qa.base module ------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -..
automodule:: vectorhub.bi_encoders.qa - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.qa.sentence_transformers.rst.txt b/docs/_sources/vectorhub.bi_encoders.qa.sentence_transformers.rst.txt deleted file mode 100644 index 09245b91..00000000 --- a/docs/_sources/vectorhub.bi_encoders.qa.sentence_transformers.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.qa.sentence\_transformers package -======================================================== - -Submodules ----------- - -vectorhub.bi\_encoders.qa.sentence\_transformers.distilroberta\_qa module -------------------------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.sentence_transformers.distilroberta_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.qa.sentence_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.qa.tfhub.rst.txt b/docs/_sources/vectorhub.bi_encoders.qa.tfhub.rst.txt deleted file mode 100644 index 7628dc6c..00000000 --- a/docs/_sources/vectorhub.bi_encoders.qa.tfhub.rst.txt +++ /dev/null @@ -1,37 +0,0 @@ -vectorhub.bi\_encoders.qa.tfhub package -======================================= - -Submodules ----------- - -vectorhub.bi\_encoders.qa.tfhub.lareqa\_qa module -------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.tfhub.lareqa_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.qa.tfhub.use\_multi\_qa module ------------------------------------------------------ - -.. automodule:: vectorhub.bi_encoders.qa.tfhub.use_multi_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.qa.tfhub.use\_qa module ----------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.tfhub.use_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.qa.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.qa.torch_transformers.rst.txt b/docs/_sources/vectorhub.bi_encoders.qa.torch_transformers.rst.txt deleted file mode 100644 index 876b24df..00000000 --- a/docs/_sources/vectorhub.bi_encoders.qa.torch_transformers.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.qa.torch\_transformers package -===================================================== - -Submodules ----------- - -vectorhub.bi\_encoders.qa.torch\_transformers.dpr module --------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.torch_transformers.dpr - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.qa.torch_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.rst.txt b/docs/_sources/vectorhub.bi_encoders.rst.txt deleted file mode 100644 index 01b15752..00000000 --- a/docs/_sources/vectorhub.bi_encoders.rst.txt +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.bi\_encoders package -============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.text_text - -Module contents ---------------- - -.. 
automodule:: vectorhub.bi_encoders - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.text_image.rst.txt b/docs/_sources/vectorhub.bi_encoders.text_image.rst.txt deleted file mode 100644 index ea446af9..00000000 --- a/docs/_sources/vectorhub.bi_encoders.text_image.rst.txt +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.bi\_encoders.text\_image package -========================================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.text_image.torch - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_image - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.text_image.torch.rst.txt b/docs/_sources/vectorhub.bi_encoders.text_image.torch.rst.txt deleted file mode 100644 index b1627ee4..00000000 --- a/docs/_sources/vectorhub.bi_encoders.text_image.torch.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.text\_image.torch package -================================================ - -Submodules ----------- - -vectorhub.bi\_encoders.text\_image.torch.clip2vec module --------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_image.torch.clip2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_image.torch - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.text_text.rst.txt b/docs/_sources/vectorhub.bi_encoders.text_text.rst.txt deleted file mode 100644 index 843d4c67..00000000 --- a/docs/_sources/vectorhub.bi_encoders.text_text.rst.txt +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.bi\_encoders.text\_text package -========================================= - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.text_text.sentence_transformers - vectorhub.bi_encoders.text_text.tfhub - vectorhub.bi_encoders.text_text.torch_transformers - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.base module ---------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_text - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.text_text.sentence_transformers.rst.txt b/docs/_sources/vectorhub.bi_encoders.text_text.sentence_transformers.rst.txt deleted file mode 100644 index 1f198dc1..00000000 --- a/docs/_sources/vectorhub.bi_encoders.text_text.sentence_transformers.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.text\_text.sentence\_transformers package -================================================================ - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.sentence\_transformers.distilroberta\_qa module ---------------------------------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.sentence_transformers.distilroberta_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. 
automodule:: vectorhub.bi_encoders.text_text.sentence_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.text_text.tfhub.rst.txt b/docs/_sources/vectorhub.bi_encoders.text_text.tfhub.rst.txt deleted file mode 100644 index 6f4e811d..00000000 --- a/docs/_sources/vectorhub.bi_encoders.text_text.tfhub.rst.txt +++ /dev/null @@ -1,37 +0,0 @@ -vectorhub.bi\_encoders.text\_text.tfhub package -=============================================== - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.tfhub.lareqa\_qa module ---------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.lareqa_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.text\_text.tfhub.use\_multi\_qa module -------------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.use_multi_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.text\_text.tfhub.use\_qa module ------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.use_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.bi_encoders.text_text.torch_transformers.rst.txt b/docs/_sources/vectorhub.bi_encoders.text_text.torch_transformers.rst.txt deleted file mode 100644 index bcb8cdd7..00000000 --- a/docs/_sources/vectorhub.bi_encoders.text_text.torch_transformers.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.text\_text.torch\_transformers package -============================================================= - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.torch\_transformers.dpr module ----------------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.torch_transformers.dpr - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_text.torch_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.audio.pytorch.rst.txt b/docs/_sources/vectorhub.encoders.audio.pytorch.rst.txt deleted file mode 100644 index 8861664b..00000000 --- a/docs/_sources/vectorhub.encoders.audio.pytorch.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.audio.pytorch package -======================================== - -Submodules ----------- - -vectorhub.encoders.audio.pytorch.wav2vec module ------------------------------------------------ - -.. automodule:: vectorhub.encoders.audio.pytorch.wav2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio.pytorch - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.audio.rst.txt b/docs/_sources/vectorhub.encoders.audio.rst.txt deleted file mode 100644 index db186037..00000000 --- a/docs/_sources/vectorhub.encoders.audio.rst.txt +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.encoders.audio package -================================ - -Subpackages ------------ - -.. 
toctree:: - :maxdepth: 4 - - vectorhub.encoders.audio.pytorch - vectorhub.encoders.audio.tfhub - vectorhub.encoders.audio.vectorai - -Submodules ----------- - -vectorhub.encoders.audio.base module ------------------------------------- - -.. automodule:: vectorhub.encoders.audio.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.audio.tfhub.rst.txt b/docs/_sources/vectorhub.encoders.audio.tfhub.rst.txt deleted file mode 100644 index 8a7f9cef..00000000 --- a/docs/_sources/vectorhub.encoders.audio.tfhub.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -vectorhub.encoders.audio.tfhub package -====================================== - -Submodules ----------- - -vectorhub.encoders.audio.tfhub.speech\_embedding module -------------------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.speech_embedding - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.trill module -------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.trill - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.trill\_distilled module ------------------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.trill_distilled - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.vggish module --------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.vggish - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.yamnet module --------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.yamnet - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.audio.vectorai.rst.txt b/docs/_sources/vectorhub.encoders.audio.vectorai.rst.txt deleted file mode 100644 index 061ef40b..00000000 --- a/docs/_sources/vectorhub.encoders.audio.vectorai.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.audio.vectorai package -========================================= - -Submodules ----------- - -vectorhub.encoders.audio.vectorai.vi\_encoder module ----------------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio.vectorai - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.code.rst.txt b/docs/_sources/vectorhub.encoders.code.rst.txt deleted file mode 100644 index 70612ffc..00000000 --- a/docs/_sources/vectorhub.encoders.code.rst.txt +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.encoders.code package -=============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.code.transformers - -Module contents ---------------- - -.. 
automodule:: vectorhub.encoders.code - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.code.transformers.rst.txt b/docs/_sources/vectorhub.encoders.code.transformers.rst.txt deleted file mode 100644 index f02c9aa2..00000000 --- a/docs/_sources/vectorhub.encoders.code.transformers.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.code.transformers package -============================================ - -Submodules ----------- - -vectorhub.encoders.code.transformers.codebert module ----------------------------------------------------- - -.. automodule:: vectorhub.encoders.code.transformers.codebert - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.code.transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.face.rst.txt b/docs/_sources/vectorhub.encoders.face.rst.txt deleted file mode 100644 index 5322a010..00000000 --- a/docs/_sources/vectorhub.encoders.face.rst.txt +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.encoders.face package -=============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.face.tf - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.face - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.face.tf.rst.txt b/docs/_sources/vectorhub.encoders.face.tf.rst.txt deleted file mode 100644 index f766ba0a..00000000 --- a/docs/_sources/vectorhub.encoders.face.tf.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.face.tf package -================================== - -Submodules ----------- - -vectorhub.encoders.face.tf.face2vec module ------------------------------------------- - -.. automodule:: vectorhub.encoders.face.tf.face2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.face.tf - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.image.fastai.rst.txt b/docs/_sources/vectorhub.encoders.image.fastai.rst.txt deleted file mode 100644 index 3787eb39..00000000 --- a/docs/_sources/vectorhub.encoders.image.fastai.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -vectorhub.encoders.image.fastai package -======================================= - -Submodules ----------- - -vectorhub.encoders.image.fastai.base module -------------------------------------------- - -.. automodule:: vectorhub.encoders.image.fastai.base - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.fastai.resnet module ---------------------------------------------- - -.. automodule:: vectorhub.encoders.image.fastai.resnet - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image.fastai - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.image.rst.txt b/docs/_sources/vectorhub.encoders.image.rst.txt deleted file mode 100644 index 59b7b5ce..00000000 --- a/docs/_sources/vectorhub.encoders.image.rst.txt +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.encoders.image package -================================ - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.image.fastai - vectorhub.encoders.image.tfhub - vectorhub.encoders.image.vectorai - -Submodules ----------- - -vectorhub.encoders.image.base module ------------------------------------- - -.. 
automodule:: vectorhub.encoders.image.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.image.tensorflow.rst.txt b/docs/_sources/vectorhub.encoders.image.tensorflow.rst.txt deleted file mode 100644 index 537a0cfd..00000000 --- a/docs/_sources/vectorhub.encoders.image.tensorflow.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.image.tensorflow package -=========================================== - -Submodules ----------- - -vectorhub.encoders.image.tensorflow.face2vec module ---------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tensorflow.face2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image.tensorflow - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.image.tfhub.rst.txt b/docs/_sources/vectorhub.encoders.image.tfhub.rst.txt deleted file mode 100644 index 638c8809..00000000 --- a/docs/_sources/vectorhub.encoders.image.tfhub.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -vectorhub.encoders.image.tfhub package -====================================== - -Submodules ----------- - -vectorhub.encoders.image.tfhub.bit module ------------------------------------------ - -.. automodule:: vectorhub.encoders.image.tfhub.bit - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.bit\_medium module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.bit_medium - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inception\_resnet module -------------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inception_resnet - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inceptionv1 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inceptionv1 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inceptionv2 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inceptionv2 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inceptionv3 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inceptionv3 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.mobilenet module ------------------------------------------------ - -.. automodule:: vectorhub.encoders.image.tfhub.mobilenet - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.mobilenetv2 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.mobilenetv2 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.resnet module --------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.resnet - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.resnetv2 module ----------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.resnetv2 - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. 
automodule:: vectorhub.encoders.image.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.image.vectorai.rst.txt b/docs/_sources/vectorhub.encoders.image.vectorai.rst.txt deleted file mode 100644 index 2d954655..00000000 --- a/docs/_sources/vectorhub.encoders.image.vectorai.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.image.vectorai package -========================================= - -Submodules ----------- - -vectorhub.encoders.image.vectorai.vi\_encoder module ----------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image.vectorai - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.rst.txt b/docs/_sources/vectorhub.encoders.rst.txt deleted file mode 100644 index d3ded0ab..00000000 --- a/docs/_sources/vectorhub.encoders.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders package -========================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.audio - vectorhub.encoders.image - vectorhub.encoders.text - vectorhub.encoders.video - -Module contents ---------------- - -.. automodule:: vectorhub.encoders - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.text.rst.txt b/docs/_sources/vectorhub.encoders.text.rst.txt deleted file mode 100644 index a6ac652d..00000000 --- a/docs/_sources/vectorhub.encoders.text.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -vectorhub.encoders.text package -=============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.text.sentence_transformers - vectorhub.encoders.text.tf_transformers - vectorhub.encoders.text.tfhub - vectorhub.encoders.text.torch_transformers - vectorhub.encoders.text.vectorai - -Submodules ----------- - -vectorhub.encoders.text.base module ------------------------------------ - -.. automodule:: vectorhub.encoders.text.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.text.sentence_transformers.rst.txt b/docs/_sources/vectorhub.encoders.text.sentence_transformers.rst.txt deleted file mode 100644 index 8a3e1b17..00000000 --- a/docs/_sources/vectorhub.encoders.text.sentence_transformers.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.text.sentence\_transformers package -====================================================== - -Submodules ----------- - -vectorhub.encoders.text.sentence\_transformers.sentence\_auto\_transformers module ----------------------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. 
automodule:: vectorhub.encoders.text.sentence_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.text.tf_transformers.rst.txt b/docs/_sources/vectorhub.encoders.text.tf_transformers.rst.txt deleted file mode 100644 index 26b8d6dd..00000000 --- a/docs/_sources/vectorhub.encoders.text.tf_transformers.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.text.tf\_transformers package -================================================ - -Submodules ----------- - -vectorhub.encoders.text.tf\_transformers.tf\_auto\_transformers module ----------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tf_transformers.tf_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.tf_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.text.tfhub.rst.txt b/docs/_sources/vectorhub.encoders.text.tfhub.rst.txt deleted file mode 100644 index ff2e5ea2..00000000 --- a/docs/_sources/vectorhub.encoders.text.tfhub.rst.txt +++ /dev/null @@ -1,69 +0,0 @@ -vectorhub.encoders.text.tfhub package -===================================== - -Submodules ----------- - -vectorhub.encoders.text.tfhub.albert module -------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.albert - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.bert module ------------------------------------------ - -.. automodule:: vectorhub.encoders.text.tfhub.bert - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.elmo module ------------------------------------------ - -.. automodule:: vectorhub.encoders.text.tfhub.elmo - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.labse module ------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.labse - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.use module ----------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.use - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.use\_lite module ----------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.use_lite - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.use\_multi module ------------------------------------------------ - -.. automodule:: vectorhub.encoders.text.tfhub.use_multi - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.text.torch_transformers.rst.txt b/docs/_sources/vectorhub.encoders.text.torch_transformers.rst.txt deleted file mode 100644 index 65b05a97..00000000 --- a/docs/_sources/vectorhub.encoders.text.torch_transformers.rst.txt +++ /dev/null @@ -1,37 +0,0 @@ -vectorhub.encoders.text.torch\_transformers package -=================================================== - -Submodules ----------- - -vectorhub.encoders.text.torch\_transformers.legal\_bert module --------------------------------------------------------------- - -.. 
automodule:: vectorhub.encoders.text.torch_transformers.legal_bert - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.torch\_transformers.torch\_auto\_transformers module ----------------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.torch_transformers.torch_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.torch\_transformers.torch\_longformers module ---------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.torch_transformers.torch_longformers - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.torch_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.text.vectorai.rst.txt b/docs/_sources/vectorhub.encoders.text.vectorai.rst.txt deleted file mode 100644 index c6463a14..00000000 --- a/docs/_sources/vectorhub.encoders.text.vectorai.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.text.vectorai package -======================================== - -Submodules ----------- - -vectorhub.encoders.text.vectorai.vi\_encoder module ---------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.vectorai - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.encoders.video.rst.txt b/docs/_sources/vectorhub.encoders.video.rst.txt deleted file mode 100644 index a68e2bbc..00000000 --- a/docs/_sources/vectorhub.encoders.video.rst.txt +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.video package -================================ - -Submodules ----------- - -vectorhub.encoders.video.sampler module ---------------------------------------- - -.. automodule:: vectorhub.encoders.video.sampler - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.video - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/vectorhub.rst.txt b/docs/_sources/vectorhub.rst.txt deleted file mode 100644 index 7442ecb6..00000000 --- a/docs/_sources/vectorhub.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -vectorhub package -================= - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders - vectorhub.encoders - -Submodules ----------- - -vectorhub.auto\_encoder module ------------------------------- - -.. automodule:: vectorhub.auto_encoder - :members: - :undoc-members: - :show-inheritance: - -vectorhub.base module ---------------------- - -.. automodule:: vectorhub.base - :members: - :undoc-members: - :show-inheritance: - -vectorhub.doc\_utils module ---------------------------- - -.. automodule:: vectorhub.doc_utils - :members: - :undoc-members: - :show-inheritance: - -vectorhub.errors module ------------------------ - -.. automodule:: vectorhub.errors - :members: - :undoc-members: - :show-inheritance: - -vectorhub.import\_utils module ------------------------------- - -.. automodule:: vectorhub.import_utils - :members: - :undoc-members: - :show-inheritance: - -vectorhub.models\_dict module ------------------------------ - -.. automodule:: vectorhub.models_dict - :members: - :undoc-members: - :show-inheritance: - -vectorhub.utils module ----------------------- - -.. 
automodule:: vectorhub.utils - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_static/basic.css b/docs/_static/basic.css deleted file mode 100644 index be19270e..00000000 --- a/docs/_static/basic.css +++ /dev/null @@ -1,856 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -div.section::after { - display: block; - content: ''; - clear: left; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - 
margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -a.brackets:before, -span.brackets > a:before{ - content: "["; -} - -a.brackets:after, -span.brackets > a:after { - content: "]"; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, .figure.align-default { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-default { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px; - background-color: #ffe; - width: 40%; - float: right; - clear: right; - overflow-x: auto; -} - -p.sidebar-title { - font-weight: bold; -} - -div.admonition, div.topic, blockquote { - clear: left; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- content of sidebars/topics/admonitions -------------------------------- */ - -div.sidebar > :last-child, -div.topic > :last-child, -div.admonition > :last-child { - margin-bottom: 0; -} - -div.sidebar::after, -div.topic::after, -div.admonition::after, -blockquote::after { - display: block; - content: ''; - clear: both; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - margin-top: 10px; - 
margin-bottom: 10px; - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table.align-default { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -th > :first-child, -td > :first-child { - margin-top: 0px; -} - -th > :last-child, -td > :last-child { - margin-bottom: 0px; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist { - margin: 1em 0; -} - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -:not(li) > ol > li:first-child > :first-child, -:not(li) > ul > li:first-child > :first-child { - margin-top: 0px; -} - -:not(li) > ol > li:last-child > :last-child, -:not(li) > ul > li:last-child > :last-child { - margin-bottom: 0px; -} - -ol.simple ol p, -ol.simple ul p, -ul.simple ol p, -ul.simple ul p { - margin-top: 0; -} - -ol.simple > li:not(:first-child) > p, -ul.simple > li:not(:first-child) > p { - margin-top: 0; -} - -ol.simple p, -ul.simple p { - margin-bottom: 0; -} - -dl.footnote > dt, -dl.citation > dt { - float: left; - margin-right: 0.5em; -} - -dl.footnote > dd, -dl.citation > dd { - margin-bottom: 0em; -} - -dl.footnote > dd:after, -dl.citation > dd:after { - content: ""; - clear: both; -} - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; - word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dt:after { - content: ":"; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - -dl { - margin-bottom: 15px; -} - -dd > :first-child { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dl > dd:last-child, -dl > dd:last-child > :last-child { - margin-bottom: 0; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional 
{ - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -.classifier:before { - font-style: normal; - margin: 0.5em; - content: ":"; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -pre, div[class*="highlight-"] { - clear: both; -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -div[class*="highlight-"] { - margin: 1em 0; -} - -td.linenos pre { - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - display: block; -} - -table.highlighttable tbody { - display: block; -} - -table.highlighttable tr { - display: flex; -} - -table.highlighttable td { - margin: 0; - padding: 0; -} - -table.highlighttable td.linenos { - padding-right: 0.5em; -} - -table.highlighttable td.code { - flex: 1; - overflow: hidden; -} - -.highlight .hll { - display: block; -} - -div.highlight pre, -table.highlighttable pre { - margin: 0; -} - -div.code-block-caption + div { - margin-top: 0; -} - -div.code-block-caption { - margin-top: 1em; - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -table.highlighttable td.linenos, -span.linenos, -div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - margin: 1em 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: absolute; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/docs/_static/css/badge_only.css b/docs/_static/css/badge_only.css deleted file mode 100644 index e380325b..00000000 --- a/docs/_static/css/badge_only.css +++ /dev/null @@ -1 
+0,0 @@ -.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/docs/_static/css/fonts/Roboto-Slab-Bold.woff b/docs/_static/css/fonts/Roboto-Slab-Bold.woff deleted file mode 100644 index 6cb60000..00000000 Binary files a/docs/_static/css/fonts/Roboto-Slab-Bold.woff and /dev/null differ diff 
--git a/docs/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/_static/css/fonts/Roboto-Slab-Bold.woff2 deleted file mode 100644 index 7059e231..00000000 Binary files a/docs/_static/css/fonts/Roboto-Slab-Bold.woff2 and /dev/null differ diff --git a/docs/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/_static/css/fonts/Roboto-Slab-Regular.woff deleted file mode 100644 index f815f63f..00000000 Binary files a/docs/_static/css/fonts/Roboto-Slab-Regular.woff and /dev/null differ diff --git a/docs/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/_static/css/fonts/Roboto-Slab-Regular.woff2 deleted file mode 100644 index f2c76e5b..00000000 Binary files a/docs/_static/css/fonts/Roboto-Slab-Regular.woff2 and /dev/null differ diff --git a/docs/_static/css/fonts/fontawesome-webfont.eot b/docs/_static/css/fonts/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca9..00000000 Binary files a/docs/_static/css/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/docs/_static/css/fonts/fontawesome-webfont.svg b/docs/_static/css/fonts/fontawesome-webfont.svg deleted file mode 100644 index 855c845e..00000000 --- a/docs/_static/css/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,2671 +0,0 @@ -Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 - By ,,, -Copyright Dave Gandy 2016. All rights reserved. (SVG glyph markup omitted) diff --git a/docs/_static/css/fonts/fontawesome-webfont.ttf b/docs/_static/css/fonts/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2f..00000000 Binary files a/docs/_static/css/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/docs/_static/css/fonts/fontawesome-webfont.woff b/docs/_static/css/fonts/fontawesome-webfont.woff deleted file mode 100644 index 400014a4..00000000 Binary files a/docs/_static/css/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/docs/_static/css/fonts/fontawesome-webfont.woff2 b/docs/_static/css/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 4d13fc60..00000000 Binary files a/docs/_static/css/fonts/fontawesome-webfont.woff2 and /dev/null differ diff --git
a/docs/_static/css/fonts/lato-bold-italic.woff b/docs/_static/css/fonts/lato-bold-italic.woff deleted file mode 100644 index 88ad05b9..00000000 Binary files a/docs/_static/css/fonts/lato-bold-italic.woff and /dev/null differ diff --git a/docs/_static/css/fonts/lato-bold-italic.woff2 b/docs/_static/css/fonts/lato-bold-italic.woff2 deleted file mode 100644 index c4e3d804..00000000 Binary files a/docs/_static/css/fonts/lato-bold-italic.woff2 and /dev/null differ diff --git a/docs/_static/css/fonts/lato-bold.woff b/docs/_static/css/fonts/lato-bold.woff deleted file mode 100644 index c6dff51f..00000000 Binary files a/docs/_static/css/fonts/lato-bold.woff and /dev/null differ diff --git a/docs/_static/css/fonts/lato-bold.woff2 b/docs/_static/css/fonts/lato-bold.woff2 deleted file mode 100644 index bb195043..00000000 Binary files a/docs/_static/css/fonts/lato-bold.woff2 and /dev/null differ diff --git a/docs/_static/css/fonts/lato-normal-italic.woff b/docs/_static/css/fonts/lato-normal-italic.woff deleted file mode 100644 index 76114bc0..00000000 Binary files a/docs/_static/css/fonts/lato-normal-italic.woff and /dev/null differ diff --git a/docs/_static/css/fonts/lato-normal-italic.woff2 b/docs/_static/css/fonts/lato-normal-italic.woff2 deleted file mode 100644 index 3404f37e..00000000 Binary files a/docs/_static/css/fonts/lato-normal-italic.woff2 and /dev/null differ diff --git a/docs/_static/css/fonts/lato-normal.woff b/docs/_static/css/fonts/lato-normal.woff deleted file mode 100644 index ae1307ff..00000000 Binary files a/docs/_static/css/fonts/lato-normal.woff and /dev/null differ diff --git a/docs/_static/css/fonts/lato-normal.woff2 b/docs/_static/css/fonts/lato-normal.woff2 deleted file mode 100644 index 3bf98433..00000000 Binary files a/docs/_static/css/fonts/lato-normal.woff2 and /dev/null differ diff --git a/docs/_static/css/theme.css b/docs/_static/css/theme.css deleted file mode 100644 index 8cd4f101..00000000 --- a/docs/_static/css/theme.css +++ /dev/null @@ -1,4 +0,0 @@ -html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier 
new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li 
span.toctree-expand:before,.wy-nav-top a,.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! - * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li span.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content 
.fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li span.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li span.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li span.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, 
mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{co
ntent:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{co
ntent:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.
fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""
}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{cont
ent:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-l
izard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{c
ontent:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li 
span.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li span.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p.caption .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.btn .wy-menu-vertical li span.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p.caption .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.nav .wy-menu-vertical li span.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p.caption .btn .headerlink,.rst-content p.caption .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav 
.headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 
.fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li span.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 
.btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content 
.wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content 
.wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content .wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content 
.wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid 
rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid 
#f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group 
.wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin 
dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error 
input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content 
table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica 
Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol li,.rst-content ol.arabic li,.wy-plain-list-decimal li,article ol li{list-style:decimal;margin-left:24px}.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content ol.arabic li p:last-child,.rst-content ol.arabic li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.rst-content .wy-breadcrumbs li tt,.wy-breadcrumbs li 
.rst-content tt,.wy-breadcrumbs li code{padding:5px;border:none;background:none}.rst-content .wy-breadcrumbs li tt.literal,.wy-breadcrumbs li .rst-content tt.literal,.wy-breadcrumbs li code.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover span.toctree-expand,.wy-menu-vertical li.on a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical 
.toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover 
span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid 
#e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand{color:#fcfcfc}.rst-versions 
.rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp{user-select:none;pointer-events:none}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content 
.attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content .code-block-caption .headerlink:after,.rst-content .toctree-wrapper>p.caption .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 
.headerlink:after,.rst-content h6 .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"\f0c1";font-family:FontAwesome}.rst-content .code-block-caption:hover .headerlink:after,.rst-content .toctree-wrapper>p.caption:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .hlist{width:100%}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl dt span.classifier:before{content:" : "}html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.field-list>dt:after,html.writer-html5 .rst-content dl.footnote>dt:after{content:":"}html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.footnote>dt>span.brackets{margin-right:.5rem}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{font-style:italic}html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.footnote>dd p,html.writer-html5 .rst-content dl.option-list 
kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code,html.writer-html4 .rst-content dl:not(.docutils) tt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid 
#7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/docs/_static/doctools.js b/docs/_static/doctools.js deleted file mode 100644 index 61ac9d26..00000000 --- a/docs/_static/doctools.js +++ /dev/null @@ -1,321 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - * - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL - */ -jQuery.urldecode = function(x) { - if (!x) { - return x - } - return decodeURIComponent(x.replace(/\+/g, ' ')); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. 
- */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 
0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('
<p class="highlight-link"><a href="javascript:Documentation.hideSearchWords()">' + _('Hide Search Matches') + '</a></p>
') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keydown(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box, textarea, dropdown or button - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' - && activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey - && !event.shiftKey) { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js deleted file mode 100644 index 8839ac8c..00000000 --- a/docs/_static/documentation_options.js +++ /dev/null @@ -1,12 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '0.1', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - BUILDER: 'html', - FILE_SUFFIX: '.html', - LINK_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false -}; \ No newline at end of file diff --git a/docs/_static/file.png b/docs/_static/file.png deleted file mode 100644 index a858a410..00000000 Binary files a/docs/_static/file.png and /dev/null differ diff --git a/docs/_static/jquery-3.5.1.js b/docs/_static/jquery-3.5.1.js deleted file mode 100644 index 50937333..00000000 --- a/docs/_static/jquery-3.5.1.js +++ /dev/null @@ -1,10872 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.5.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2020-05-04T22:49Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. 
- val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.5.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. 
- globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.5 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2020-03-14 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { - - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert( function( el ) { - - var input; - - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll( "[selected]" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push( "~=" ); - } - - // Support: IE 11+, Edge 15 - 18+ - // IE 11/Edge don't find elements on a `[name='']` query in some cases. - // Adding a temporary attribute to the document before the selection works - // around the issue. - // Interestingly, IE 10 & older don't seem to have the issue. 
- input = document.createElement( "input" ); - input.setAttribute( "name", "" ); - el.appendChild( input ); - if ( !el.querySelectorAll( "[name='']" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + - whitespace + "*(?:''|\"\")" ); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll( ":checked" ).length ) { - rbuggyQSA.push( ":checked" ); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push( ".#.+[+~]" ); - } - - // Support: Firefox <=3.6 - 5 only - // Old Firefox doesn't throw on a badly-escaped identifier. - el.querySelectorAll( "\\\f" ); - rbuggyQSA.push( "[\\r\\n\\f]" ); - } ); - - assert( function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement( "input" ); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - // Enforce case-sensitivity of name attribute - if ( el.querySelectorAll( "[name=d]" ).length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: Opera 10 - 11 only - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll( "*,:x" ); - rbuggyQSA.push( ",.*:" ); - } ); - } - - if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector ) ) ) ) { - - assert( function( el ) { - - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - } ); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? 
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens - .slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length;
- while ( i-- ) {
- token = tokens[ i ];
-
- // Abort if we hit a combinator
- if ( Expr.relative[ ( type = token.type ) ] ) {
- break;
- }
- if ( ( find = Expr.find[ type ] ) ) {
-
- // Search, expanding context for leading sibling combinators
- if ( ( seed = find(
- token.matches[ 0 ].replace( runescape, funescape ),
- rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
- context
- ) ) ) {
-
- // If seed is empty or no tokens remain, we can return early
- tokens.splice( i, 1 );
- selector = seed.length && toSelector( tokens );
- if ( !selector ) {
- push.apply( results, seed );
- return results;
- }
-
- break;
- }
- }
- }
- }
-
- // Compile and execute a filtering function if one is not provided
- // Provide `match` to avoid retokenization if we modified the selector above
- ( compiled || compile( selector, match ) )(
- seed,
- context,
- !documentIsHTML,
- results,
- !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
- );
- return results;
- };
-
- // One-time assignments
-
- // Sort stability
- support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
-
- // Support: Chrome 14-35+
- // Always assume duplicates if they aren't passed to the comparison function
- support.detectDuplicates = !!hasDuplicate;
-
- // Initialize against the default document
- setDocument();
-
- // Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
- // Detached nodes confoundingly follow *each other*
- support.sortDetached = assert( function( el ) {
-
- // Should return 1, but returns 4 (following)
- return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
- } );
-
- // Support: IE<8
- // Prevent attribute/property "interpolation"
- // https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
- if ( !assert( function( el ) {
- el.innerHTML = "<a href='#'></a>";
- return el.firstChild.getAttribute( "href" ) === "#";
- } ) ) {
- addHandle( "type|href|height|width", function( elem, name, isXML ) {
- if ( !isXML ) {
- return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
- }
- } );
- }
-
- // Support: IE<9
- // Use defaultValue in place of getAttribute("value")
- if ( !support.attributes || !assert( function( el ) {
- el.innerHTML = "<input/>";
- el.firstChild.setAttribute( "value", "" );
- return el.firstChild.getAttribute( "value" ) === "";
- } ) ) {
- addHandle( "value", function( elem, _name, isXML ) {
- if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
- return elem.defaultValue;
- }
- } );
- }
-
- // Support: IE<9
- // Use getAttributeNode to fetch booleans when getAttribute lies
- if ( !assert( function( el ) {
- return el.getAttribute( "disabled" ) == null;
- } ) ) {
- addHandle( booleans, function( elem, name, isXML ) {
- var val;
- if ( !isXML ) {
- return elem[ name ] === true ? name.toLowerCase() :
- ( val = elem.getAttributeNode( name ) ) && val.specified ?
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) :
- selector || [],
- false
- ).length;
- }
- } );
-
-
- // Initialize a jQuery object
-
-
- // A central reference to the root jQuery(document)
- var rootjQuery,
-
- // A simple way to check for HTML strings
- // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
- // Strict HTML recognition (#11290: must start with <)
- // Shortcut simple #id case for speed
- rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
- init = jQuery.fn.init = function( selector, context, root ) {
- var match, elem;
-
- // HANDLE: $(""), $(null), $(undefined), $(false)
- if ( !selector ) {
- return this;
- }
-
- // Method init() accepts an alternate rootjQuery
- // so migrate can support jQuery.sub (gh-2101)
- root = root || rootjQuery;
-
- // Handle HTML strings
- if ( typeof selector === "string" ) {
- if ( selector[ 0 ] === "<" &&
- selector[ selector.length - 1 ] === ">" &&
- selector.length >= 3 ) {
-
- // Assume that strings that start and end with <> are HTML and skip the regex check
- match = [ null, selector, null ];
-
- } else {
- match = rquickExpr.exec( selector );
- }
-
- // Match html or make sure no context is specified for #id
- if ( match && ( match[ 1 ] || !context ) ) {
-
- // HANDLE: $(html) -> $(array)
- if ( match[ 1 ] ) {
- context = context instanceof jQuery ? context[ 0 ] : context;
-
- // Option to run scripts is true for back-compat
- // Intentionally let the error be thrown if parseHTML is not present
- jQuery.merge( this, jQuery.parseHTML(
- match[ 1 ],
- context && context.nodeType ? context.ownerDocument || context : document,
- true
- ) );
-
- // HANDLE: $(html, props)
- if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
- for ( match in context ) {
-
- // Properties of context are called as methods if possible
- if ( isFunction( this[ match ] ) ) {
- this[ match ]( context[ match ] );
-
- // ...and otherwise set as attributes
- } else {
- this.attr( match, context[ match ] );
- }
- }
- }
-
- return this;
-
- // HANDLE: $(#id)
- } else {
- elem = document.getElementById( match[ 2 ] );
-
- if ( elem ) {
-
- // Inject the element directly into the jQuery object
- this[ 0 ] = elem;
- this.length = 1;
- }
- return this;
- }
-
- // HANDLE: $(expr, $(...))
- } else if ( !context || context.jquery ) {
- return ( context || root ).find( selector );
-
- // HANDLE: $(expr, context)
- // (which is just equivalent to: $(context).find(expr)
- } else {
- return this.constructor( context ).find( selector );
- }
-
- // HANDLE: $(DOMElement)
- } else if ( selector.nodeType ) {
- this[ 0 ] = selector;
- this.length = 1;
- return this;
-
- // HANDLE: $(function)
- // Shortcut for document ready
- } else if ( isFunction( selector ) ) {
- return root.ready !== undefined ?
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null;
- },
- parents: function( elem ) {
- return dir( elem, "parentNode" );
- },
- parentsUntil: function( elem, _i, until ) {
- return dir( elem, "parentNode", until );
- },
- next: function( elem ) {
- return sibling( elem, "nextSibling" );
- },
- prev: function( elem ) {
- return sibling( elem, "previousSibling" );
- },
- nextAll: function( elem ) {
- return dir( elem, "nextSibling" );
- },
- prevAll: function( elem ) {
- return dir( elem, "previousSibling" );
- },
- nextUntil: function( elem, _i, until ) {
- return dir( elem, "nextSibling", until );
- },
- prevUntil: function( elem, _i, until ) {
- return dir( elem, "previousSibling", until );
- },
- siblings: function( elem ) {
- return siblings( ( elem.parentNode || {} ).firstChild, elem );
- },
- children: function( elem ) {
- return siblings( elem.firstChild );
- },
- contents: function( elem ) {
- if ( elem.contentDocument != null &&
-
- // Support: IE 11+
- // <object> elements with no `data` attribute has an object
- // `contentDocument` with a `null` prototype.
- getProto( elem.contentDocument ) ) {
-
- return elem.contentDocument;
- }
-
- // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
- // Treat the template element as a regular one in browsers that
- // don't support it.
- if ( nodeName( elem, "template" ) ) {
- elem = elem.content || elem;
- }
-
- return jQuery.merge( [], elem.childNodes );
- }
- }, function( name, fn ) {
- jQuery.fn[ name ] = function( until, selector ) {
- var matched = jQuery.map( this, fn, until );
-
- if ( name.slice( -5 ) !== "Until" ) {
- selector = until;
- }
-
- if ( selector && typeof selector === "string" ) {
- matched = jQuery.filter( selector, matched );
- }
-
- if ( this.length > 1 ) {
-
- // Remove duplicates
- if ( !guaranteedUnique[ name ] ) {
- jQuery.uniqueSort( matched );
- }
-
- // Reverse order for parents* and prev-derivatives
- if ( rparentsprev.test( name ) ) {
- matched.reverse();
- }
- }
-
- return this.pushStack( matched );
- };
- } );
- var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
- // Convert String-formatted options into Object-formatted ones
- function createOptions( options ) {
- var object = {};
- jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
- object[ flag ] = true;
- } );
- return object;
- }
-
- /*
- * Create a callback list using the following parameters:
- *
- * options: an optional list of space-separated options that will change how
- * the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- * once: will ensure the callback list can only be fired once (like a Deferred)
- *
- * memory: will keep track of previous values and will call any callback added
- * after the list has been fired right away with the latest "memorized"
- * values (like a Deferred)
- *
- * unique: will ensure a callback can only be added once (no duplicate in the list)
- *
- * stopOnFalse: interrupt callings when a callback returns false
- *
- */
- jQuery.Callbacks = function( options ) {
-
- // Convert options from String-formatted to Object-formatted if needed
- // (we check in cache first)
- options = typeof options === "string" ?
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
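// Illustrative usage sketch, not part of the original jQuery source: how the
// Deferred/when implementation above is typically consumed. The URLs are
// placeholders and assume the ajax module is also loaded.
var dfd = jQuery.Deferred();

dfd.then( function( value ) {
	console.log( "resolved with", value );
}, function( err ) {
	console.log( "rejected with", err );
} );

dfd.resolve( 42 );

// jQuery.when aggregates several thenables, much like Promise.all
jQuery.when( jQuery.get( "/a.json" ), jQuery.get( "/b.json" ) )
	.done( function( a, b ) {
		console.log( "both requests finished", a, b );
	} )
	.fail( function() {
		console.log( "at least one request failed" );
	} );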
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; - - // Support: IE <=9 only - // IE <=9 replaces "; - support.option = !!div.lastChild; -} )(); - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are 
asynchronous, except when they are no-op. -// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. 
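// Illustrative sketch, not part of the original jQuery source: leverageNative
// (above) keeps checkable-input state consistent when a click is triggered
// synthetically, because the native click fires and its result is captured
// before the synthetic handlers run.
var box = jQuery( "<input type='checkbox'>" ).appendTo( document.body );

box.on( "click", function() {
	console.log( "checked is now", this.checked );
} );

box.trigger( "click" ); // logs "checked is now true"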
- saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - return result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. 
-// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - // Support: IE <=10 - 11, Edge 12 - 13 only - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( elem ).children( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { - elem.type = elem.type.slice( 5 ); - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. 
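// Illustrative sketch, not part of the original jQuery source: delegated
// handlers registered through jQuery.fn.on (above) are stored ahead of
// directly-bound ones (handlers.delegateCount) and matched against the event
// target at dispatch time. "#list" and ".item" are placeholder selectors.
jQuery( "#list" ).on( "click", ".item", function( event ) {
	console.log( "delegated click reached", event.currentTarget );
} );

// Removing only the delegated handlers for that selector
jQuery( "#list" ).off( "click", ".item" );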
- if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.get( src ); - events = pdataOld.events; - - if ( events ) { - dataPriv.remove( dest, "handle events" ); - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
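// Illustrative sketch, not part of the original jQuery source: domManip and
// buildFragment (above) back the manipulation methods defined further below,
// and cloneCopyEvent is what lets clone( true ) carry events and data across.
// "#target" is a placeholder selector.
jQuery( "#target" ).append( "<p>appended via domManip</p>" );

var copy = jQuery( "#target" ).clone( true ); // true copies events and user data
copy.appendTo( document.body );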
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove 
- elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! 
- reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px"; - tr.style.height = "1px"; - trChild.style.height = "9px"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = parseInt( trStyle.height ) > 3; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. 
- return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( - dataPriv.get( cur, "events" ) || Object.create( null ) - )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } 
- } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If 
prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script - if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Guide to using Auto-Encoder

-

Inspired by the transformers library’s adoption of Auto classes, we created an AutoEncoder class that allows you to easily load the relevant model. It is not to be confused with the autoencoder architecture.

-

The relevant models can be found here:

-
from vectorhub import AutoEncoder
-encoder = AutoEncoder('text/bert')
-encoder.encode("Hi...")
-
-
-

To view the list of available models, you can call:

-
import vectorhub as vh
-vh.list_available_auto_models()
-
-
-

When you instantiate the AutoEncoder, you will need to pip install the relevant module. The requirements for each model are listed on its documentation page (see the sketch after the model list below).

-

The list of supported models is:

-
['text/albert', 'text/bert', 'text/labse', 'text/use', 'text/use-multi', 'text/use-lite', 'text/legal-bert', 'audio/fairseq', 'audio/speech-embedding', 'audio/trill', 'audio/trill-distilled', 'audio/vggish', 'audio/yamnet', 'audio/wav2vec', 'image/bit', 'image/bit-medium', 'image/inception', 'image/inception-v2', 'image/inception-v3', 'image/inception-resnet', 'image/mobilenet', 'image/mobilenet-v2', 'image/resnet', 'image/resnet-v2', 'text_text/use-multi-qa', 'text_text/use-qa', 'text_text/dpr', 'text_text/lareqa-qa']
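For example, here is a minimal sketch of loading one of the audio models from this list. It assumes the matching extras are installed (e.g. pip install vectorhub[encoders-audio-tfhub], as noted on each model's page) and that AutoEncoder returns the underlying encoder instance, which exposes the same encode method documented for that model:

>>> from vectorhub import AutoEncoder
>>> # Assumes: pip install vectorhub[encoders-audio-tfhub]
>>> encoder = AutoEncoder('audio/speech-embedding')
>>> # Same sample file used in the SpeechEmbedding2Vec example elsewhere in these docs.
>>> vector = encoder.encode('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')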
-
-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/bi_encoders.text_text.dpr2vec.html b/docs/bi_encoders.text_text.dpr2vec.html deleted file mode 100644 index 6a8e71a5..00000000 --- a/docs/bi_encoders.text_text.dpr2vec.html +++ /dev/null @@ -1,257 +0,0 @@ - - - - - - - - - - DPR2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

DPR2Vec

-
-

Transformers

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/bi_encoders.text_text.lareqa_qa2vec.html b/docs/bi_encoders.text_text.lareqa_qa2vec.html deleted file mode 100644 index fe41344c..00000000 --- a/docs/bi_encoders.text_text.lareqa_qa2vec.html +++ /dev/null @@ -1,257 +0,0 @@ - - - - - - - - - - LAReQA2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

LAReQA2Vec

-
-

TFHub

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/bi_encoders.text_text.use_qa2vec.html b/docs/bi_encoders.text_text.use_qa2vec.html deleted file mode 100644 index 657f122f..00000000 --- a/docs/bi_encoders.text_text.use_qa2vec.html +++ /dev/null @@ -1,257 +0,0 @@ - - - - - - - - - - USEQA2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

USEQA2Vec

-
-

TFHub

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.audio.speech_embedding2vec.html b/docs/encoders.audio.speech_embedding2vec.html deleted file mode 100644 index 71f29697..00000000 --- a/docs/encoders.audio.speech_embedding2vec.html +++ /dev/null @@ -1,404 +0,0 @@ - - - - - - - - - - SpeechEmbedding2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

SpeechEmbedding2Vec

-
-

TFHub

-

Model Name: Speech Embedding

-

Vector Length: 96 (default)

-

Description: -With the rise of low power speech-enabled devices, there is a growing demand to quickly produce models for recognizing arbitrary sets of keywords. As with many machine learning tasks, one of the most challenging parts in the model creation process is obtaining a sufficient amount of training data. In this paper, we explore the effectiveness of synthesized speech data in training small spoken term detection models of around 400k parameters. Instead of training such models directly on the audio or low level features such as MFCCs, we use a pre-trained speech embedding model trained to extract useful features for keyword spotting models. Using this speech embedding, we show that a model which detects 10 keywords when trained on only synthetic speech is equivalent to a model trained on over 500 real examples. We also show that a model without our speech embeddings would need to be trained on over 4000 real examples to reach the same accuracy.

-

Paper: https://arxiv.org/abs/2002.01322

-

Repository: https://tfhub.dev/google/speech_embedding/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-01-31

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import SpeechEmbedding2Vec -model = SpeechEmbedding2Vec() -vector = model.encode('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -`

-
-
-class vectorhub.encoders.audio.tfhub.speech_embedding.SpeechEmbedding2Vec(model_url: str = 'https://tfhub.dev/google/speech_embedding/1', signature: str = 'default')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
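A hedged sketch of how add_documents is intended to be used together with search, assuming add_documents encodes each item with this encoder before uploading it; username and api_key are your own credentials (see request_api_key below), and 'audio_demo' is a hypothetical collection name:

>>> model = SpeechEmbedding2Vec()
>>> items = ['https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav']
>>> model.add_documents(username, api_key, items=items, collection_name='audio_demo')
>>> model.search(items[0], num_results=5)  # nearest-neighbour search over the uploaded items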
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean')
-

Encode the vector. -Example:

-
>>> from vectorhub.encoders.audio import SpeechEmbedding2Vec
->>> encoder = SpeechEmbedding2Vec()
->>> encoder.encode(...)
-
-
-
- -
-
-property encoder_type
-

The encoder type determines whether the model uses ‘encode’ or ‘encode_question’/’encode_answer’. Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

A method that specifies how to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/speech_embedding/1': {'vector_length': 96}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help users avoid misspelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLs for the model in question

  • -
-
-
-
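A hedged usage sketch: the known URLs come from the urls attribute shown above, and it is assumed that validate_model_url warns or raises on an unrecognised URL rather than returning a value:

>>> known_urls = list(SpeechEmbedding2Vec.urls)
>>> SpeechEmbedding2Vec.validate_model_url('https://tfhub.dev/google/speech_embedding/1', known_urls)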
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.audio.trill2vec.html b/docs/encoders.audio.trill2vec.html deleted file mode 100644 index 8f5bc39c..00000000 --- a/docs/encoders.audio.trill2vec.html +++ /dev/null @@ -1,404 +0,0 @@ - - - - - - - - - - Trill2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Trill2Vec

-
-

TFHub

-

Model Name: Trill - Triplet Loss Network

-

Vector Length: 512 (default)

-

Description: -The ultimate goal of transfer learning is to reduce labeled data requirements by exploiting a pre-existing embedding model trained for different datasets or tasks. The visual and language communities have established benchmarks to compare embeddings, but the speech community has yet to do so. This paper proposes a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. The proposed representation outperforms other representations on the benchmark, and even exceeds state-of-the-art performance on a number of transfer learning tasks. The embedding is trained on a publicly available dataset, and it is tested on a variety of low-resource downstream tasks, including personalization tasks and medical domain. The benchmark, models, and evaluation code are publicly released.

-

Paper: https://arxiv.org/abs/2002.12764

-

Repository: https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-02-25

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import Trill2Vec -model = Trill2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -`

-
-
-class vectorhub.encoders.audio.tfhub.trill.Trill2Vec(model_url: str = 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3', layer: str = 'embedding')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean')
-

Example

-
>>> from encoders.audio.trill import Trill2Vec
->>> encoder = Trill2Vec()
->>> encoder.encode(...)
-
-
-
- -
-
-property encoder_type
-

The encoder type determines whether the model uses ‘encode’ or ‘encode_question’/’encode_answer’. Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

A method that specifies how to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help users avoid misspelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLs for the model in question

  • -
-
-
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.audio.vectorai2vec.html b/docs/encoders.audio.vectorai2vec.html deleted file mode 100644 index 7134e669..00000000 --- a/docs/encoders.audio.vectorai2vec.html +++ /dev/null @@ -1,290 +0,0 @@ - - - - - - - - - - ViAudio2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

ViAudio2Vec

-
-

Vector AI

-

For Vector AI users to access our deployed vector models and evaluate embeddings.

-

Vector AI’s deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves how the embeddings work. These models are selected to work out of the box, after being tested for their success on our end.

-

To get access to Vector AI, you will need a username and API key, which can be requested with the request_api_key method from the Vector AI GitHub package (gh.vctr.ai).

-

Example

-
>>> from vectorhub.encoders.audio.vectorai.vi_encoder import ViAudio2Vec
->>> model = ViAudio2Vec(username, api_key)
->>> model.encode("audio_file.wav")
-
-
-
-
-class vectorhub.encoders.audio.vectorai.vi_encoder.ViAudio2Vec(username, api_key, url: str = 'https://api.vctr.ai', collection_name='base')
-

Bases: object

-

Request a username and API key from gh.vctr.ai.
:param username: Your Vector AI username. You can request a username and API key from the Vector AI GitHub package using the request_api_key method.
:param api_key: Your Vector AI API key, requested in the same way.
:param url: URL for the Vector AI website.
:param collection_name: Not necessary for users.

-
-
-encode(audio)
-
- -
-
-property vector_length
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.audio.vggish2vec.html b/docs/encoders.audio.vggish2vec.html deleted file mode 100644 index 54b91b7d..00000000 --- a/docs/encoders.audio.vggish2vec.html +++ /dev/null @@ -1,404 +0,0 @@ - - - - - - - - - - Vggish2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Vggish2Vec

-
-

TFHub

-

Model Name: VGGish

-

Vector Length: 128 (default)

-

Description: -An audio event embedding model trained on the YouTube-8M dataset. -VGGish should be used: -- as a high-level feature extractor: the 128-D embedding output of VGGish can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows quickly creating specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end. -- as a warm start: the VGGish model parameters can be used to initialize part of a larger model which allows faster fine-tuning and model exploration.

-
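A sketch of the first use case above (the 128-D embedding as input features for another shallow model). scikit-learn is used purely for illustration and is not a vectorhub dependency; audio_urls and labels stand in for your own labelled audio files:

>>> from vectorhub.encoders.audio.tfhub import Vggish2Vec
>>> from sklearn.linear_model import LogisticRegression
>>> model = Vggish2Vec()
>>> vectors = [model.encode(model.read(url)) for url in audio_urls]  # one pooled 128-D vector per file
>>> clf = LogisticRegression().fit(vectors, labels)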

Paper:

-

Repository: https://tfhub.dev/google/vggish/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-03-11

-

Limitations: -VGGish has been trained on millions of YouTube videos and although these are very diverse, there can still be a domain -mismatch between the average YouTube video and the audio inputs expected for any given task. You should expect to do some -amount of fine-tuning and calibration to make VGGish usable in any system that you build.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import Vggish2Vec -model = Vggish2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -`

-
-
-class vectorhub.encoders.audio.tfhub.vggish.Vggish2Vec(model_url: str = 'https://tfhub.dev/google/vggish/1')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean')
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

A method to read in the audio data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/vggish/1': {'vector_length': 128}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.audio.wav2vec.html b/docs/encoders.audio.wav2vec.html deleted file mode 100644 index 13936b64..00000000 --- a/docs/encoders.audio.wav2vec.html +++ /dev/null @@ -1,257 +0,0 @@ - - - - - - - - - - Wav2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Wav2Vec

-
-

PyTorch

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.audio.yamnet2vec.html b/docs/encoders.audio.yamnet2vec.html deleted file mode 100644 index 9afda86e..00000000 --- a/docs/encoders.audio.yamnet2vec.html +++ /dev/null @@ -1,413 +0,0 @@ - - - - - - - - - - Yamnet2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Yamnet2Vec

-
-

TFHub

-

Model Name: Yamnet

-

Vector Length: 1024 (default)

-

Description: -YAMNet is an audio event classifier that takes audio waveform as input and makes independent predictions for each -of 521 audio events from the AudioSet ontology. The model uses the MobileNet v1 architecture and was trained using -the AudioSet corpus. This model was originally released in the TensorFlow Model Garden, where we have the model -source code, the original model checkpoint, and more detailed documentation. -This model can be used:

-
    -
  • as a stand-alone audio event classifier that provides a reasonable baseline across a wide variety of audio events.

  • -
  • as a high-level feature extractor: the 1024-D embedding output of YAMNet can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows quickly creating specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end. (See the sketch after this list.)

  • -
  • as a warm start: the YAMNet model parameters can be used to initialize part of a larger model which allows faster fine-tuning and model exploration.

  • -
-
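As a small illustration of the feature-extractor use case referenced in the list above, the 1024-D embeddings can feed a shallow classifier; this sketch assumes scikit-learn is installed, and the file names and labels are placeholders rather than a real dataset:

>>> from vectorhub.encoders.audio.tfhub import Yamnet2Vec
>>> from sklearn.linear_model import LogisticRegression
>>> model = Yamnet2Vec()
>>> files, labels = ['dog_bark.wav', 'siren.wav'], [0, 1]    # placeholder audio files and labels
>>> features = [model.encode(model.read(f)) for f in files]  # 1024-D embeddings as input features
>>> clf = LogisticRegression().fit(features, labels)         # shallow model trained on top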

Paper:

-

Repository: https://tfhub.dev/google/yamnet/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-03-11

-

Limitations: -YAMNet’s classifier outputs have not been calibrated across classes, so you cannot directly treat -the outputs as probabilities. For any given task, you will very likely need to perform a calibration with task-specific data -which lets you assign proper per-class score thresholds and scaling. -YAMNet has been trained on millions of YouTube videos and although these are very diverse, there can still be a domain mismatch -between the average YouTube video and the audio inputs expected for any given task. You should expect to do some amount of -fine-tuning and calibration to make YAMNet usable in any system that you build.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import Yamnet2Vec -model = Yamnet2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -`

-
-
-class vectorhub.encoders.audio.tfhub.yamnet.Yamnet2Vec(model_url: str = 'https://tfhub.dev/google/yamnet/1')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean', layer='embeddings')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean', layer='embeddings')
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

A method to read in the audio data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/yamnet/1': {'vector_length': 1024}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.image.bit2vec.html b/docs/encoders.image.bit2vec.html deleted file mode 100644 index 9bcddffc..00000000 --- a/docs/encoders.image.bit2vec.html +++ /dev/null @@ -1,439 +0,0 @@ - - - - - - - - - - Bit2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Bit2Vec

-
-

TFHub

-

Model Name: BiT - Big Transfer, General Visual Representation Learning (Small)

-

Vector Length: 2048 (default)

-

Description: -Transfer of pre-trained representations improves sample efficiency and simplifies hyperparameter tuning when training -deep neural networks for vision. We revisit the paradigm of pre-training on large supervised datasets and fine-tuning the model -on a target task. We scale up pre-training, and propose a simple recipe that we call Big Transfer (BiT). By combining a few carefully -selected components, and transferring using a simple heuristic, we achieve strong performance on over 20 datasets. BiT performs well across -a surprisingly wide range of data regimes – from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark (VTAB). On small datasets, BiT attains 76.8% on -ILSVRC-2012 with 10 examples per class, and 97.0% on CIFAR-10 with 10 examples per class. We conduct detailed analysis -of the main components that lead to high transfer performance.

-
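A brief sketch of using the BiT feature vectors for image similarity; numpy is assumed to be available and the second image path is a placeholder:

>>> import numpy as np
>>> from vectorhub.encoders.image.tfhub import BitSmall2Vec
>>> model = BitSmall2Vec()
>>> a = np.array(model.encode(model.read('https://getvectorai.com/assets/hub-logo-with-text.png')))
>>> b = np.array(model.encode(model.read('another_image.png')))  # placeholder image
>>> float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))       # cosine similarity of the 2048-D vectors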

Paper: https://arxiv.org/abs/1912.11370

-

Repository: https://github.com/google-research/big_transfer

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-12-24

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import BitSmall2Vec -model = BitSmall2Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -`

-
-
-class vectorhub.encoders.image.tfhub.bit.BitSmall2Vec(model_url: str = 'https://tfhub.dev/google/bit/s-r50x1/1')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-

Bulk encode. Chunk size should be specified outside of the images.

-
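Since bulk_encode leaves batching to the caller, one possible sketch combines it with the chunk helper documented below (assuming chunk yields successive sub-lists; image_urls is a placeholder list of links):

>>> from vectorhub.encoders.image.tfhub import BitSmall2Vec
>>> model = BitSmall2Vec()
>>> images = [model.read(url) for url in image_urls]  # image_urls is a placeholder
>>> vectors = []
>>> for batch in BitSmall2Vec.chunk(images, chunk_size=32):
...     vectors.extend(model.bulk_encode(batch))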
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

A method to read images. -:param image: An image link, bytes, or io.BytesIO data format. -:param as_gray: Read in the image as black and white.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/bit/s-r101x1/1': {'vector_length': 2048}, 'https://tfhub.dev/google/bit/s-r101x3/1': {'vector_length': 6144}, 'https://tfhub.dev/google/bit/s-r152x4/1': {'vector_length': 8192}, 'https://tfhub.dev/google/bit/s-r50x1/1': {'vector_length': 2048}, 'https://tfhub.dev/google/bit/s-r50x3/1': {'vector_length': 6144}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.image.inception2vec.html b/docs/encoders.image.inception2vec.html deleted file mode 100644 index 1faab626..00000000 --- a/docs/encoders.image.inception2vec.html +++ /dev/null @@ -1,257 +0,0 @@ - - - - - - - - - - Inception2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Inception2Vec

-
-

TFHub

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.image.inception_resnet2vec.html b/docs/encoders.image.inception_resnet2vec.html deleted file mode 100644 index b0fb03a2..00000000 --- a/docs/encoders.image.inception_resnet2vec.html +++ /dev/null @@ -1,445 +0,0 @@ - - - - - - - - - - InceptionResnet2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

InceptionResnet2Vec

-
-

TFHub

-

Model Name: Inception Resnet

-

Vector Length: 1536 (default)

-

Description: -Very deep convolutional networks have been central to the largest advances in image recognition performance in -recent years. One example is the Inception architecture that has been shown to achieve very good performance at -relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional -architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest -generation Inception-v3 network. This raises the question of whether there are any benefit in combining the Inception architecture -with residual connections. Here we give clear empirical evidence that training with residual connections accelerates the training -of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive -Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both -residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 -classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual -Inception networks. With an ensemble of three residual and one Inception-v4, we achieve 3.08 percent top-5 error on the test set of the -ImageNet classification (CLS) challenge.

-

Paper: https://arxiv.org/abs/1602.07261

-

Repository: https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2016-02-23

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import InceptionResnet2Vec -model = InceptionResnet2Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -`

-
-
-class vectorhub.encoders.image.tfhub.inception_resnet.InceptionResnet2Vec(model_url='https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-

Encode an image using InceptionResnet.

-

Example

-
>>> from vectorhub.encoders.image.tfhub import InceptionResnet2Vec
->>> model = InceptionResnet2Vec()
->>> model.encode(model.read('https://getvectorai.com/assets/hub-logo-with-text.png'))
-
-
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

A method to read images. -:param image: An image link, bytes, or io.BytesIO data format. -:param as_gray: Read in the image as black and white.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4': {'vector_length': 1536}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.image.mobilenet2vec.html b/docs/encoders.image.mobilenet2vec.html deleted file mode 100644 index a56e48b6..00000000 --- a/docs/encoders.image.mobilenet2vec.html +++ /dev/null @@ -1,433 +0,0 @@ - - - - - - - - - - MobileNet2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

MobileNet2Vec

-
-

TFHub

-

Model Name: MobileNet

-

Vector Length: 1024 (default)

-

Description: -We present a class of efficient models called MobileNets for mobile and embedded vision applications. MobileNets are based on a streamlined architecture that uses depth-wise separable convolutions to build light weight deep neural networks. We introduce two simple global hyper-parameters that efficiently trade off between latency and accuracy. These hyper-parameters allow the model builder to choose the right sized model for their application based on the constraints of the problem. We present extensive experiments on resource and accuracy tradeoffs and show strong performance compared to other popular models on ImageNet classification. We then demonstrate the effectiveness of MobileNets across a wide range of applications and use cases including object detection, finegrain classification, face attributes and large scale geo-localization.

-

Paper: https://arxiv.org/abs/1704.04861

-

Repository: https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2017-04-17

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import MobileNetV12Vec -model = MobileNetV12Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -`

-
-
-class vectorhub.encoders.image.tfhub.mobilenet.MobileNetV12Vec(model_url: str = 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4', resize_mode: str = 'symmetric')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-

Bulk encode. Chunk size should be specified outside of the images.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

A method to read images. -:param image: An image link, bytes, or io.BytesIO data format. -:param as_gray: Read in the image as black and white.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/mobilenet_v1_025_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 1024}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 1024}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 1024}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1024}}
-
- -
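The urls mapping above also lists the smaller MobileNet variants; for example, a lighter checkpoint producing 512-dimensional vectors can be selected by passing its URL to the constructor:

>>> from vectorhub.encoders.image.tfhub import MobileNetV12Vec
>>> model = MobileNetV12Vec(model_url='https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/feature_vector/4')
>>> model.encode(model.read('https://getvectorai.com/assets/hub-logo-with-text.png'))  # 512-D vector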
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.image.resnet2vec.html b/docs/encoders.image.resnet2vec.html deleted file mode 100644 index 93b58f4e..00000000 --- a/docs/encoders.image.resnet2vec.html +++ /dev/null @@ -1,433 +0,0 @@ - - - - - - - - - - ResNet2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

ResNet2Vec

-
-

TFHub

-

Model Name: ResNet

-

Vector Length: 2048 (default)

-

Description: -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers—8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers.

-

Paper: https://arxiv.org/abs/1512.03385

-

Repository:

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2015-12-10

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-


-

`python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import ResnetV12Vec -model = ResnetV12Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -`

-
-
-class vectorhub.encoders.image.tfhub.resnet.ResnetV12Vec(model_url: str = 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images: List[str])
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

A method to read images. -:param image: An image link, bytes, or io.BytesIO data format. -:param as_gray: Read in the image as black and white.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/resnet_v1_101/feature_vector/4': {'vector_length': 2048}, 'https://tfhub.dev/google/imagenet/resnet_v1_152/feature_vector/4': {'vector_length': 2048}, 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4': {'vector_length': 2048}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.image.vectorai2vec.html b/docs/encoders.image.vectorai2vec.html deleted file mode 100644 index 30b0390f..00000000 --- a/docs/encoders.image.vectorai2vec.html +++ /dev/null @@ -1,279 +0,0 @@ - - - - - - - - - - ViImage2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

ViImage2Vec

-
-

Vector AI

-

For Vector AI users to access our deployed vector models and evaluate embeddings.

-

Vector AI’s deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves -how the embedding works. These models are selected to work out-of-the-box after testing for their success on our end.

-

To get access to Vector AI, request a username and API key from gh.vctr.ai.

-

Example

-
>>> from vectorhub.encoders.image.vectorai import ViImage2Vec
->>> model = ViImage2Vec(username, api_key)
->>> model.encode("sample.jpg")
-
-
-
-
-class vectorhub.encoders.image.vectorai.vi_encoder.ViImage2Vec(username, api_key, url=None, collection_name='base')
-

Bases: object

-

Request a username and API key from gh.vctr.ai.

-
-
-encode(image)
-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.albert2vec.html b/docs/encoders.text.albert2vec.html deleted file mode 100644 index e6e2f4a7..00000000 --- a/docs/encoders.text.albert2vec.html +++ /dev/null @@ -1,419 +0,0 @@ - - - - - - - - - - AlBert2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

AlBert2Vec

-
-

TFHub

-

Model Name: Albert - A Lite Bert

-

Vector Length: 768 (albert_en_base)

-

Description: -Increasing model size when pretraining natural language representations often results in improved performance on downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations and longer training times. To address these problems, we present two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows that our proposed methods lead to models that scale much better compared to the original BERT. We also use a self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and squad benchmarks while having fewer parameters compared to BERT-large.

-

Paper: https://arxiv.org/abs/1909.11942

-

Repository: https://tfhub.dev/tensorflow/albert_en_base/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-09-26

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import Albert2Vec -model = Albert2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -`

-
-
-class vectorhub.encoders.text.tfhub.albert.Albert2Vec(model_url: str = 'https://tfhub.dev/tensorflow/albert_en_base/2', max_seq_length: int = 228, normalize: bool = True, preprocessor_url: str = 'http://tfhub.dev/tensorflow/albert_en_preprocess/1')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: list, pooling_strategy='pooled_output')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str, pooling_strategy='pooled_output')
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url)
-
- -
-
-init_tokenizer(preprocessor_url)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method specifying how to read in the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/tensorflow/albert_en_base/1': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/albert_en_base/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/albert_en_large/1': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/albert_en_large/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/albert_en_xlarge/1': {'vector_length': 2048}, 'https://tfhub.dev/tensorflow/albert_en_xlarge/2': {'vector_length': 2048}, 'https://tfhub.dev/tensorflow/albert_en_xxlarge/1': {'vector_length': 4096}, 'https://tfhub.dev/tensorflow/albert_en_xxlarge/2': {'vector_length': 4096}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.bert2vec.html b/docs/encoders.text.bert2vec.html deleted file mode 100644 index 4462bc8b..00000000 --- a/docs/encoders.text.bert2vec.html +++ /dev/null @@ -1,426 +0,0 @@ - - - - - - - - - - Bert2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Bert2Vec

-
-

TFHub

-

Model Name: BERT - Bidirectional Encoder Representations

-

Vector Length: 1024 (Bert Large)

-

Description: -We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.

-

![Bert Image](https://miro.medium.com/max/619/1*iJqlhZz-g6ZQJ53-rE9VvA.png)

-

Paper: https://arxiv.org/abs/1810.04805v2

-

Repository: https://tfhub.dev/google/collections/bert/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2018-10-11

-

Limitations: -* NA

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import Bert2Vec -model = Bert2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -`

-
-
-class vectorhub.encoders.text.tfhub.bert.Bert2Vec(model_url: str = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3', max_seq_length: int = 64, normalize: bool = True)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: list, pooling_strategy='pooled_output')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str, pooling_strategy='pooled_output')
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-init_tokenizer()
-
- -
-
-static is_url_working(url)
-
- -
-
-process(input_strings: str)
-
- -
-
-read(text: str)
-

An abstract method specifying how to read in the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/3': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.labse2vec.html b/docs/encoders.text.labse2vec.html deleted file mode 100644 index e6745bbb..00000000 --- a/docs/encoders.text.labse2vec.html +++ /dev/null @@ -1,425 +0,0 @@ - - - - - - - - - - LaBSE2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

LaBSE2Vec

-
-

TFHub

-

Model Name: LaBSE - Language-agnostic BERT Sentence Embedding

-

Vector Length: 768 (default)

-

Description: -The language-agnostic BERT sentence embedding encodes text into high dimensional vectors. The model is trained and optimized to produce similar representations exclusively for bilingual sentence pairs that are translations of each other. So it can be used for mining for translations of a sentence in a larger corpus. -In “Language-agnostic BERT Sentence Embedding”, we present a multilingual BERT embedding model, called LaBSE, that produces language-agnostic cross-lingual sentence embeddings for 109 languages. The model is trained on 17 billion monolingual sentences and 6 billion bilingual sentence pairs using MLM and TLM pre-training, resulting in a model that is effective even on low-resource languages for which there is no data available during training. Further, the model establishes a new state of the art on multiple parallel text (a.k.a. bitext) retrieval tasks. We have released the pre-trained model to the community through tfhub, which includes modules that can be used as-is or can be fine-tuned using domain-specific data.

-
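Because LaBSE is optimized so that translations map to similar vectors, a translation pair should score highly under cosine similarity; a minimal sketch, assuming numpy is available, with illustrative sentences:

>>> import numpy as np
>>> from vectorhub.encoders.text.tfhub import LaBSE2Vec
>>> model = LaBSE2Vec()
>>> en = np.array(model.encode("How is the weather today?"))
>>> de = np.array(model.encode("Wie ist das Wetter heute?"))    # German translation of the above
>>> float(en @ de / (np.linalg.norm(en) * np.linalg.norm(de)))  # expected to be high for translation pairs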

Paper: https://arxiv.org/pdf/2007.01852v1.pdf

-

Repository: https://tfhub.dev/google/LaBSE/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-07-03

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import LaBSE2Vec -model = LaBSE2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -`

-
-
-class vectorhub.encoders.text.tfhub.labse.LaBSE2Vec(model_url: str = 'https://tfhub.dev/google/LaBSE/1', max_seq_length: int = 128, normalize: bool = True)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: list)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-init_tokenizer()
-
- -
-
-static is_url_working(url)
-
- -
-
-process(input_strings: str)
-
- -
-
-read(text: str)
-

An abstract method specifying how to read in the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/LaBSE/1': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.legalbert2vec.html b/docs/encoders.text.legalbert2vec.html deleted file mode 100644 index 6e82e98b..00000000 --- a/docs/encoders.text.legalbert2vec.html +++ /dev/null @@ -1,412 +0,0 @@ - - - - - - - - - - LegalBert2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

LegalBert2Vec

-
-

Transformers

-

Model Name: Legal Bert

-

Vector Length: 768 (default)

-

Description: -BERT has achieved impressive performance in several NLP tasks. However, there has been limited investigation on its adaptation guidelines in specialised domains. Here we focus on the legal domain, where we explore several approaches for applying BERT models to downstream legal tasks, evaluating on multiple datasets. Our findings indicate that the previous guidelines for pre-training and fine-tuning, often blindly followed, do not always generalize well in the legal domain. Thus we propose a systematic investigation of the available strategies when applying BERT in specialised domains. These are: (a) use the original BERT out of the box, (b) adapt BERT by additional pre-training on domain-specific corpora, and (c) pre-train BERT from scratch on domain-specific corpora. We also propose a broader hyper-parameter search space when fine-tuning for downstream tasks and we release LEGAL-BERT, a family of BERT models intended to assist legal NLP research, computational law, and legal technology applications.

-

Paper: https://arxiv.org/abs/2010.02559

-

Repository: https://huggingface.co/nlpaueb/legal-bert-base-uncased

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-10-06

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-torch-transformers]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-text-torch-transformers] -from vectorhub.encoders.text.torch_transformers import LegalBert2Vec -model = LegalBert2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -`

-
-
-class vectorhub.encoders.text.torch_transformers.legal_bert.LegalBert2Vec(model_name: str = 'nlpaueb/legal-bert-base-uncased')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str])List[List[float]]
-

Encode multiple sentences using transformers. -:param texts: List[str]

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: Union[str, List[str]])List[float]
-

Encode words using transformers. -:param text: str

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method specifying how to read in the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'nlpaueb/bert-base-uncased-contracts': {'data': 'Trained on US contracts'}, 'nlpaueb/bert-base-uncased-echr\t': {'data': 'Trained on ECHR cases'}, 'nlpaueb/bert-base-uncased-eurlex': {'data': 'Trained on EU legislation'}, 'nlpaueb/legal-bert-base-uncased': {'data': 'Trained on all the above'}, 'nlpaueb/legal-bert-small-uncased': {'data': 'Trained on all the above'}}
-
- -
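The urls mapping above lists the domain-specific LEGAL-BERT checkpoints; for example, the contracts-only variant can be selected via model_name (the clause text is illustrative):

>>> from vectorhub.encoders.text.torch_transformers import LegalBert2Vec
>>> model = LegalBert2Vec(model_name='nlpaueb/bert-base-uncased-contracts')
>>> model.encode("The Supplier shall indemnify the Customer against all losses.")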
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.sentencetransformer2vec.html b/docs/encoders.text.sentencetransformer2vec.html deleted file mode 100644 index d47d5ca9..00000000 --- a/docs/encoders.text.sentencetransformer2vec.html +++ /dev/null @@ -1,419 +0,0 @@ - - - - - - - - - - SentenceTransformer2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

SentenceTransformer2Vec

-
-

Sentence-Transformers

-

Model Name: Sentence Transformer Models

-

Vector Length: Depends on model.

-

Description: -These are Sentence Transformer models from sbert.net by UKPLab.

-

Paper: https://arxiv.org/abs/1908.10084

-

Repository: https://github.com/UKPLab/sentence-transformers

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-08-27

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-sentence-transformers]

-

Example:

-

-
-
-

`python -#pip install vectorhub[encoders-text-sentence-transformers] -from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec -model = SentenceTransformer2Vec('distilroberta-base-paraphrase-v1') -model.encode("I enjoy taking long walks along the beach with my dog.") -`

-
-
-class vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers.SentenceTransformer2Vec(model_name: str)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str])List[List[float]]
-

Bulk encode words from transformers.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str)List[float]
-

Encode text with a sentence-transformers model. -This takes the beginning set of tokens, turns them into vectors -and returns the mean pooling of the tokens. -:param text: string

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_list_of_urls()
-

Return list of URLS.

-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method specifying how to read in the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'LaBSE': {'vector_length': 768}, 'average_word_embeddings_glove.6B.300d': {'vector_length': 300}, 'average_word_embeddings_glove.840B.300d': {'vector_length': 300}, 'average_word_embeddings_komninos': {'vector_length': 300}, 'average_word_embeddings_levy_dependency': {'vector_length': 768}, 'bert-base-wikipedia-sections-mean-tokens': {'vector_length': 768}, 'bert-large-nli-stsb-mean-tokens': {'vector_length': 1024}, 'distilbert-base-nli-stsb-mean-tokens': {'vector_length': 768}, 'distilbert-base-nli-stsb-quora-ranking': {'vector_length': 768}, 'distilbert-multilingual-nli-stsb-quora-ranking': {'vector_length': 768}, 'distilroberta-base-msmarco-v1': {'vector_length': 768}, 'distilroberta-base-paraphrase-v1': {'vector_length': 768}, 'distiluse-base-multilingual-cased-v2': {'vector_length': 512}, 'roberta-base-nli-stsb-mean-tokens': {'vector_length': 768}, 'roberta-large-nli-stsb-mean-tokens': {'vector_length': 1024}, 'xlm-r-bert-base-nli-stsb-mean-tokens': {'vector_length': 768}, 'xlm-r-distilroberta-base-paraphrase-v1': {'vector_length': 768}}
-
- -
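Any model name from the urls mapping above can be passed to the constructor; for example, the multilingual checkpoint below produces 512-dimensional vectors (the sentence is illustrative):

>>> from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec
>>> model = SentenceTransformer2Vec('distiluse-base-multilingual-cased-v2')
>>> model.encode("I enjoy taking long walks along the beach with my dog.")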
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.transformer2vec.html b/docs/encoders.text.transformer2vec.html deleted file mode 100644 index ff43e0d6..00000000 --- a/docs/encoders.text.transformer2vec.html +++ /dev/null @@ -1,573 +0,0 @@ - - - - - - - - - - Transformer2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Transformer2Vec

-
-

PyTorch Transformers

-

Model Name: Transformer Models

-

Vector Length: Depends on model.

-

Description:
These are PyTorch AutoModels from HuggingFace.

-

Paper: https://arxiv.org/abs/1910.03771

-

Repository: https://huggingface.co/transformers/pretrained_models.html

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: None

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-torch-transformers-auto]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-text-torch-transformers-auto]
from vectorhub.encoders.text.torch_transformers import Transformer2Vec
model = Transformer2Vec('bert-base-uncased')
model.encode("I enjoy taking long walks along the beach with my dog.")
`

-
-
-class vectorhub.encoders.text.torch_transformers.torch_auto_transformers.Transformer2Vec(model_name: str)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str]) → List[List[float]]
-

Encode multiple texts using the transformer model.
:param texts: List[str]

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk a Python iterable (but not a pandas DataFrame) into lists of size chunk_size.
:param lst: Python List
:param chunk_size: The chunk size of an object.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: Union[str, List[str]]) → List[float]
-

Encode text using the transformer model.
:param text: str

-
- -
-
-property encoder_type
-

The encoder type determines whether the model uses ‘encode’ or ‘encode_question’/’encode_answer’.
Currently supported encoder types:

Question-Answer
Text-Image
Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Retrieve a specified number of documents.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'bert-base-uncased': {'vector_length': 768}, 'distilbert-base-uncased': {'vector_length': 768}, 'facebook/bart-base': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This helps
users avoid mis-spelling the name of the model.

# TODO:
Improve model URL validation so that it does not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-vectorhub.encoders.text.torch_transformers.torch_auto_transformers.list_tested_transformer_models()
-

List the tested transformer models.

-
- -
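For a quick illustration, the function documented above can be imported from its module and used to inspect which model names have been tested (the exact names depend on the installed version):

from vectorhub.encoders.text.torch_transformers.torch_auto_transformers import list_tested_transformer_models

# Print the names of the transformer models tested with Transformer2Vec.
for model_name in list_tested_transformer_models():
    print(model_name)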
-
-

Tensorflow Transformers

-

Model Name: Transformer Models

-

Vector Length: Depends on model.

-

Description: -These are Tensorflow Automodels from HuggingFace.

-

Paper: https://arxiv.org/abs/1910.03771

-

Repository: https://huggingface.co/transformers/pretrained_models.html

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: None

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tf-transformers]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-text-tf-transformers]
from vectorhub.encoders.text.tf_transformers import TFTransformer2Vec
model = TFTransformer2Vec('bert-base-uncased')
model.encode("I enjoy taking long walks along the beach with my dog.")
`

-
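Building on the example above, bulk encoding a small list of sentences is a single call; for 'bert-base-uncased' each returned vector is 768-dimensional:

texts = [
    "I enjoy taking long walks along the beach with my dog.",
    "Transformers produce dense vector representations of text.",
]
vectors = model.bulk_encode(texts)
print(len(vectors), len(vectors[0]))  # 2 768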
-
-class vectorhub.encoders.text.tf_transformers.tf_auto_transformers.TFTransformer2Vec(model_name: str, config=None)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str]) → List[List[float]]
-

Bulk encode a list of texts using the transformer model.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk a Python iterable (but not a pandas DataFrame) into lists of size chunk_size.
:param lst: Python List
:param chunk_size: The chunk size of an object.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str) → List[float]
-

Encode text with the transformer model.
This takes the beginning set of tokens, turns them into vectors,
and returns the mean pooling of the token vectors.
:param text: string

-
- -
-
-property encoder_type
-

The encoder type determines whether the model uses ‘encode’ or ‘encode_question’/’encode_answer’.
Currently supported encoder types:

Question-Answer
Text-Image
Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Retrieve a specified number of documents.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This helps
users avoid mis-spelling the name of the model.

# TODO:
Improve model URL validation so that it does not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.use2vec.html b/docs/encoders.text.use2vec.html deleted file mode 100644 index 773882a2..00000000 --- a/docs/encoders.text.use2vec.html +++ /dev/null @@ -1,416 +0,0 @@ - - - - - - - - - - USE2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

USE2Vec

-
-

TFHub

-

Model Name: USE - Universal Sentence Encoder

-

Vector Length: 512 (Base model)

-

Description:
We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.

-

![USE Image](https://www.gstatic.com/aihub/tfhub/universal-sentence-encoder/example-similarity.png)

-

Image from [Google](https://tfhub.dev/google/universal-sentence-encoder/1).

-

Paper: https://arxiv.org/abs/1803.11175

-

Repository: https://tfhub.dev/google/collections/universal-sentence-encoder/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2018-03-29

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import USE2Vec
model = USE2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
`

-
-
-class vectorhub.encoders.text.tfhub.use.USE2Vec(model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder/4')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts, threads=10, chunks=100)
-
- -
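The bulk_encode signature above also exposes threading and chunking parameters with defaults (threads=10, chunks=100); a brief call using the model from the example above simply passes a list of texts:

texts = ["first sentence", "second sentence", "third sentence"]
# Encode in bulk with the default threading and chunking behaviour.
vectors = model.bulk_encode(texts)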
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk a Python iterable (but not a pandas DataFrame) into lists of size chunk_size.
:param lst: Python List
:param chunk_size: The chunk size of an object.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text)
-
- -
-
-property encoder_type
-

The encoder type determines whether the model uses ‘encode’ or ‘encode_question’/’encode_answer’.
Currently supported encoder types:

Question-Answer
Text-Image
Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Retrieve a specified number of documents.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/universal-sentence-encoder-large/5': {'vector_length': 512}, 'https://tfhub.dev/google/universal-sentence-encoder/4': {'vector_length': 512}}
-
- -
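As an illustration of the urls dictionary above, the large variant can be selected by passing its TFHub URL to the constructor documented above (the sentence is just an example input):

from vectorhub.encoders.text.tfhub import USE2Vec

# The large Universal Sentence Encoder listed in the urls dictionary above.
model = USE2Vec(model_url='https://tfhub.dev/google/universal-sentence-encoder-large/5')
vector = model.encode("I enjoy taking long walks along the beach with my dog.")
print(len(vector))  # 512 according to the urls dictionary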
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This helps
users avoid mis-spelling the name of the model.

# TODO:
Improve model URL validation so that it does not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.use_multi2vec.html b/docs/encoders.text.use_multi2vec.html deleted file mode 100644 index 3ff559a7..00000000 --- a/docs/encoders.text.use_multi2vec.html +++ /dev/null @@ -1,413 +0,0 @@ - - - - - - - - - - USEMulti2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

USEMulti2Vec

-
-

TFHub

-

Model Name: USE Multi - Universal Sentence Encoder Multilingual

-

Vector Length: 512 (Base model)

-

Description:
The Universal Sentence Encoder Multilingual module is an extension of the Universal Sentence Encoder Large that includes training on multiple tasks across languages. It supports 16 languages: Arabic, Chinese-simplified, Chinese-traditional, English, French, German, Italian, Japanese, Korean, Dutch, Polish, Portuguese, Spanish, Thai, Turkish and Russian.

-

Paper: https://arxiv.org/abs/1803.11175

-

Repository: https://tfhub.dev/google/collections/universal-sentence-encoder/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2018-03-29

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-text-tfhub]
from vectorhub.encoders.text.tfhub import USEMulti2Vec
model = USEMulti2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
`

-
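Because the model is multilingual, sentences in any of the 16 supported languages map into the same 512-dimensional space; a short sketch (the German sentence is only an illustration):

from vectorhub.encoders.text.tfhub import USEMulti2Vec

model = USEMulti2Vec()
english_vector = model.encode("I enjoy taking long walks along the beach with my dog.")
german_vector = model.encode("Ich gehe gerne mit meinem Hund am Strand spazieren.")
print(len(english_vector), len(german_vector))  # 512 512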
-
-class vectorhub.encoders.text.tfhub.use_multi.USEMulti2Vec(model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3')
-

Bases: vectorhub.encoders.text.tfhub.use.USE2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts, threads=10, chunks=100)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk a Python iterable (but not a pandas DataFrame) into lists of size chunk_size.
:param lst: Python List
:param chunk_size: The chunk size of an object.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text)
-
- -
-
-property encoder_type
-

The encoder type determines whether the model uses ‘encode’ or ‘encode_question’/’encode_answer’.
Currently supported encoder types:

Question-Answer
Text-Image
Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Retrieve a specified number of documents.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3': {'vector_length': 512}, 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This helps
users avoid mis-spelling the name of the model.

# TODO:
Improve model URL validation so that it does not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/encoders.text.vectorai2vec.html b/docs/encoders.text.vectorai2vec.html deleted file mode 100644 index 1b2d2a47..00000000 --- a/docs/encoders.text.vectorai2vec.html +++ /dev/null @@ -1,391 +0,0 @@ - - - - - - - - - - ViText2Vec — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

ViText2Vec

-
-

Vector AI

-

For Vector AI users to access our deployed vector models and evaluate embeddings.

-

Vector AI’s deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves
how the embeddings work. These models are selected to work out-of-the-box after being tested for their success on our end.

-

To get access to Vector AI, you need a username and API key, which you can request from gh.vctr.ai (or via the request_api_key method below).

-

Example

-
>>> from vectorhub.encoders.text.vectorai.vi_encoder import ViText2Vec
->>> model = ViText2Vec(username, api_key)
->>> model.encode("Hey!")
->>> model.bulk_encode(["hey", "stranger"])
-
-
-
-
-class vectorhub.encoders.text.vectorai.vi_encoder.ViText2Vec(username, api_key, url=None, collection_name='base')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-

Request a username and API key from gh.vctr.ai!

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
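Putting the client methods documented on this page together, a small sketch of adding documents and then searching them (the collection name and item texts are purely illustrative):

model = ViText2Vec(username, api_key)
# Encode and upload a few text items to an illustrative collection.
model.add_documents(username, api_key, items=["dog walking", "beach holiday"],
                    collection_name="demo_collection")
# Search the collection with a text query.
results = model.search("long walks on the beach", num_results=5)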
- -
-
-bulk_encode(texts: List[str])
-

Bulk convert text to vectors

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk a Python iterable (but not a pandas DataFrame) into lists of size chunk_size.
:param lst: Python List
:param chunk_size: The chunk size of an object.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: Union[str, List[str]])
-

Convert text to vectors.

-
- -
-
-property encoder_type
-

The encoder type determines whether the model uses ‘encode’ or ‘encode_question’/’encode_answer’.
Currently supported encoder types:

Question-Answer
Text-Image
Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Retrieve a specified number of documents.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This helps
users avoid mis-spelling the name of the model.

# TODO:
Improve model URL validation so that it does not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/genindex.html b/docs/genindex.html deleted file mode 100644 index e88d9879..00000000 --- a/docs/genindex.html +++ /dev/null @@ -1,3022 +0,0 @@ - - - - - - - - - - Index — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
- - -
-
-
-
- - -

Index

- -

V

- - - -
    -
  • - vectorhub.encoders.audio.tfhub.trill_distilled - -
  • -
  • - vectorhub.encoders.audio.tfhub.vggish - -
  • -
  • - vectorhub.encoders.audio.tfhub.yamnet - -
  • -
  • - vectorhub.encoders.audio.vectorai - -
  • -
  • - vectorhub.encoders.audio.vectorai.vi_encoder - -
  • -
  • - vectorhub.encoders.code - -
  • -
  • - vectorhub.encoders.code.transformers - -
  • -
  • - vectorhub.encoders.code.transformers.codebert - -
  • -
  • - vectorhub.encoders.face - -
  • -
  • - vectorhub.encoders.face.tf - -
  • -
  • - vectorhub.encoders.face.tf.face2vec - -
  • -
  • - vectorhub.encoders.image - -
  • -
  • - vectorhub.encoders.image.base - -
  • -
  • - vectorhub.encoders.image.fastai - -
  • -
  • - vectorhub.encoders.image.fastai.base - -
  • -
  • - vectorhub.encoders.image.fastai.resnet - -
  • -
  • - vectorhub.encoders.image.tfhub - -
  • -
  • - vectorhub.encoders.image.tfhub.bit - -
  • -
  • - vectorhub.encoders.image.tfhub.bit_medium - -
  • -
  • - vectorhub.encoders.image.tfhub.inception_resnet - -
  • -
  • - vectorhub.encoders.image.tfhub.inceptionv1 - -
  • -
  • - vectorhub.encoders.image.tfhub.inceptionv2 - -
  • -
  • - vectorhub.encoders.image.tfhub.inceptionv3 - -
  • -
  • - vectorhub.encoders.image.tfhub.mobilenet - -
  • -
  • - vectorhub.encoders.image.tfhub.mobilenetv2 - -
  • -
  • - vectorhub.encoders.image.tfhub.resnet - -
  • -
  • - vectorhub.encoders.image.tfhub.resnetv2 - -
  • -
  • - vectorhub.encoders.image.vectorai - -
  • -
  • - vectorhub.encoders.image.vectorai.vi_encoder - -
  • -
  • - vectorhub.encoders.text - -
  • -
  • - vectorhub.encoders.text.base - -
  • -
  • - vectorhub.encoders.text.sentence_transformers - -
  • -
  • - vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers - -
  • -
  • - vectorhub.encoders.text.tf_transformers - -
  • -
  • - vectorhub.encoders.text.tf_transformers.tf_auto_transformers - -
  • -
  • - vectorhub.encoders.text.tfhub - -
  • -
  • - vectorhub.encoders.text.tfhub.albert - -
  • -
  • - vectorhub.encoders.text.tfhub.bert - -
  • -
  • - vectorhub.encoders.text.tfhub.elmo - -
  • -
  • - vectorhub.encoders.text.tfhub.labse - -
  • -
  • - vectorhub.encoders.text.tfhub.use - -
  • -
  • - vectorhub.encoders.text.tfhub.use_lite - -
  • -
  • - vectorhub.encoders.text.tfhub.use_multi - -
  • -
  • - vectorhub.encoders.text.torch_transformers - -
  • -
  • - vectorhub.encoders.text.torch_transformers.legal_bert - -
  • -
  • - vectorhub.encoders.text.torch_transformers.torch_auto_transformers - -
  • -
  • - vectorhub.encoders.text.torch_transformers.torch_longformers - -
  • -
  • - vectorhub.encoders.text.vectorai - -
  • -
  • - vectorhub.encoders.text.vectorai.vi_encoder - -
  • -
  • - vectorhub.encoders.video - -
  • -
  • - vectorhub.encoders.video.sampler - -
  • -
  • - vectorhub.errors - -
  • -
  • - vectorhub.import_utils - -
  • -
  • - vectorhub.models_dict - -
  • -
  • - vectorhub.utils - -
  • -
  • Vggish2Vec (class in vectorhub.encoders.audio.tfhub.vggish), [1] -
  • -
  • ViAudio2Vec (class in vectorhub.encoders.audio.vectorai.vi_encoder), [1] -
  • -
  • ViImage2Vec (class in vectorhub.encoders.image.vectorai.vi_encoder), [1] -
  • -
  • ViText2Vec (class in vectorhub.encoders.text.vectorai.vi_encoder), [1] -
  • -
- -

- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/how_to_add_a_model.html b/docs/how_to_add_a_model.html deleted file mode 100644 index f221ab3f..00000000 --- a/docs/how_to_add_a_model.html +++ /dev/null @@ -1,324 +0,0 @@ - - - - - - - - - - How To Add Your Model To Vector Hub — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
- - -
-
-
-
- -
-

How To Add Your Model To Vector Hub

-

We have written a simple 7-step guide to help you add your models here if you have trained them!
This should take approximately 30 minutes - 1 hour. Let us know at dev@vctr.ai if you need any help.

-
1. Fork the project.

2. Identify the minimum requirements for your model, identify the associated module and then add them to the MODEL_REQUIREMENTS in vectorhub/model_dict.

3. Write a brief description about what your model involves.

4. Create a new branch called new_model/____2vec, replace ___ with the model/domain etc.

5. Identify which directory your model should fall under. Here is a basic directory outline.
-
|____ encoders
-|________ audio
-|________ image
-|________ text
-|____ bi_encoders
-|________ text_text
-
-
-

If you believe your model falls under a new category, then we recommend making a new directory!

-
6. Once you identify the requirements, find the associated module or create a new one if required.
-

Use the following code as a base for any new models and add an encode and bulk_encode method. Both should return lists.

-
from ....import_utils import *
-# Import dictionary for model requirements
-from ....models_dict import MODEL_REQUIREMENTS
-# Add dependencies in if-statement to avoid import breaks in the library
-if is_all_dependency_installed(MODEL_REQUIREMENTS['text-bi-encoder-tfhub-use-qa']):
-    # add imports here
-    import bert
-    import numpy as np
-    import tensorflow as tf
-    import tensorflow_hub as hub
-    import tensorflow_text
-
-from typing import List
-# This decorator returns a default vector in case of an error
-from ....base import catch_vector_errors
-# Base class that provides basic utilities
-from ..base import BaseTextText2Vec
-
-class USEMultiQA2Vec(BaseTextText2Vec):
-    ...
-    # Add decorator in case encoding errors and we need a dummy vector.
-    @catch_vector_errors
-    def encode(self, text):
-        pass
-
-
-
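For orientation only, here is a sketch of what a filled-in encoder might look like once the template above is completed; the class name, model URL and TFHub-based implementation are illustrative assumptions rather than part of the guide:

from typing import List

import tensorflow_hub as hub

from vectorhub.base import catch_vector_errors
from vectorhub.encoders.text.base import BaseText2Vec

class Example2Vec(BaseText2Vec):
    # Illustrative model choice; any TFHub text encoder URL could be used here.
    def __init__(self, model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder/4'):
        self.model = hub.load(model_url)

    @catch_vector_errors
    def encode(self, text: str) -> List[float]:
        # Return a single vector as a plain Python list.
        return self.model([text]).numpy().tolist()[0]

    @catch_vector_errors
    def bulk_encode(self, texts: List[str]) -> List[List[float]]:
        # Return one vector per input text.
        return self.model(texts).numpy().tolist()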
7. Submit a PR!
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index 416c7834..00000000 --- a/docs/index.html +++ /dev/null @@ -1,422 +0,0 @@ - - - - - - - - - - Welcome to VectorHub’s documentation! — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
- - -
-
-
-
- -
-

Welcome to VectorHub’s documentation!

-

Vector Hub is your home for ___2Vec models!

-

The rise of deep learning and encoding has meant that there is now an explosion of
open-source and proprietary models and techniques that have allowed for distributed
representation of entities. This means the rise of new ____2Vec models that are:

-
1. Model-specific - New architecture is introduced.

2. Domain-specific - Architecture is trained on new domain.

3. Language-specific - Architecture is trained in new language.

4. Task-specific - Architecture is trained on new task.
-

In order to allow people to understand what these models do and mean, we aim to provide
a hub for these __2vec models.

Our vision is to build a hub that allows people to store these ____2Vec models and provides explanations
for how to best use these encodings, while building a flexible framework that allows these
different models to be used easily.

- - - -
-

Audio Encoders

- -
-
-

Text Bi-Encoders

- -
- -
-
-

Indices and tables

- -
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/intro.html b/docs/intro.html deleted file mode 100644 index 28f3708e..00000000 --- a/docs/intro.html +++ /dev/null @@ -1,266 +0,0 @@ - - - - - - - - - - What is Vector Hub? — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

What is Vector Hub?

-

The home of ___2Vec Models.

-

The rise of deep learning and encoding has meant that there is now an explosion of
open-source and proprietary models and techniques that have allowed for distributed
representation of entities. This means the rise of new ____2Vec models that are:

-
1. Model-specific - New architecture is introduced.

2. Domain-specific - Architecture is trained on new domain.

3. Language-specific - Architecture is trained in new language.

4. Task-specific - Architecture is trained on new task.
-

In order to allow people to understand what these models do and mean, we aim to provide
a hub for these __2vec models.

Our vision is to build a hub that allows people to store these ____2Vec models and provides explanations
for how to best use these encodings, while building a flexible framework that allows these
different models to be used easily.

-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/modules.html b/docs/modules.html deleted file mode 100644 index f3ca10ca..00000000 --- a/docs/modules.html +++ /dev/null @@ -1,283 +0,0 @@ - - - - - - - - - - vectorhub — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
- - -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/objects.inv b/docs/objects.inv deleted file mode 100644 index d734e335..00000000 Binary files a/docs/objects.inv and /dev/null differ diff --git a/docs/py-modindex.html b/docs/py-modindex.html deleted file mode 100644 index 3a767764..00000000 --- a/docs/py-modindex.html +++ /dev/null @@ -1,654 +0,0 @@ - - - - - - - - - - Python Module Index — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
- - -
-
-
-
- - -

Python Module Index

- -
- v -
 
- v
- vectorhub -
    - vectorhub.auto_encoder -
    - vectorhub.base -
    - vectorhub.bi_encoders -
    - vectorhub.bi_encoders.qa -
    - vectorhub.bi_encoders.qa.base -
    - vectorhub.bi_encoders.qa.sentence_transformers -
    - vectorhub.bi_encoders.qa.sentence_transformers.distilroberta_qa -
    - vectorhub.bi_encoders.qa.tfhub -
    - vectorhub.bi_encoders.qa.tfhub.lareqa_qa -
    - vectorhub.bi_encoders.qa.tfhub.use_multi_qa -
    - vectorhub.bi_encoders.qa.tfhub.use_qa -
    - vectorhub.bi_encoders.qa.torch_transformers -
    - vectorhub.bi_encoders.qa.torch_transformers.dpr -
    - vectorhub.bi_encoders.text_image -
    - vectorhub.bi_encoders.text_image.torch -
    - vectorhub.doc_utils -
    - vectorhub.encoders -
    - vectorhub.encoders.audio -
    - vectorhub.encoders.audio.base -
    - vectorhub.encoders.audio.pytorch -
    - vectorhub.encoders.audio.pytorch.wav2vec -
    - vectorhub.encoders.audio.tfhub -
    - vectorhub.encoders.audio.tfhub.speech_embedding -
    - vectorhub.encoders.audio.tfhub.trill -
    - vectorhub.encoders.audio.tfhub.trill_distilled -
    - vectorhub.encoders.audio.tfhub.vggish -
    - vectorhub.encoders.audio.tfhub.yamnet -
    - vectorhub.encoders.audio.vectorai -
    - vectorhub.encoders.audio.vectorai.vi_encoder -
    - vectorhub.encoders.code -
    - vectorhub.encoders.code.transformers -
    - vectorhub.encoders.code.transformers.codebert -
    - vectorhub.encoders.face -
    - vectorhub.encoders.face.tf -
    - vectorhub.encoders.face.tf.face2vec -
    - vectorhub.encoders.image -
    - vectorhub.encoders.image.base -
    - vectorhub.encoders.image.fastai -
    - vectorhub.encoders.image.fastai.base -
    - vectorhub.encoders.image.fastai.resnet -
    - vectorhub.encoders.image.tfhub -
    - vectorhub.encoders.image.tfhub.bit -
    - vectorhub.encoders.image.tfhub.bit_medium -
    - vectorhub.encoders.image.tfhub.inception_resnet -
    - vectorhub.encoders.image.tfhub.inceptionv1 -
    - vectorhub.encoders.image.tfhub.inceptionv2 -
    - vectorhub.encoders.image.tfhub.inceptionv3 -
    - vectorhub.encoders.image.tfhub.mobilenet -
    - vectorhub.encoders.image.tfhub.mobilenetv2 -
    - vectorhub.encoders.image.tfhub.resnet -
    - vectorhub.encoders.image.tfhub.resnetv2 -
    - vectorhub.encoders.image.vectorai -
    - vectorhub.encoders.image.vectorai.vi_encoder -
    - vectorhub.encoders.text -
    - vectorhub.encoders.text.base -
    - vectorhub.encoders.text.sentence_transformers -
    - vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers -
    - vectorhub.encoders.text.tf_transformers -
    - vectorhub.encoders.text.tf_transformers.tf_auto_transformers -
    - vectorhub.encoders.text.tfhub -
    - vectorhub.encoders.text.tfhub.albert -
    - vectorhub.encoders.text.tfhub.bert -
    - vectorhub.encoders.text.tfhub.elmo -
    - vectorhub.encoders.text.tfhub.labse -
    - vectorhub.encoders.text.tfhub.use -
    - vectorhub.encoders.text.tfhub.use_lite -
    - vectorhub.encoders.text.tfhub.use_multi -
    - vectorhub.encoders.text.torch_transformers -
    - vectorhub.encoders.text.torch_transformers.legal_bert -
    - vectorhub.encoders.text.torch_transformers.torch_auto_transformers -
    - vectorhub.encoders.text.torch_transformers.torch_longformers -
    - vectorhub.encoders.text.vectorai -
    - vectorhub.encoders.text.vectorai.vi_encoder -
    - vectorhub.encoders.video -
    - vectorhub.encoders.video.sampler -
    - vectorhub.errors -
    - vectorhub.import_utils -
    - vectorhub.models_dict -
    - vectorhub.utils -
- - -
- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/search.html b/docs/search.html deleted file mode 100644 index c3325020..00000000 --- a/docs/search.html +++ /dev/null @@ -1,259 +0,0 @@ - - - - - - - - - - Search — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
- - -
-
-
-
- - - - -
- -
- -
- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/searchindex.js b/docs/searchindex.js deleted file mode 100644 index c6d6b290..00000000 --- a/docs/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["auto_encoder","bi_encoders.text_text.dpr2vec","bi_encoders.text_text.lareqa_qa2vec","bi_encoders.text_text.use_qa2vec","encoders.audio.speech_embedding2vec","encoders.audio.trill2vec","encoders.audio.vectorai2vec","encoders.audio.vggish2vec","encoders.audio.wav2vec","encoders.audio.yamnet2vec","encoders.image.bit2vec","encoders.image.inception2vec","encoders.image.inception_resnet2vec","encoders.image.mobilenet2vec","encoders.image.resnet2vec","encoders.image.vectorai2vec","encoders.text.albert2vec","encoders.text.bert2vec","encoders.text.labse2vec","encoders.text.legalbert2vec","encoders.text.sentencetransformer2vec","encoders.text.transformer2vec","encoders.text.use2vec","encoders.text.use_multi2vec","encoders.text.vectorai2vec","how_to_add_a_model","index","intro","modules","vectorhub","vectorhub.bi_encoders","vectorhub.bi_encoders.qa","vectorhub.bi_encoders.qa.sentence_transformers","vectorhub.bi_encoders.qa.tfhub","vectorhub.bi_encoders.qa.torch_transformers","vectorhub.bi_encoders.text_image","vectorhub.bi_encoders.text_image.torch","vectorhub.bi_encoders.text_text","vectorhub.bi_encoders.text_text.sentence_transformers","vectorhub.bi_encoders.text_text.tfhub","vectorhub.bi_encoders.text_text.torch_transformers","vectorhub.encoders","vectorhub.encoders.audio","vectorhub.encoders.audio.pytorch","vectorhub.encoders.audio.tfhub","vectorhub.encoders.audio.vectorai","vectorhub.encoders.code","vectorhub.encoders.code.transformers","vectorhub.encoders.face","vectorhub.encoders.face.tf","vectorhub.encoders.image","vectorhub.encoders.image.fastai","vectorhub.encoders.image.tensorflow","vectorhub.encoders.image.tfhub","vectorhub.encoders.image.vectorai","vectorhub.encoders.text","vectorhub.encoders.text.sentence_transformers","vectorhub.encoders.text.tf_transformers","vectorhub.encoders.text.tfhub","vectorhub.encoders.text.torch_transformers","vectorhub.encoders.text.vectorai","vectorhub.encoders.video"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":2,sphinx:56},filenames:["auto_encoder.rst","bi_encoders.text_text.dpr2vec.rst","bi_encoders.text_text.lareqa_qa2vec.rst","bi_encoders.text_text.use_qa2vec.rst","encoders.audio.speech_embedding2vec.rst","encoders.audio.trill2vec.rst","encoders.audio.vectorai2vec.rst","encoders.audio.vggish2vec.rst","encoders.audio.wav2vec.rst","encoders.audio.yamnet2vec.rst","encoders.image.bit2vec.rst","encoders.image.inception2vec.rst","encoders.image.inception_resnet2vec.rst","encoders.image.mobilenet2vec.rst","encoders.image.resnet2vec.rst","encoders.image.vectorai2vec.rst","encoders.text.albert2vec.rst","encoders.text.bert2vec.rst","encoders.text.labse2vec.rst","encoders.text.legalbert2vec.rst","encoders.text.sentencetransformer2vec.rst","encoders.text.transformer2vec.rst","encoders.text.use2vec.rst","encoders.text.use_multi2vec.rst","encoders.text.vectorai2vec.rst","how_to_add_a_model.rst","index.rst","intro.rst","modules.rst","vectorhub.rst","vectorhub.bi_encoders.rst","vectorhub.bi_encoders.qa.rst","vectorhub.bi_encoders.qa.sentence_transformers.rst","vectorhub.bi_encoders.qa.tfhub.rst","vectorhub.bi_enc
oders.qa.torch_transformers.rst","vectorhub.bi_encoders.text_image.rst","vectorhub.bi_encoders.text_image.torch.rst","vectorhub.bi_encoders.text_text.rst","vectorhub.bi_encoders.text_text.sentence_transformers.rst","vectorhub.bi_encoders.text_text.tfhub.rst","vectorhub.bi_encoders.text_text.torch_transformers.rst","vectorhub.encoders.rst","vectorhub.encoders.audio.rst","vectorhub.encoders.audio.pytorch.rst","vectorhub.encoders.audio.tfhub.rst","vectorhub.encoders.audio.vectorai.rst","vectorhub.encoders.code.rst","vectorhub.encoders.code.transformers.rst","vectorhub.encoders.face.rst","vectorhub.encoders.face.tf.rst","vectorhub.encoders.image.rst","vectorhub.encoders.image.fastai.rst","vectorhub.encoders.image.tensorflow.rst","vectorhub.encoders.image.tfhub.rst","vectorhub.encoders.image.vectorai.rst","vectorhub.encoders.text.rst","vectorhub.encoders.text.sentence_transformers.rst","vectorhub.encoders.text.tf_transformers.rst","vectorhub.encoders.text.tfhub.rst","vectorhub.encoders.text.torch_transformers.rst","vectorhub.encoders.text.vectorai.rst","vectorhub.encoders.video.rst"],objects:{"":{vectorhub:[29,0,0,"-"]},"vectorhub.auto_encoder":{AutoBiEncoder:[29,1,1,""],AutoEncoder:[29,1,1,""],get_model_definitions:[29,3,1,""],list_all_auto_models:[29,3,1,""]},"vectorhub.auto_encoder.AutoBiEncoder":{from_model:[29,2,1,""]},"vectorhub.auto_encoder.AutoEncoder":{from_model:[29,2,1,""]},"vectorhub.base":{Base2Vec:[29,1,1,""],catch_vector_errors:[29,3,1,""]},"vectorhub.base.Base2Vec":{add_documents:[29,2,1,""],chunk:[29,2,1,""],delete_collection:[29,2,1,""],encoder_type:[29,2,1,""],get_vector_field_name:[29,2,1,""],is_url_working:[29,2,1,""],request_api_key:[29,2,1,""],retrieve_all_documents:[29,2,1,""],retrieve_documents:[29,2,1,""],search:[29,2,1,""],validate_model_url:[29,2,1,""]},"vectorhub.bi_encoders":{qa:[31,0,0,"-"],text_image:[35,0,0,"-"]},"vectorhub.bi_encoders.qa":{base:[31,0,0,"-"],sentence_transformers:[32,0,0,"-"],tfhub:[33,0,0,"-"],torch_transformers:[34,0,0,"-"]},"vectorhub.bi_encoders.qa.base":{BaseQA2Vec:[31,1,1,""]},"vectorhub.bi_encoders.qa.base.BaseQA2Vec":{add_documents:[31,2,1,""],chunk:[31,2,1,""],delete_collection:[31,2,1,""],encode:[31,2,1,""],encode_answer:[31,2,1,""],encode_question:[31,2,1,""],encoder_type:[31,2,1,""],get_vector_field_name:[31,2,1,""],is_url_working:[31,2,1,""],read:[31,2,1,""],request_api_key:[31,2,1,""],retrieve_all_documents:[31,2,1,""],retrieve_documents:[31,2,1,""],search:[31,2,1,""],test_word:[31,2,1,""],validate_model_url:[31,2,1,""],vector_length:[31,2,1,""]},"vectorhub.bi_encoders.qa.sentence_transformers":{distilroberta_qa:[32,0,0,"-"]},"vectorhub.bi_encoders.qa.sentence_transformers.distilroberta_qa":{DistilRobertaQA2Vec:[32,1,1,""]},"vectorhub.bi_encoders.qa.sentence_transformers.distilroberta_qa.DistilRobertaQA2Vec":{add_documents:[32,2,1,""],bulk_encode:[32,2,1,""],bulk_encode_answers:[32,2,1,""],bulk_encode_question:[32,2,1,""],chunk:[32,2,1,""],definition:[32,4,1,""],delete_collection:[32,2,1,""],encode:[32,2,1,""],encode_answer:[32,2,1,""],encode_question:[32,2,1,""],encoder_type:[32,2,1,""],get_vector_field_name:[32,2,1,""],is_url_working:[32,2,1,""],read:[32,2,1,""],request_api_key:[32,2,1,""],retrieve_all_documents:[32,2,1,""],retrieve_documents:[32,2,1,""],search:[32,2,1,""],test_word:[32,2,1,""],urls:[32,4,1,""],validate_model_url:[32,2,1,""],vector_length:[32,2,1,""]},"vectorhub.bi_encoders.qa.tfhub":{lareqa_qa:[33,0,0,"-"],use_multi_qa:[33,0,0,"-"],use_qa:[33,0,0,"-"]},"vectorhub.bi_encoders.qa.tfhub.lareqa_qa":{LAReQA2Vec:[33,1,
1,""]},"vectorhub.bi_encoders.qa.tfhub.lareqa_qa.LAReQA2Vec":{add_documents:[33,2,1,""],bulk_encode:[33,2,1,""],bulk_encode_answers:[33,2,1,""],bulk_encode_question:[33,2,1,""],chunk:[33,2,1,""],definition:[33,4,1,""],delete_collection:[33,2,1,""],encode:[33,2,1,""],encode_answer:[33,2,1,""],encode_question:[33,2,1,""],encoder_type:[33,2,1,""],get_vector_field_name:[33,2,1,""],is_url_working:[33,2,1,""],read:[33,2,1,""],request_api_key:[33,2,1,""],retrieve_all_documents:[33,2,1,""],retrieve_documents:[33,2,1,""],search:[33,2,1,""],test_word:[33,2,1,""],urls:[33,4,1,""],validate_model_url:[33,2,1,""],vector_length:[33,2,1,""]},"vectorhub.bi_encoders.qa.tfhub.use_multi_qa":{USEMultiQA2Vec:[33,1,1,""]},"vectorhub.bi_encoders.qa.tfhub.use_multi_qa.USEMultiQA2Vec":{add_documents:[33,2,1,""],bulk_encode:[33,2,1,""],bulk_encode_answers:[33,2,1,""],bulk_encode_questions:[33,2,1,""],chunk:[33,2,1,""],definition:[33,4,1,""],delete_collection:[33,2,1,""],encode:[33,2,1,""],encode_answer:[33,2,1,""],encode_question:[33,2,1,""],encoder_type:[33,2,1,""],get_vector_field_name:[33,2,1,""],is_url_working:[33,2,1,""],read:[33,2,1,""],request_api_key:[33,2,1,""],retrieve_all_documents:[33,2,1,""],retrieve_documents:[33,2,1,""],search:[33,2,1,""],test_word:[33,2,1,""],urls:[33,4,1,""],validate_model_url:[33,2,1,""],vector_length:[33,2,1,""]},"vectorhub.bi_encoders.qa.tfhub.use_qa":{USEQA2Vec:[33,1,1,""]},"vectorhub.bi_encoders.qa.tfhub.use_qa.USEQA2Vec":{add_documents:[33,2,1,""],bulk_encode:[33,2,1,""],bulk_encode_answers:[33,2,1,""],bulk_encode_questions:[33,2,1,""],chunk:[33,2,1,""],definition:[33,4,1,""],delete_collection:[33,2,1,""],encode:[33,2,1,""],encode_answer:[33,2,1,""],encode_question:[33,2,1,""],encoder_type:[33,2,1,""],get_vector_field_name:[33,2,1,""],is_url_working:[33,2,1,""],read:[33,2,1,""],request_api_key:[33,2,1,""],retrieve_all_documents:[33,2,1,""],retrieve_documents:[33,2,1,""],search:[33,2,1,""],test_word:[33,2,1,""],urls:[33,4,1,""],validate_model_url:[33,2,1,""],vector_length:[33,2,1,""]},"vectorhub.bi_encoders.qa.torch_transformers":{dpr:[34,0,0,"-"]},"vectorhub.bi_encoders.qa.torch_transformers.dpr":{DPR2Vec:[34,1,1,""]},"vectorhub.bi_encoders.qa.torch_transformers.dpr.DPR2Vec":{add_documents:[34,2,1,""],bulk_encode:[34,2,1,""],bulk_encode_answers:[34,2,1,""],bulk_encode_questions:[34,2,1,""],chunk:[34,2,1,""],definition:[34,4,1,""],delete_collection:[34,2,1,""],encode:[34,2,1,""],encode_answer:[34,2,1,""],encode_question:[34,2,1,""],encoder_type:[34,2,1,""],get_vector_field_name:[34,2,1,""],is_url_working:[34,2,1,""],read:[34,2,1,""],request_api_key:[34,2,1,""],retrieve_all_documents:[34,2,1,""],retrieve_documents:[34,2,1,""],search:[34,2,1,""],test_word:[34,2,1,""],validate_model_url:[34,2,1,""],vector_length:[34,2,1,""]},"vectorhub.bi_encoders.text_image":{torch:[36,0,0,"-"]},"vectorhub.doc_utils":{ModelDefinition:[29,1,1,""]},"vectorhub.doc_utils.ModelDefinition":{DATA_TYPE_TO_EXAMPLE:[29,2,1,""],audio_items_examples:[29,2,1,""],audio_metadata_examples:[29,2,1,""],audio_search_example:[29,2,1,""],create_docs:[29,2,1,""],data_type:[29,2,1,""],from_markdown:[29,2,1,""],image_items_examples:[29,2,1,""],image_metadata_examples:[29,2,1,""],image_search_example:[29,2,1,""],item_examples:[29,2,1,""],metadata_examples:[29,2,1,""],qa_items_examples:[29,2,1,""],qa_metadata_examples:[29,2,1,""],qa_search_example:[29,2,1,""],search_example:[29,2,1,""],text_image_search_example:[29,2,1,""],text_items_examples:[29,2,1,""],text_metadata_examples:[29,2,1,""],text_search_example:[29,2,1,""],to
_dict:[29,2,1,""],vectorai_integration:[29,2,1,""]},"vectorhub.encoders":{audio:[42,0,0,"-"],code:[46,0,0,"-"],face:[48,0,0,"-"],image:[50,0,0,"-"],text:[55,0,0,"-"],video:[61,0,0,"-"]},"vectorhub.encoders.audio":{base:[42,0,0,"-"],pytorch:[43,0,0,"-"],tfhub:[44,0,0,"-"],vectorai:[45,0,0,"-"]},"vectorhub.encoders.audio.base":{BaseAudio2Vec:[42,1,1,""]},"vectorhub.encoders.audio.base.BaseAudio2Vec":{add_documents:[42,2,1,""],bulk_encode:[42,2,1,""],chunk:[42,2,1,""],delete_collection:[42,2,1,""],encoder_type:[42,2,1,""],get_vector_field_name:[42,2,1,""],is_url_working:[42,2,1,""],read:[42,2,1,""],request_api_key:[42,2,1,""],retrieve_all_documents:[42,2,1,""],retrieve_documents:[42,2,1,""],search:[42,2,1,""],validate_model_url:[42,2,1,""]},"vectorhub.encoders.audio.pytorch":{wav2vec:[43,0,0,"-"]},"vectorhub.encoders.audio.pytorch.wav2vec":{Wav2Vec:[43,1,1,""]},"vectorhub.encoders.audio.pytorch.wav2vec.Wav2Vec":{add_documents:[43,2,1,""],bulk_encode:[43,2,1,""],chunk:[43,2,1,""],definition:[43,4,1,""],delete_collection:[43,2,1,""],encode:[43,2,1,""],encoder_type:[43,2,1,""],get_vector_field_name:[43,2,1,""],init:[43,2,1,""],is_url_working:[43,2,1,""],read:[43,2,1,""],request_api_key:[43,2,1,""],retrieve_all_documents:[43,2,1,""],retrieve_documents:[43,2,1,""],search:[43,2,1,""],urls:[43,4,1,""],validate_model_url:[43,2,1,""]},"vectorhub.encoders.audio.tfhub":{speech_embedding:[44,0,0,"-"],trill:[44,0,0,"-"],trill_distilled:[44,0,0,"-"],vggish:[44,0,0,"-"],yamnet:[44,0,0,"-"]},"vectorhub.encoders.audio.tfhub.speech_embedding":{SpeechEmbedding2Vec:[44,1,1,""]},"vectorhub.encoders.audio.tfhub.speech_embedding.SpeechEmbedding2Vec":{add_documents:[44,2,1,""],bulk_encode:[44,2,1,""],chunk:[44,2,1,""],definition:[44,4,1,""],delete_collection:[44,2,1,""],encode:[44,2,1,""],encoder_type:[44,2,1,""],get_vector_field_name:[44,2,1,""],is_url_working:[44,2,1,""],read:[44,2,1,""],request_api_key:[44,2,1,""],retrieve_all_documents:[44,2,1,""],retrieve_documents:[44,2,1,""],search:[44,2,1,""],urls:[44,4,1,""],validate_model_url:[44,2,1,""]},"vectorhub.encoders.audio.tfhub.trill":{Trill2Vec:[44,1,1,""]},"vectorhub.encoders.audio.tfhub.trill.Trill2Vec":{add_documents:[44,2,1,""],bulk_encode:[44,2,1,""],chunk:[44,2,1,""],definition:[44,4,1,""],delete_collection:[44,2,1,""],encode:[44,2,1,""],encoder_type:[44,2,1,""],get_vector_field_name:[44,2,1,""],is_url_working:[44,2,1,""],read:[44,2,1,""],request_api_key:[44,2,1,""],retrieve_all_documents:[44,2,1,""],retrieve_documents:[44,2,1,""],search:[44,2,1,""],urls:[44,4,1,""],validate_model_url:[44,2,1,""]},"vectorhub.encoders.audio.tfhub.trill_distilled":{TrillDistilled2Vec:[44,1,1,""]},"vectorhub.encoders.audio.tfhub.trill_distilled.TrillDistilled2Vec":{add_documents:[44,2,1,""],bulk_encode:[44,2,1,""],chunk:[44,2,1,""],definition:[44,4,1,""],delete_collection:[44,2,1,""],encode:[44,2,1,""],encoder_type:[44,2,1,""],get_vector_field_name:[44,2,1,""],is_url_working:[44,2,1,""],read:[44,2,1,""],request_api_key:[44,2,1,""],retrieve_all_documents:[44,2,1,""],retrieve_documents:[44,2,1,""],search:[44,2,1,""],urls:[44,4,1,""],validate_model_url:[44,2,1,""]},"vectorhub.encoders.audio.tfhub.vggish":{Vggish2Vec:[44,1,1,""]},"vectorhub.encoders.audio.tfhub.vggish.Vggish2Vec":{add_documents:[44,2,1,""],bulk_encode:[44,2,1,""],chunk:[44,2,1,""],definition:[44,4,1,""],delete_collection:[44,2,1,""],encode:[44,2,1,""],encoder_type:[44,2,1,""],get_vector_field_name:[44,2,1,""],is_url_working:[44,2,1,""],read:[44,2,1,""],request_api_key:[44,2,1,""],retrieve_all_documents:[44,2,1,""],
retrieve_documents:[44,2,1,""],search:[44,2,1,""],urls:[44,4,1,""],validate_model_url:[44,2,1,""]},"vectorhub.encoders.audio.tfhub.yamnet":{Yamnet2Vec:[44,1,1,""]},"vectorhub.encoders.audio.tfhub.yamnet.Yamnet2Vec":{add_documents:[44,2,1,""],bulk_encode:[44,2,1,""],chunk:[44,2,1,""],definition:[44,4,1,""],delete_collection:[44,2,1,""],encode:[44,2,1,""],encoder_type:[44,2,1,""],get_vector_field_name:[44,2,1,""],is_url_working:[44,2,1,""],read:[44,2,1,""],request_api_key:[44,2,1,""],retrieve_all_documents:[44,2,1,""],retrieve_documents:[44,2,1,""],search:[44,2,1,""],urls:[44,4,1,""],validate_model_url:[44,2,1,""]},"vectorhub.encoders.audio.vectorai":{vi_encoder:[45,0,0,"-"]},"vectorhub.encoders.audio.vectorai.vi_encoder":{ViAudio2Vec:[45,1,1,""]},"vectorhub.encoders.audio.vectorai.vi_encoder.ViAudio2Vec":{encode:[45,2,1,""],vector_length:[45,2,1,""]},"vectorhub.encoders.code":{transformers:[47,0,0,"-"]},"vectorhub.encoders.code.transformers":{codebert:[47,0,0,"-"]},"vectorhub.encoders.code.transformers.codebert":{Code2Vec:[47,1,1,""]},"vectorhub.encoders.code.transformers.codebert.Code2Vec":{add_documents:[47,2,1,""],bulk_encode:[47,2,1,""],chunk:[47,2,1,""],definition:[47,4,1,""],delete_collection:[47,2,1,""],encode:[47,2,1,""],encoder_type:[47,2,1,""],get_vector_field_name:[47,2,1,""],is_url_working:[47,2,1,""],read:[47,2,1,""],request_api_key:[47,2,1,""],retrieve_all_documents:[47,2,1,""],retrieve_documents:[47,2,1,""],search:[47,2,1,""],test_word:[47,2,1,""],urls:[47,4,1,""],validate_model_url:[47,2,1,""],vector_length:[47,2,1,""]},"vectorhub.encoders.face":{tf:[49,0,0,"-"]},"vectorhub.encoders.face.tf":{face2vec:[49,0,0,"-"]},"vectorhub.encoders.face.tf.face2vec":{Face2Vec:[49,1,1,""]},"vectorhub.encoders.face.tf.face2vec.Face2Vec":{add_documents:[49,2,1,""],bulk_encode:[49,2,1,""],cache_dir:[49,2,1,""],chunk:[49,2,1,""],definition:[49,4,1,""],delete_collection:[49,2,1,""],encode:[49,2,1,""],encoder_type:[49,2,1,""],extract_face:[49,2,1,""],get_vector_field_name:[49,2,1,""],image_resize:[49,2,1,""],is_url_working:[49,2,1,""],model_path:[49,2,1,""],read:[49,2,1,""],request_api_key:[49,2,1,""],retrieve_all_documents:[49,2,1,""],retrieve_documents:[49,2,1,""],rgb_weights:[49,2,1,""],search:[49,2,1,""],show_face_landmarks:[49,2,1,""],show_image:[49,2,1,""],standardise_image:[49,2,1,""],to_grayscale:[49,2,1,""],urls:[49,2,1,""],validate_model_url:[49,2,1,""]},"vectorhub.encoders.image":{base:[50,0,0,"-"],fastai:[51,0,0,"-"],tfhub:[53,0,0,"-"],vectorai:[54,0,0,"-"]},"vectorhub.encoders.image.base":{BaseImage2Vec:[50,1,1,""]},"vectorhub.encoders.image.base.BaseImage2Vec":{add_documents:[50,2,1,""],chunk:[50,2,1,""],delete_collection:[50,2,1,""],encoder_type:[50,2,1,""],get_vector_field_name:[50,2,1,""],image_resize:[50,2,1,""],is_url_working:[50,2,1,""],read:[50,2,1,""],request_api_key:[50,2,1,""],retrieve_all_documents:[50,2,1,""],retrieve_documents:[50,2,1,""],rgb_weights:[50,2,1,""],search:[50,2,1,""],show_image:[50,2,1,""],to_grayscale:[50,2,1,""],validate_model_url:[50,2,1,""]},"vectorhub.encoders.image.fastai":{base:[51,0,0,"-"],resnet:[51,0,0,"-"]},"vectorhub.encoders.image.fastai.base":{FastAIBase:[51,1,1,""]},"vectorhub.encoders.image.fastai.base.FastAIBase":{add_documents:[51,2,1,""],bulk_encode:[51,2,1,""],chunk:[51,2,1,""],delete_collection:[51,2,1,""],encode:[51,2,1,""],encoder_type:[51,2,1,""],extraction_layer:[51,2,1,""],get_vector_field_name:[51,2,1,""],image_resize:[51,2,1,""],is_url_working:[51,2,1,""],read:[51,2,1,""],request_api_key:[51,2,1,""],retrieve_all_documents:[51,2,
1,""],retrieve_documents:[51,2,1,""],rgb_weights:[51,2,1,""],search:[51,2,1,""],show_image:[51,2,1,""],to_grayscale:[51,2,1,""],validate_model_url:[51,2,1,""]},"vectorhub.encoders.image.fastai.resnet":{FastAIResnet2Vec:[51,1,1,""]},"vectorhub.encoders.image.fastai.resnet.FastAIResnet2Vec":{add_documents:[51,2,1,""],architecture_mappings:[51,2,1,""],bulk_encode:[51,2,1,""],chunk:[51,2,1,""],definition:[51,4,1,""],delete_collection:[51,2,1,""],encode:[51,2,1,""],encoder_type:[51,2,1,""],extraction_layer:[51,2,1,""],get_vector_field_name:[51,2,1,""],image_resize:[51,2,1,""],is_url_working:[51,2,1,""],possible_architectures:[51,2,1,""],read:[51,2,1,""],request_api_key:[51,2,1,""],retrieve_all_documents:[51,2,1,""],retrieve_documents:[51,2,1,""],rgb_weights:[51,2,1,""],search:[51,2,1,""],show_image:[51,2,1,""],to_grayscale:[51,2,1,""],validate_model_url:[51,2,1,""]},"vectorhub.encoders.image.tfhub":{bit:[53,0,0,"-"],bit_medium:[53,0,0,"-"],inception_resnet:[53,0,0,"-"],inceptionv1:[53,0,0,"-"],inceptionv2:[53,0,0,"-"],inceptionv3:[53,0,0,"-"],mobilenet:[53,0,0,"-"],mobilenetv2:[53,0,0,"-"],resnet:[53,0,0,"-"],resnetv2:[53,0,0,"-"]},"vectorhub.encoders.image.tfhub.bit":{BitSmall2Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.bit.BitSmall2Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.bit_medium":{BitMedium2Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.bit_medium.BitMedium2Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.inception_resnet":{InceptionResnet2Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.inception_resnet.InceptionResnet2Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.inceptionv1":{InceptionV12Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.inceptionv1.InceptionV12Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_docume
nts:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.inceptionv2":{InceptionV22Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.inceptionv2.InceptionV22Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.inceptionv3":{InceptionV32Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.inceptionv3.InceptionV32Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.mobilenet":{MobileNetV12Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.mobilenet.MobileNetV12Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.mobilenetv2":{MobileNetV22Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.mobilenetv2.MobileNetV22Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.resnet":{ResnetV12Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.resnet.ResnetV12Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,4,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.tfhub.resnetv2":{ResnetV22Vec:[53,1,1,""]},"vectorhub.encoders.image.tfhub.resnetv
2.ResnetV22Vec":{add_documents:[53,2,1,""],bulk_encode:[53,2,1,""],chunk:[53,2,1,""],definition:[53,4,1,""],delete_collection:[53,2,1,""],encode:[53,2,1,""],encoder_type:[53,2,1,""],get_vector_field_name:[53,2,1,""],image_resize:[53,2,1,""],init:[53,2,1,""],is_url_working:[53,2,1,""],read:[53,2,1,""],request_api_key:[53,2,1,""],retrieve_all_documents:[53,2,1,""],retrieve_documents:[53,2,1,""],rgb_weights:[53,2,1,""],search:[53,2,1,""],show_image:[53,2,1,""],to_grayscale:[53,2,1,""],urls:[53,2,1,""],validate_model_url:[53,2,1,""]},"vectorhub.encoders.image.vectorai":{vi_encoder:[54,0,0,"-"]},"vectorhub.encoders.image.vectorai.vi_encoder":{ViImage2Vec:[54,1,1,""]},"vectorhub.encoders.image.vectorai.vi_encoder.ViImage2Vec":{encode:[54,2,1,""]},"vectorhub.encoders.text":{base:[55,0,0,"-"],sentence_transformers:[56,0,0,"-"],tf_transformers:[57,0,0,"-"],tfhub:[58,0,0,"-"],torch_transformers:[59,0,0,"-"],vectorai:[60,0,0,"-"]},"vectorhub.encoders.text.base":{BaseText2Vec:[55,1,1,""]},"vectorhub.encoders.text.base.BaseText2Vec":{add_documents:[55,2,1,""],chunk:[55,2,1,""],delete_collection:[55,2,1,""],encode:[55,2,1,""],encoder_type:[55,2,1,""],get_vector_field_name:[55,2,1,""],is_url_working:[55,2,1,""],read:[55,2,1,""],request_api_key:[55,2,1,""],retrieve_all_documents:[55,2,1,""],retrieve_documents:[55,2,1,""],search:[55,2,1,""],test_word:[55,2,1,""],validate_model_url:[55,2,1,""],vector_length:[55,2,1,""]},"vectorhub.encoders.text.sentence_transformers":{sentence_auto_transformers:[56,0,0,"-"]},"vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers":{SentenceTransformer2Vec:[56,1,1,""]},"vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers.SentenceTransformer2Vec":{add_documents:[56,2,1,""],bulk_encode:[56,2,1,""],chunk:[56,2,1,""],definition:[56,4,1,""],delete_collection:[56,2,1,""],encode:[56,2,1,""],encoder_type:[56,2,1,""],get_list_of_urls:[56,2,1,""],get_vector_field_name:[56,2,1,""],is_url_working:[56,2,1,""],read:[56,2,1,""],request_api_key:[56,2,1,""],retrieve_all_documents:[56,2,1,""],retrieve_documents:[56,2,1,""],search:[56,2,1,""],test_word:[56,2,1,""],urls:[56,4,1,""],validate_model_url:[56,2,1,""],vector_length:[56,2,1,""]},"vectorhub.encoders.text.tf_transformers":{tf_auto_transformers:[57,0,0,"-"]},"vectorhub.encoders.text.tf_transformers.tf_auto_transformers":{TFTransformer2Vec:[57,1,1,""]},"vectorhub.encoders.text.tf_transformers.tf_auto_transformers.TFTransformer2Vec":{add_documents:[57,2,1,""],bulk_encode:[57,2,1,""],chunk:[57,2,1,""],definition:[57,4,1,""],delete_collection:[57,2,1,""],encode:[57,2,1,""],encoder_type:[57,2,1,""],get_vector_field_name:[57,2,1,""],is_url_working:[57,2,1,""],read:[57,2,1,""],request_api_key:[57,2,1,""],retrieve_all_documents:[57,2,1,""],retrieve_documents:[57,2,1,""],search:[57,2,1,""],test_word:[57,2,1,""],validate_model_url:[57,2,1,""],vector_length:[57,2,1,""]},"vectorhub.encoders.text.tfhub":{albert:[58,0,0,"-"],bert:[58,0,0,"-"],elmo:[58,0,0,"-"],labse:[58,0,0,"-"],use:[58,0,0,"-"],use_lite:[58,0,0,"-"],use_multi:[58,0,0,"-"]},"vectorhub.encoders.text.tfhub.albert":{Albert2Vec:[58,1,1,""]},"vectorhub.encoders.text.tfhub.albert.Albert2Vec":{add_documents:[58,2,1,""],bulk_encode:[58,2,1,""],chunk:[58,2,1,""],definition:[58,4,1,""],delete_collection:[58,2,1,""],encode:[58,2,1,""],encoder_type:[58,2,1,""],get_vector_field_name:[58,2,1,""],init:[58,2,1,""],init_tokenizer:[58,2,1,""],is_url_working:[58,2,1,""],read:[58,2,1,""],request_api_key:[58,2,1,""],retrieve_all_documents:[58,2,1,""],retrieve_document
s:[58,2,1,""],search:[58,2,1,""],test_word:[58,2,1,""],urls:[58,4,1,""],validate_model_url:[58,2,1,""],vector_length:[58,2,1,""]},"vectorhub.encoders.text.tfhub.bert":{Bert2Vec:[58,1,1,""]},"vectorhub.encoders.text.tfhub.bert.Bert2Vec":{add_documents:[58,2,1,""],bulk_encode:[58,2,1,""],chunk:[58,2,1,""],definition:[58,4,1,""],delete_collection:[58,2,1,""],encode:[58,2,1,""],encoder_type:[58,2,1,""],get_vector_field_name:[58,2,1,""],init:[58,2,1,""],init_tokenizer:[58,2,1,""],is_url_working:[58,2,1,""],process:[58,2,1,""],read:[58,2,1,""],request_api_key:[58,2,1,""],retrieve_all_documents:[58,2,1,""],retrieve_documents:[58,2,1,""],search:[58,2,1,""],test_word:[58,2,1,""],urls:[58,4,1,""],validate_model_url:[58,2,1,""],vector_length:[58,2,1,""]},"vectorhub.encoders.text.tfhub.elmo":{Elmo2Vec:[58,1,1,""]},"vectorhub.encoders.text.tfhub.elmo.Elmo2Vec":{add_documents:[58,2,1,""],bulk_encode:[58,2,1,""],chunk:[58,2,1,""],definition:[58,4,1,""],delete_collection:[58,2,1,""],encode:[58,2,1,""],encoder_type:[58,2,1,""],get_vector_field_name:[58,2,1,""],is_url_working:[58,2,1,""],read:[58,2,1,""],request_api_key:[58,2,1,""],retrieve_all_documents:[58,2,1,""],retrieve_documents:[58,2,1,""],search:[58,2,1,""],test_word:[58,2,1,""],urls:[58,4,1,""],validate_model_url:[58,2,1,""],vector_length:[58,2,1,""]},"vectorhub.encoders.text.tfhub.labse":{LaBSE2Vec:[58,1,1,""]},"vectorhub.encoders.text.tfhub.labse.LaBSE2Vec":{add_documents:[58,2,1,""],bulk_encode:[58,2,1,""],chunk:[58,2,1,""],definition:[58,4,1,""],delete_collection:[58,2,1,""],encode:[58,2,1,""],encoder_type:[58,2,1,""],get_vector_field_name:[58,2,1,""],init:[58,2,1,""],init_tokenizer:[58,2,1,""],is_url_working:[58,2,1,""],process:[58,2,1,""],read:[58,2,1,""],request_api_key:[58,2,1,""],retrieve_all_documents:[58,2,1,""],retrieve_documents:[58,2,1,""],search:[58,2,1,""],test_word:[58,2,1,""],urls:[58,4,1,""],validate_model_url:[58,2,1,""],vector_length:[58,2,1,""]},"vectorhub.encoders.text.tfhub.use":{USE2Vec:[58,1,1,""]},"vectorhub.encoders.text.tfhub.use.USE2Vec":{add_documents:[58,2,1,""],bulk_encode:[58,2,1,""],chunk:[58,2,1,""],definition:[58,4,1,""],delete_collection:[58,2,1,""],encode:[58,2,1,""],encoder_type:[58,2,1,""],get_vector_field_name:[58,2,1,""],init:[58,2,1,""],is_url_working:[58,2,1,""],read:[58,2,1,""],request_api_key:[58,2,1,""],retrieve_all_documents:[58,2,1,""],retrieve_documents:[58,2,1,""],search:[58,2,1,""],test_word:[58,2,1,""],urls:[58,4,1,""],validate_model_url:[58,2,1,""],vector_length:[58,2,1,""]},"vectorhub.encoders.text.tfhub.use_lite":{USELite2Vec:[58,1,1,""]},"vectorhub.encoders.text.tfhub.use_lite.USELite2Vec":{add_documents:[58,2,1,""],bulk_encode:[58,2,1,""],chunk:[58,2,1,""],definition:[58,4,1,""],delete_collection:[58,2,1,""],encode:[58,2,1,""],encoder_type:[58,2,1,""],get_vector_field_name:[58,2,1,""],init:[58,2,1,""],is_url_working:[58,2,1,""],process_texts:[58,2,1,""],read:[58,2,1,""],request_api_key:[58,2,1,""],retrieve_all_documents:[58,2,1,""],retrieve_documents:[58,2,1,""],search:[58,2,1,""],test_word:[58,2,1,""],urls:[58,4,1,""],validate_model_url:[58,2,1,""],vector_length:[58,2,1,""]},"vectorhub.encoders.text.tfhub.use_multi":{USEMulti2Vec:[58,1,1,""]},"vectorhub.encoders.text.tfhub.use_multi.USEMulti2Vec":{add_documents:[58,2,1,""],bulk_encode:[58,2,1,""],chunk:[58,2,1,""],definition:[58,4,1,""],delete_collection:[58,2,1,""],encode:[58,2,1,""],encoder_type:[58,2,1,""],get_vector_field_name:[58,2,1,""],init:[58,2,1,""],is_url_working:[58,2,1,""],read:[58,2,1,""],request_api_key:[58,2,1,""],retrieve_a
ll_documents:[58,2,1,""],retrieve_documents:[58,2,1,""],search:[58,2,1,""],test_word:[58,2,1,""],urls:[58,4,1,""],validate_model_url:[58,2,1,""],vector_length:[58,2,1,""]},"vectorhub.encoders.text.torch_transformers":{legal_bert:[59,0,0,"-"],torch_auto_transformers:[59,0,0,"-"],torch_longformers:[59,0,0,"-"]},"vectorhub.encoders.text.torch_transformers.legal_bert":{LegalBert2Vec:[59,1,1,""]},"vectorhub.encoders.text.torch_transformers.legal_bert.LegalBert2Vec":{add_documents:[59,2,1,""],bulk_encode:[59,2,1,""],chunk:[59,2,1,""],definition:[59,4,1,""],delete_collection:[59,2,1,""],encode:[59,2,1,""],encoder_type:[59,2,1,""],get_vector_field_name:[59,2,1,""],is_url_working:[59,2,1,""],read:[59,2,1,""],request_api_key:[59,2,1,""],retrieve_all_documents:[59,2,1,""],retrieve_documents:[59,2,1,""],search:[59,2,1,""],test_word:[59,2,1,""],urls:[59,4,1,""],validate_model_url:[59,2,1,""],vector_length:[59,2,1,""]},"vectorhub.encoders.text.torch_transformers.torch_auto_transformers":{Transformer2Vec:[59,1,1,""],list_tested_transformer_models:[59,3,1,""]},"vectorhub.encoders.text.torch_transformers.torch_auto_transformers.Transformer2Vec":{add_documents:[59,2,1,""],bulk_encode:[59,2,1,""],chunk:[59,2,1,""],definition:[59,4,1,""],delete_collection:[59,2,1,""],encode:[59,2,1,""],encoder_type:[59,2,1,""],get_vector_field_name:[59,2,1,""],is_url_working:[59,2,1,""],read:[59,2,1,""],request_api_key:[59,2,1,""],retrieve_all_documents:[59,2,1,""],retrieve_documents:[59,2,1,""],search:[59,2,1,""],test_word:[59,2,1,""],urls:[59,4,1,""],validate_model_url:[59,2,1,""],vector_length:[59,2,1,""]},"vectorhub.encoders.text.torch_transformers.torch_longformers":{Longformer2Vec:[59,1,1,""]},"vectorhub.encoders.text.torch_transformers.torch_longformers.Longformer2Vec":{add_documents:[59,2,1,""],bulk_encode:[59,2,1,""],chunk:[59,2,1,""],definition:[59,4,1,""],delete_collection:[59,2,1,""],encode:[59,2,1,""],encoder_type:[59,2,1,""],get_vector_field_name:[59,2,1,""],is_url_working:[59,2,1,""],read:[59,2,1,""],request_api_key:[59,2,1,""],retrieve_all_documents:[59,2,1,""],retrieve_documents:[59,2,1,""],search:[59,2,1,""],test_word:[59,2,1,""],urls:[59,4,1,""],validate_model_url:[59,2,1,""],vector_length:[59,2,1,""]},"vectorhub.encoders.text.vectorai":{vi_encoder:[60,0,0,"-"]},"vectorhub.encoders.text.vectorai.vi_encoder":{ViText2Vec:[60,1,1,""]},"vectorhub.encoders.text.vectorai.vi_encoder.ViText2Vec":{add_documents:[60,2,1,""],bulk_encode:[60,2,1,""],chunk:[60,2,1,""],delete_collection:[60,2,1,""],encode:[60,2,1,""],encoder_type:[60,2,1,""],get_vector_field_name:[60,2,1,""],is_url_working:[60,2,1,""],read:[60,2,1,""],request_api_key:[60,2,1,""],retrieve_all_documents:[60,2,1,""],retrieve_documents:[60,2,1,""],search:[60,2,1,""],test_word:[60,2,1,""],validate_model_url:[60,2,1,""],vector_length:[60,2,1,""]},"vectorhub.encoders.video":{sampler:[61,0,0,"-"]},"vectorhub.encoders.video.sampler":{FrameSamplingFilter:[61,1,1,""]},"vectorhub.encoders.video.sampler.FrameSamplingFilter":{get_audio_sampling_rate:[61,2,1,""],get_audio_vector:[61,2,1,""],get_frame:[61,2,1,""],initialize_video:[61,2,1,""],iter_frames:[61,2,1,""],load_clip:[61,2,1,""],transform:[61,2,1,""]},"vectorhub.errors":{ModelError:[29,5,1,""]},"vectorhub.errors.ModelError":{args:[29,4,1,""],with_traceback:[29,2,1,""]},"vectorhub.import_utils":{get_package_requirements:[29,3,1,""],is_all_dependency_installed:[29,3,1,""],is_dependency_installed:[29,3,1,""]},"vectorhub.utils":{list_installed_models:[29,3,1,""],list_models:[29,3,1,""]},vectorhub:{auto_encoder:[29,0,
0,"-"],base:[29,0,0,"-"],bi_encoders:[30,0,0,"-"],doc_utils:[29,0,0,"-"],encoders:[41,0,0,"-"],errors:[29,0,0,"-"],import_utils:[29,0,0,"-"],models_dict:[29,0,0,"-"],utils:[29,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","function","Python function"],"4":["py","attribute","Python attribute"],"5":["py","exception","Python exception"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:function","4":"py:attribute","5":"py:exception"},terms:{"01322":[4,44],"01852v1":[18,58],"02559":[19,59],"03385":[14,51,53],"03771":[21,57,59],"03832":49,"04805v2":[17,58],"04861":[13,53],"04906":34,"05150":59,"05365":58,"05484":33,"07261":[12,53],"08155":47,"100":[14,22,23,51,53,58],"1000":[14,51,53],"10084":[20,32,56],"1024":[9,13,16,17,20,44,51,53,56,58],"1024_a":[17,58],"109":[18,58],"11175":[22,23,58],"11370":[10,53],"11942":[16,58],"12764":[5,44],"128":[7,13,18,44,49,53,58],"1280":53,"12836":33,"12_h":[17,58],"1409":53,"1503":49,"1512":[14,51,53],"152":[14,51,53],"1536":[12,53],"160":[13,53],"16000":[4,5,7,9,42,43,44,61],"1602":[12,53],"1664":53,"1704":[13,53],"1792":53,"1802":58,"1803":[22,23,58],"1810":[17,33,58],"1908":[20,32,56],"1909":[16,58],"1910":[21,57,59],"1912":[10,53],"192":[13,53],"1pz_6zsy1vb0s0jmjemvd8fs99zomcin1":49,"1st":[14,51,53],"2002":[4,5,44,47],"2004":[33,34,59],"2007":[18,58],"2010":[19,59],"2012":[10,12,53],"2014":53,"2015":[12,14,49,51,53],"2016":[12,53],"2017":[13,53],"2018":[17,22,23,53,58],"2019":[10,16,20,32,33,53,56,58],"2020":[4,5,7,9,18,19,33,34,44,47,58,59],"2048":[10,14,16,44,53,58],"224":[13,53],"228":[16,58],"24_h":[17,58],"256":[13,53],"300":[20,56],"300d":[20,56],"4000":[4,44],"400k":[4,44],"4096":[16,58,59],"4842":53,"500":[4,44],"512":[5,13,20,22,23,33,44,49,53,56,58],"521":[9,44],"6144":[10,53],"619":[17,58],"768":[13,16,17,18,19,20,21,32,34,47,53,56,58,59],"768_a":[17,58],"8192":[10,53],"840b":[20,56],"abstract":[16,17,18,19,20,21,22,23,24,31,32,33,34,47,51,55,56,57,58,59,60],"break":25,"byte":[10,12,13,14,49,50,51,53],"case":[13,19,20,25,53,56,59],"class":[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60,61],"default":[4,5,7,9,10,12,13,14,18,19,25,32,33,34,44,47,49,51,53,58,59],"export":49,"final":[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],"float":[19,20,21,56,57,59],"function":[14,29,47,51,53,58],"import":[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,29,32,33,34,43,44,45,47,49,51,53,54,56,57,58,59,60],"int":[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60,61],"long":[16,17,18,19,20,21,22,23,32,56,57,58,59],"new":[12,16,17,18,25,26,27,33,34,49,53,58,59],"return":[20,21,25,29,56,57],"short":33,"static":[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],"true":[10,12,13,14,16,17,18,29,47,49,50,51,53,58],"while":[16,26,27,47,53,58],CLS:[12,53,59],FOR:[16,17,18,22,58],For:[6,9,15,22,24,29,44,53,58],Not:[0,4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,44,45,47,49,51,53,56,57,58,59],One:[12,53],The:[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,26,27,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],There:[12,53],These:[6,12,13,15,19,20,21,24,32,45,53,54,56,57,59,60],USE:[22,23,58],Use:25,Using:[4,44],With:[4,12,22,44,53,58],__2vec:[26,27],___2vec:[26,27],___:25,____2vec:[25,26,
27],____:[25,29],________:25,__traceback__:29,abc:[31,55],about:25,abov:[19,58,59],abs:[4,5,10,12,13,14,16,17,19,20,21,22,23,32,33,34,44,47,51,53,56,57,58,59],absolut:34,acceler:[12,53],access:[6,15,24,45,54,60],accur:[22,58],accuraci:[4,10,13,14,22,34,44,49,51,53,58],achiev:[10,12,14,19,33,47,49,51,53,59],across:[9,10,13,23,33,44,53,58],activ:[12,53],adapt:[10,19,53,59],add:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,26,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],add_docu:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],addit:[17,19,58,59],address:[16,58,59],adopt:0,advanc:[12,49,53],after:[6,15,24,45,51,54,60],agnost:[18,33,58],aihub:[22,58],aim:[26,27],albert2vec:[26,58],albert:[0,16,41,55],albert_en_bas:[16,58],albert_en_larg:[16,58],albert_en_preprocess:[16,58],albert_en_xlarg:[16,58],albert_en_xxlarg:[16,58],align:[33,49],all:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],allenai:59,allow:[0,6,7,9,13,15,22,24,26,27,44,45,49,53,54,58,60],alon:[9,34,44],along:[16,17,18,19,20,21,22,23,32,56,57,58,59],also:[4,12,14,16,19,33,44,49,51,53,58,59],altern:47,although:[7,9,44],alwai:[19,59],amazonaw:[4,5,7,9,44],amount:[4,7,9,22,44,58],analysi:[10,14,51,53],ani:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],anoth:[7,9,44],answer:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],api:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],api_kei:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],appli:[19,59],applic:[13,19,33,47,53,59],approach:[19,49,59],approxim:25,arab:[23,58],arbitrari:[4,44],architectur:[0,4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,26,27,29,32,33,34,44,47,49,51,53,56,57,58,59],architecture_map:51,arg:[10,12,13,14,29,49,50,51,53],argument:[29,53],around:[4,44],arrai:[10,12,13,14,49,50,51,53],art:[5,12,16,17,18,34,44,47,49,53,58,59],arxiv:[4,5,10,12,13,14,16,17,18,19,20,21,22,23,32,33,34,44,47,49,51,53,56,57,58,59],as_grai:[10,12,13,14,49,50,51,53],assess:53,asset:[10,12,13,14,49,51,53],assign:[9,44],assist:[19,59],associ:[22,25,58],attain:[10,53],attend:59,attent:59,attribut:[13,29,53],audio:[0,4,5,6,7,9,25,26,29,41],audio_fil:[6,45],audio_items_exampl:29,audio_metadata_exampl:29,audio_search_exampl:29,audioset:[9,44],augment:33,auto:[21,26,29,59],auto_encod:[26,28],autobiencod:29,autoencod:[0,29],automodel:[21,57,59],avail:[0,5,18,19,22,29,44,58,59],averag:[7,9,44],average_word_embeddings_glov:[20,56],average_word_embeddings_komnino:[20,56],average_word_embeddings_levy_depend:[20,56],avoid:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],bart:[21,59],base2vec:[29,42,50,55],base:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,30,32,33,34,41,43,44,45,47,49,53,54,56,57,58,59,60,61],baseaudio2vec:[4,5,7,9,42,43,44],baseimage2vec:[10,12,13,14,49,50,51,53],baselin:[9,22,33,44,58],baseqa2vec:[31,32,33,34],basetext2vec:[16,17,18,19,20,21,22,24,31,47,55,56,57,58,59,60],basetexttext2vec:25,basic:25,batch_siz:58,beach:[16,17,18,19,20,21,22,23,32,56,57,58,59],becom:[16,58],been:[7,9,12,19,44,49,51,53,59],begin:[20,21,56,57],believ:25,belong:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,
60],benchmark:[5,10,16,33,34,44,53,58],benefit:[12,49,53],bert2vec:[26,58],bert:[0,16,17,18,19,20,21,25,29,32,33,41,55,56,57,59],bert_en_cased_l:[17,58],bert_en_uncased_l:[17,58],bert_en_wwm_cased_l:[17,58],bert_en_wwm_uncased_l:[17,58],bert_multi_cased_l:[17,58],bert_zh_l:[17,58],best:[16,26,27,33,49,58],better:[16,47,58],between:[7,9,13,22,44,49,53,58],bi_encod:[25,26,28,29],bia:[22,58],bidirect:[17,58],big:[10,53],big_transf:[10,53],bilingu:[18,58],billion:[18,58],bilm:58,bimod:47,bit2vec:26,bit:[0,10,41,50],bit_medium:[41,50],bitext:[18,58],bitmedium2vec:53,bitsmall2vec:[10,53],black:[10,12,13,14,49,50,51,53],blindli:[19,59],bm25:34,bool:[16,17,18,58],both:[12,17,22,25,47,49,53,58,59],bottleneck:49,box:[6,15,19,24,33,45,54,59,60],branch:25,brief:25,broader:[19,59],bucket:[4,5,7,9,44],budget:53,build:[6,7,9,13,15,24,26,27,33,44,45,53,54,60],builder:[13,53],bulk:[10,13,20,21,24,32,33,34,49,53,56,57,60],bulk_encod:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,32,33,34,42,43,44,47,49,51,53,56,57,58,59,60],bulk_encode_answ:[32,33,34],bulk_encode_quest:[32,33,34],bytesio:[10,12,13,14,49,50,51,53],cache_dir:49,calibr:[7,9,44],call:[0,10,13,17,18,25,49,53,58],can:[0,6,7,9,14,17,18,29,33,34,44,45,47,49,51,53,58,59],candid:[33,34],cannot:[9,44],carefulli:[10,53],catch_vector_error:[25,29],categori:25,central:[12,53],certain:59,challeng:[4,12,33,44,49,53],chang:51,charact:[58,59],checkpoint:[9,44],chines:[23,58],choos:[13,53],chunk:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],chunk_siz:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],cifar:[10,14,51,53],claim:33,classif:[12,13,14,33,51,53,59],classifi:[7,9,44],classmethod:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],clear:[12,53],clip2vec:35,closer:33,cloud:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],cluster:[33,49],cmap:[10,12,13,14,49,50,51,53],cnn:59,code2vec:47,code:[5,9,25,44],codebert:46,codenam:53,codesearch:47,coher:[16,58],collect:[17,22,23,51,58],collection_nam:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],com:[4,5,7,9,10,12,13,14,17,20,22,32,43,44,47,49,51,53,56,58],combin:[10,12,53,58,59],come:59,common_voice_en_2:[4,5,7,9,44],commun:[5,18,44,58],compact:49,compar:[5,13,16,44,53,58],comparison:[22,49,58],compat:49,compet:33,complex:[14,22,51,53,58],compon:[10,53],comprehens:[14,16,51,53,58],comput:[12,19,22,53,58,59],concept:49,condit:[17,58],conduct:[10,53],config:[21,57],confus:0,conjunct:[12,53],connect:[12,53],consider:[14,51,53],consist:[16,58,59],constant:53,constraint:[13,53],construct:47,consumpt:[16,22,58],content:[26,28],context:[17,32,33,34,53,58],context_str:[32,33,34],contextu:58,contract:[19,59],contrast:59,convert:[10,12,13,14,24,49,50,51,53,60],convolut:[12,13,49,53],corpora:[19,59],corpu:[9,18,44,58],correspond:49,cost:[12,53],craft:53,creat:[0,7,9,17,25,29,44,58],create_doc:29,creation:[4,44],cross:[18,33,58],current:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],cut:49,data:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],data_typ:29,data_type_to_exampl:29,databunch:51,datafram:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],dataset:[5,7,
10,14,19,32,34,44,47,49,51,53,59],date:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,44,47,49,51,53,56,57,58,59],datetim:29,decis:53,decor:[25,29],deep:[10,12,13,17,26,27,49,53,58],deeper:[14,51,53],def:[25,29],definit:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,43,44,47,49,51,53,56,57,58,59],delete_collect:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],demand:[4,44],demonstr:[12,13,53],dens:34,depend:[20,21,25,29,56,57,59],deploi:[6,15,24,45,54,60],depth:[13,14,51,53],describ:[49,58],descript:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,25,29,32,33,34,44,47,49,51,53,56,57,58,59],design:[17,53,58],despit:49,detail:[9,10,44,53],detect:[4,13,22,44,47,49,53,58],dev:[4,5,7,9,10,12,13,14,16,17,18,22,23,25,33,44,53,58],develop:[6,15,24,33,45,47,54,60],devic:[4,44],dict:53,dictat:29,dictionari:[25,29,49,53],differ:[5,26,27,33,44,49],difficult:[14,51,53],dilat:59,dimens:51,dimension:[18,58],direct:49,directli:[4,9,44,49],directori:[25,29],distanc:49,distil:[0,32,44],distilbert:[20,21,56,59],distilroberta:[20,32,56],distilroberta_qa:[30,31,37],distilrobertaqa2vec:32,distilus:[20,56],distribut:[26,27],divers:[7,9,22,44,58],doc_util:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,26,28,32,33,34,43,44,47,49,51,53,56,57,58,59],document:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],dog:[16,17,18,19,20,21,22,23,32,56,57,58,59],doing:47,domain:[5,7,9,18,19,25,26,27,34,44,58,59],download:[22,49,58],downstream:[5,16,19,29,44,47,58,59],dpr2vec:[26,34],dpr:[0,30,31,37],drive:49,drop:59,dual:[33,34],due:[16,58,59],dummi:25,dure:[18,58],dutch:[23,58],each:[9,18,44,49,58,59],eas:[14,51,53],easi:59,easier:[14,51,53],easili:[0,6,15,24,26,27,45,49,51,54,60],east:[4,5,7,9,44],echr:[19,59],effect:[4,13,18,33,44,53,58],effici:[10,13,22,34,49,53,58],either:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],elmo2vec:58,elmo:[41,55],els:29,email:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],embed:[0,4,5,6,7,9,13,15,18,22,24,33,34,44,45,49,53,54,58,60],empir:[12,14,16,51,53,58],employ:59,empti:53,enabl:[4,44,47],encod:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,31,32,33,34],encode_answ:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],encode_quest:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],encoder_typ:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],encourag:[22,58],end:[6,7,9,15,24,34,44,45,54,58,60],english:[23,58],enjoi:[16,17,18,19,20,21,22,23,32,56,57,58,59],ensembl:[12,14,51,53],ensur:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],entiti:[26,27],enwik8:59,equival:[4,44],error:[12,14,25,26,28,49,51,53],establish:[5,16,18,34,44,58],etc:[25,33,47],euclidean:49,eurlex:[19,59],evalu:[5,6,14,15,19,24,33,34,44,47,51,53,59],even:[5,18,44,58],event:[7,9,44],everi:61,evid:[12,14,16,51,53,58],exampl:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],exce:[5,44],except:29,exclus:[18,58],exist:[5,44],expect:[7,9,44],expens:[12,53],experi:[13,53],explan:[26,27],explicitli:[14,51,53],exploit:[5,44],explor:[4,7,9,19,44,59],explos:[26,27],extens:[13,23,53,58],extra
_requir:29,extra_requirements_fil:29,extract:[4,44,51],extract_fac:49,extraction_lay:51,extractor:[7,9,44],face2vec:48,face:[13,53],face_pixel:49,facebook:[21,59],facenet:49,facto:34,fairseq:[0,43],fall:[25,33],fals:29,famili:[19,59],fastai:[41,50],fastaibas:51,fastairesnet2vec:51,faster:[7,9,44],fbaipublicfil:43,featur:[4,7,9,44,49],feature_vector:[12,13,14,53],few:[10,53],fewer:[16,58],field:49,file:29,filenam:61,find:[19,22,25,33,58,59],fine:[7,9,10,17,18,19,44,47,53,58,59],finegrain:[13,53],finetun:59,first:58,fix:[47,58,59],flatten:51,flexibl:[26,27],focu:[19,59],focus:[16,58],follow:[19,25,29,58,59],fork:25,format:[10,12,13,14,49,50,51,53],former:47,found:[0,29],frame:[12,53],framesamplingfilt:61,framework:[14,26,27,34,51,53],freeli:[22,58],french:[23,58],from:[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,29,32,33,34,43,44,45,47,49,50,51,53,54,56,57,58,59,60],from_markdown:29,from_model:29,func:29,further:[12,16,18,53,58],furthermor:47,g6zqj53:[17,58],gain:[14,51,53],gap:59,garden:[9,44],gener:[10,12,19,47,49,53,59],geo:[13,53],german:[23,58],get:[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],get_audio_sampling_r:61,get_audio_vector:61,get_fram:61,get_list_of_url:[20,56],get_model_definit:29,get_package_requir:29,get_vector_field_nam:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],getvectorai:[10,12,13,14,49,51,53],github:[6,10,20,32,45,47,53,56],give:[12,53],given:[0,7,9,33,44],global:[13,53,59],glue:[16,58],goal:[5,33,44],good:[12,22,53,58],googl:[4,5,7,9,10,12,13,14,17,18,22,23,33,44,49,53,58],googlenet:53,gpu:[16,58],grayscal:[10,12,13,14,49,50,51,53],great:[33,34],greater:49,grow:[4,44],gstatic:[22,58],guid:[25,26],guidelin:[19,59],hallmark:53,harder:[16,58],harmon:49,has:[5,7,9,12,19,26,27,44,49,51,53,58,59],have:[5,7,9,12,14,16,18,25,26,27,44,51,53,58,59],hebbian:53,hei:[12,24,53,60],height:[10,12,13,14,49,50,51,53],help:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],here:[0,12,19,25,51,53,59],hertz:61,heurist:[10,53],hidden:[47,58],high:[7,9,10,18,44,53,58],highest:51,home:[26,27,29],hour:25,how:[6,12,15,24,26,27,29,33,34,45,53,54,60],howev:[16,19,58,59],html:[21,57,59],http:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,32,33,34,43,44,45,47,49,51,53,56,57,58,59],hub:[10,12,13,14,22,26,29,49,51,53,58],huggingfac:[19,21,57,59],hybrid:47,hyper:[13,19,53,59],hyperparamet:[10,53],ident:29,identifi:25,idf:34,ijqlhzz:[17,58],ilsvrc:[10,12,14,51,53],imag:[0,4,5,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,31,32,33,34,41,42,43,44,47,49,55,56,57,58,59,60],image_arrai:[10,12,13,14,49,50,51,53],image_dimens:[13,53],image_filenam:49,image_input:49,image_items_exampl:29,image_metadata_exampl:29,image_res:[10,12,13,14,49,50,51,53],image_search_exampl:29,imagenet:[12,13,14,51,53],implement:[34,49],import_util:[25,26,28],impress:[19,59],improv:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],inaturalist:53,incarn:53,incept:[0,12,53],inception2vec:26,inception_resnet:[12,41,50],inception_resnet_v2:[12,53],inception_v1:53,inception_v2:53,inception_v3:53,inceptionresnet2vec:[26,53],inceptionresnet:[12,53],inceptionv12vec:53,inceptionv1:[41,50],inceptionv22vec:53,inceptionv2:[41,50],inceptionv32vec:53,inceptionv3:[41,50],includ:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60
],incorpor:47,increas:[14,16,51,53,58],independ:[9,44],index:[26,29,61],indic:[19,59],infer:[17,58],init:[10,13,14,16,17,18,22,23,43,53,58],init_token:[16,17,18,58],initi:[7,9,44,53],initialize_video:61,input:[7,9,14,16,44,47,51,53,58],input_str:[17,18,58],insid:53,inspir:0,instal:[0,4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,44,47,49,51,53,56,57,58,59],instanti:[0,29,51],instead:[4,14,44,51,53],intend:[19,59],inter:[16,58],interestingli:33,intermedi:49,intern:58,introduc:[13,17,26,27,49,53,58,59],introduct:[12,53],intuit:53,investig:[19,22,47,58,59],involv:25,is_all_dependency_instal:[25,29],is_dependency_instal:29,is_grayscal:[10,12,13,14,49,50,51,53],is_url_work:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],isntal:29,issu:29,italian:[23,58],item:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],item_exampl:29,iter:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],iter_fram:61,its:[12,19,53,59],itself:49,japanes:[23,58],jointli:[17,58],jpg:[15,54],json:29,json_fn:29,just:[17,58],keep:53,kei:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],keyword:[4,29,44,53],kind:33,know:25,knowledg:47,korean:[23,58],kwarg:[29,53],label:[5,7,9,44,49],labs:[0,18,20,41,55,56],labse2vec:[26,58],landmark:49,languag:[5,16,17,18,23,26,27,33,44,47,58,59],languageagnost:33,lareqa2vec:[26,32,33,34],lareqa:[0,32,33,34],lareqa_qa:[30,31,32,34,37],larg:[7,9,10,13,16,17,20,22,23,34,44,53,56,58,59],larger:[7,9,18,44,58],largest:[12,53],last:47,latenc:[13,53],latest:[12,53],latter:47,law:[19,59],layer:[5,9,14,17,44,49,51,53,58],layer_num:51,lead:[10,16,53,58],learn:[4,5,10,14,22,26,27,33,34,44,47,49,51,53,58],learner:51,left:[17,58],legal:[0,19,59],legal_bert:[19,41,55],legalbert2vec:[26,59],legisl:[19,59],length:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,44,47,49,51,53,55,56,57,58,59,60],let:[9,25,44],level:[4,7,9,22,44,58,59],lfw:49,librari:[25,29],light:[13,53],like:[9,44],limit:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,44,47,49,51,53,56,57,58,59],linear:[58,59],linearli:59,lingual:[18,33,58],link:[10,12,13,14,49,50,51,53],list:[0,4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],list_all_auto_model:29,list_available_auto_model:0,list_installed_model:29,list_model:29,list_of_url:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],list_tested_transformer_model:[21,59],lite:[0,16,58],load:29,load_clip:61,local:[13,53,59],logo:[10,12,13,14,49,51,53],longer:[16,58,59],longform:59,longformer2vec:59,loss:[5,16,44,49,58],lot:[7,9,44],low:[4,5,12,18,44,53,58],lower:[14,16,51,53,58],lst:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],lstm:58,lstm_outputs1:58,lstm_outputs2:58,lucen:34,machin:[4,33,44],made:[22,58],main:[10,29,53],make:[7,9,25,44,59],mani:[4,44],map:[49,51,53],margin:[12,53],markdown:29,markdown_filepath:29,match:49,max:[17,58],max_length:58,max_seq_length:[16,17,18,58],mbert:33,mbert_en_en:33,mbert_x_i:33,mbert_x_x:33,mbert_x_x_mono:33,mean:[4,5,7,9,20,21,26,27,29,42,43,44,47,56,57,58,59],meant:[26,27],measur:49,mechan:59,medic:[5,44],medium:[0,17,53,58],memori:[16,58],messag:29,metadata:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,4
7,49,50,51,53,55,56,57,58,59,60],metadata_exampl:29,method:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,31,32,33,34,42,43,44,45,47,49,50,51,53,55,56,57,58,59,60],mfcc:[4,44],microsoft:47,million:[7,9,44],mine:[18,49,58],minim:[22,58],minimum:25,minut:25,miro:[17,58],mis:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],mismatch:[7,9,44],miss:29,mlm:[18,58],mobil:[13,53],mobilenet2vec:26,mobilenet:[0,9,13,41,44,50],mobilenet_v1_025_128:[13,53],mobilenet_v1_025_160:[13,53],mobilenet_v1_025_192:[13,53],mobilenet_v1_025_224:[13,53],mobilenet_v1_050_128:[13,53],mobilenet_v1_050_160:[13,53],mobilenet_v1_050_192:[13,53],mobilenet_v1_050_224:[13,53],mobilenet_v1_075_128:[13,53],mobilenet_v1_075_160:[13,53],mobilenet_v1_075_192:[13,53],mobilenet_v1_075_224:[13,53],mobilenet_v1_100_128:[13,53],mobilenet_v1_100_160:[13,53],mobilenet_v1_100_192:[13,53],mobilenet_v1_100_224:[13,53],mobilenet_v2_035_128:53,mobilenet_v2_035_160:53,mobilenet_v2_035_192:53,mobilenet_v2_035_224:53,mobilenet_v2_035_96:53,mobilenet_v2_050_128:53,mobilenet_v2_050_160:53,mobilenet_v2_050_192:53,mobilenet_v2_050_224:53,mobilenet_v2_050_96:53,mobilenet_v2_075_128:53,mobilenet_v2_075_160:53,mobilenet_v2_075_192:53,mobilenet_v2_075_224:53,mobilenet_v2_075_96:53,mobilenet_v2_100_128:53,mobilenet_v2_100_160:53,mobilenet_v2_100_192:53,mobilenet_v2_100_224:53,mobilenet_v2_100_96:53,mobilenet_v2_130_224:53,mobilenet_v2_140_224:53,mobilenetv12vec:[13,53],mobilenetv22vec:53,mobilenetv2:[41,50],model:[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,26,27,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],model_dict:25,model_id:29,model_nam:[19,20,21,29,47,56,57,59],model_path:49,model_requir:25,model_url:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],modeldefinit:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,43,44,47,49,51,53,56,57,58,59],modelerror:29,models_dict:[25,26,28],modif:[17,58],modul:[0,18,23,25,26,28],monolingu:[18,58],more:[9,12,14,44,51,53,58],most:[4,44,59],motiv:59,msmacro:32,msmarco:[20,32,56],much:[16,49,51,58],multi:[0,16,23,33,53,58],multilingu:[18,20,23,33,56,58],multipl:[18,19,21,23,34,58,59],name:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],nat:47,natur:[16,47,58],necessari:[6,45],need:[0,4,6,9,15,24,25,44,45,54,60],net:[14,20,32,51,53,56],network:[5,10,12,13,14,44,49,51,53],neural:[10,13,14,47,51,53],new_model:25,new_sampling_r:[4,5,7,9,42,43,44,61],nli:[20,56],nlp:[19,22,58,59],nlpaueb:[19,59],non:[5,12,44,49,53],none:[4,5,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,54,55,56,57,58,59,60,61],nonsemant:[5,44],normal:[16,17,18,58],note:[47,58],novel:49,novelti:59,now:[26,27],num_of_docu:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],num_result:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],number:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],numpi:[10,12,13,14,25,49,50,51,53],object:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60,61],observ:[22,58],obtain:[4,22,44,58],off:[13,22,53,58],often:[16,19,58,59],onc:[10,12,13,14,25,49,50,51,53],one:[4,12,17,25,44,53,58],onli:[4,33,44,49],onlin:49,ontolog:[9,44],open:[26,27,29,34],oper
:59,optim:[14,18,49,51,53,58],option:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],order:[26,27],org:[4,5,10,12,13,14,16,17,18,19,20,21,22,23,32,33,34,44,47,49,51,53,56,57,58,59],origin:[9,16,19,44,58,59],other:[5,13,18,22,33,44,49,53,58],our:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,26,27,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],out:[6,15,19,24,33,45,54,59,60],outlin:25,outperform:[5,12,22,34,44,53,58,59],output:[7,9,17,44,47,58],output_lay:58,outsid:[10,13,49,53],over:[4,10,33,44,53],packag:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,26,28],page:26,pair:[18,33,47,53,58],panda:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],paper:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,44,47,49,51,53,56,57,58,59],paradigm:[10,53],parallel:[18,58],param:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,55,56,57,58,59,60],paramet:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],paraphras:[20,56],part:[4,7,9,44],particular:[7,9,44,53],pass:25,passag:34,patch:49,path:29,pattern:59,pdf:[18,33,49,58],peopl:[26,27],per:[9,10,44,49,53],percent:[12,53],perform:[5,9,10,12,13,16,19,22,33,44,47,49,53,58,59],person:[5,44],pip:[0,4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,32,33,34,44,47,49,51,53,56,57,58,59],pixel:49,place:[14,51,53],plausibl:47,png:[10,12,13,14,17,22,49,51,53,58],point:[16,58],polish:[23,58],pool:[20,21,33,47,56,57,58],pooled_output:[16,17,58],pooler_output:47,pooling_method:[47,59],pooling_strategi:[16,17,58],popular:[13,53],portugues:[23,58],possible_architectur:51,power:[4,44],practic:34,pre:[4,5,10,17,18,19,22,44,47,53,58,59],predict:[9,44],preprocessor_url:[16,58],present:[12,13,14,16,18,22,33,47,49,51,53,58],pretrain:[16,22,58,59],pretrained_model:[21,57,59],previou:[19,33,47,49,59],previous:[14,51,53],principl:53,prior:59,probabl:[9,44],probe:47,problem:[13,16,53,58],process:[4,17,18,44,53,58,59],process_text:58,produc:[4,18,44,49,58],program:47,project:[25,59],proper:[9,12,44,53],properti:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,55,56,57,58,59,60],propos:[5,10,16,19,44,53,58,59],proprietari:[26,27],provid:[9,14,25,26,27,44,47,51,53],publicli:[5,44],publish:49,purpos:[6,15,24,45,47,54,60],python:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],pytorch:[26,41,42],qa_items_exampl:29,qa_metadata_exampl:29,qa_search_exampl:29,quadrat:59,qualiti:53,question:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],quickli:[4,7,9,44],quora:[20,56],r101x1:[10,53],r101x3:[10,53],r152x4:[10,53],r50x1:[10,53],r50x3:[10,53],race:[16,58],rais:[12,29,53],raise_warn:29,rang:[10,13,17,34,53,58],rank:[20,56],rate:49,rather:49,re9vva:[17,58],reach:[4,44],read:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],real:[4,44],reason:[9,44],recent:[12,17,49,53,58],recip:[10,53],recogn:[4,44],recognit:[12,49,53],recommend:25,record:49,redownload:49,reduc:[5,44],reduct:[16,58],refer:[14,51,53],referral_cod:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],reformul:[14,51,53],regim:[10,53],rel:[12,29,53],relat:33,relationship:[22,58],releas:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,32,3
3,34,44,47,49,51,53,56,57,58,59],release_d:29,relev:0,reli:34,replac:[25,47,51,59],repo:29,report:[22,58],repositori:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,44,47,49,51,53,56,57,58,59],represent:[5,10,16,17,18,26,27,33,34,44,47,49,53,58],request:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],request_api_kei:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,55,56,57,58,59,60],requir:[0,5,7,9,25,29,33,44],requirement_typ:29,rescal:[10,12,13,14,49,50,51,53],research:[10,19,33,53,59],reshape_s:49,residu:[12,14,51,53],resize_mod:[10,12,13,14,49,50,51,53],resnet2vec:26,resnet34:51,resnet:[0,12,14,41,50],resnet_v1_101:[14,53],resnet_v1_152:[14,53],resnet_v1_50:[14,53],resnet_v2_50:53,resnetv12vec:[14,53],resnetv22vec:53,resnetv2:[41,50],resolv:29,resourc:[5,13,18,22,44,53,58],resource_filenam:29,respons:53,result:[14,16,17,18,22,47,49,51,53,58,59],retriev:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],retrieve_all_docu:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],retrieve_docu:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],return_base_dictionari:29,return_names_onli:29,revisit:[10,53],rgb:[10,12,13,14,49,50,51,53],rgb_weight:[10,12,13,14,49,50,51,53],right:[13,17,53,58],rise:[4,26,27,44],roberta:[20,32,56,59],roughli:49,rst:29,rubric:29,runner:29,russian:[23,58],same:[4,33,44],sampl:[5,7,9,10,12,13,14,15,44,47,49,50,51,53,54],sample_r:44,sampler:[29,41],sbert:[20,32,56],scale:[9,10,12,13,16,44,49,53,58,59],score:[9,44,59],scratch:[19,59],search:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,26,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],search_exampl:29,second:58,section:[20,56],see:[6,15,24,45,54,60],select:[6,10,15,24,34,45,51,53,54,60],self:[16,25,29,58,59],semant:[5,33,44],sentenc:[16,18,19,21,22,23,26,32,33,47,56,58,59],sentence_auto_transform:[20,41,55],sentence_transform:[20,30,31,37,41,55],sentencetransformer2vec:[26,56],separ:[13,53],sequenc:59,seriou:49,set:[4,12,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,44,47,51,53,55,56,57,58,59,60],setup:33,sever:[12,19,53,59],shallow:[7,9,44],shape:58,shot:[33,47],should:[7,9,10,13,25,44,47,49,53],show:[4,10,12,13,14,16,34,44,47,49,50,51,53,58],show_face_landmark:49,show_imag:[10,12,13,14,49,50,51,53],shown:[12,53],side:59,signatur:[4,44],signific:49,significantli:[12,33,53],similar:[12,18,22,49,53,58,59],similarli:[12,53],simpl:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],simplifi:[10,23,53,58],singl:[12,53],size:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],slide:59,small:[4,7,9,10,19,34,44,53,59],some:[7,9,12,16,44,53,58],sourc:[9,26,27,33,44],space:[19,33,34,49,59],spanish:[23,58],spars:34,special:[7,9,44],specialis:[19,59],specif:[9,17,18,19,22,26,27,44,58,59],specifi:[4,5,7,9,10,13,16,17,18,19,20,21,22,23,24,31,32,33,34,42,43,44,47,49,53,55,56,57,58,59,60],speech:[0,4,5,44],speech_embed:[4,41,42],speechembedding2vec:[26,44],speed:[16,58],spell:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],splitter:29,spoken:[4,44],spot:[4,44],squad:[16,33,58],stabil:[12,53],stack:58,stand:[9,17,44,58],standard:[49,59],standardis:49,standardise_imag:49,start:[7,
9,44],state:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,29,32,33,34,44,47,49,51,53,56,57,58,59],statement:25,step:25,still:[7,9,14,44,51,53],store:[26,27],str:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,55,56,57,58,59,60,61],stranger:[24,60],strategi:[19,33,59],streamlin:[12,13,53],string:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],string_typ:[32,33,34],strong:[10,13,33,34,53],stsb:[20,56],studi:33,submiss:53,submit:25,submodul:[26,28,30,35,41,46,48],subpackag:[26,28],substant:33,substanti:[14,17,51,53,58],success:[6,15,24,45,54,60],suffici:[4,44],sum:58,supervis:[10,16,22,53,58],support:[0,4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],surprisingli:[10,22,53,58],surround:59,symmetr:[10,12,13,14,49,50,51,53],synthes:[4,44],synthet:[4,44],system:[7,9,34,44,49],systemat:[19,59],take:[9,16,17,18,19,20,21,22,23,25,32,44,47,56,57,58,59],target:[10,22,33,53,58],task:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,26,27,29,32,33,34,44,47,49,51,53,56,57,58,59],techniqu:[16,26,27,49,58],technolog:[19,59],tend:[22,58],tensor:58,tensorflow:[9,16,17,25,26,44,57,58,59],tensorflow_hub:25,tensorflow_text:25,term:[4,34,44],test:[5,6,12,14,15,22,24,33,44,45,51,53,54,58,60],test_word:[16,17,18,19,20,21,22,23,24,31,32,33,34,47,55,56,57,58,59,60],text2vec:55,text8:59,text:[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,31,32,33,34,41,42,43,44,45,47,49,50,51,53,54],text_image_search_exampl:29,text_items_exampl:29,text_metadata_exampl:29,text_search_exampl:29,text_text:[0,25,29,30],tf_auto_transform:[21,41,55],tf_transform:[21,41,55,59],tfhub:[25,26,30,31,32,34,37,41,42,50,55],tftext:33,tftransformer2vec:[21,57,59],thai:[23,58],than:[14,25,33,47,49,51,53],thei:58,them:[20,21,25,56,57],themselv:[6,15,24,45,54,60],thi:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],thin:[12,53],those:[14,51,53],thousand:59,thread:[22,23,58],three:[12,53],threshold:[9,44],through:[18,58],thu:[19,59],time:[16,58],tlm:[18,58],to_dict:29,to_grayscal:[10,12,13,14,49,50,51,53],todai:[33,34],todo:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],token:[20,21,47,56,57,59],top:[10,12,34,53],top_n:61,torch:[19,21,34,35,59],torch_auto_transform:[21,41,55],torch_longform:[41,55],torch_transform:[19,21,30,31,37,41,55],total:[10,53],tpu:[16,58],trade:[13,22,53,58],tradeoff:[13,53],tradit:[12,23,34,53,58],train:[4,5,7,9,10,12,14,16,17,18,19,22,23,25,26,27,29,32,33,44,47,49,51,53,58,59],trainabl:58,trainable_model:58,transfer:[5,10,22,44,53,58],transform:[0,17,26,32,34,46,56,57,58,59,61],transformer2vec:[26,59],translat:[18,33,58],treat:[9,44],trill2vec:[26,44],trill:[0,5,41,42],trill_distil:[41,42],trilldistilled2vec:44,triplet:[5,44,49],triviaqa:59,truncat:47,tune:[7,9,10,17,18,19,44,47,53,58,59],turkish:[23,58],turn:[20,21,56,57],two:[13,16,22,47,53,58],type:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,25,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],ukplab:[20,32,56],ultim:[5,44],unabl:59,uncas:[19,21,32,57,59],under:25,underscor:33,understand:[26,27],unimod:47,union:[19,21,24,59,60],univers:[22,23,33,58],unlabel:[17,58],unlik:[17,33,58],unreferenc:[14,51,53],unrel:33,unsupervis:[5,44],ural:47,url:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],usabl
:[7,9,44],use2vec:[23,26,58],use:[0,4,6,13,15,16,19,22,23,24,25,26,27,29,41,44,45,49,53,54,55,59,60],use_lit:[41,55],use_multi:[23,41,55],use_multi_qa:[30,31,37],use_qa:[30,31,37],used:[7,9,14,18,26,27,29,33,44,49,51,53,58,59],useful:[4,33,44],uselite2vec:58,usemulti2vec:[26,58],usemultiqa2vec:[25,33],useqa2vec:[26,33],user:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,55,56,57,58,59,60],usernam:[4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],uses:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],using:[6,9,10,12,18,19,21,22,26,32,33,34,44,45,49,53,58,59],utf:29,util:[25,26,28,47,53],valid:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],validate_model_url:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],valu:[29,53],variant:[22,33,58],variat:[12,53],varieti:[5,9,33,44,59],variou:29,vctr:[6,15,24,25,45,54,60],vecsearch:[4,5,7,9,44],vector:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,26,29,31,32,33,34,42,43,44,45,47,49,50,51,53,54,55,56,57,58,59,60],vector_length:[4,5,6,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,44,45,47,53,55,56,57,58,59,60],vector_oper:[4,5,7,9,42,43,44],vectorai:[6,15,24,41,42,50,55],vectorai_integr:29,vectorhub:[0,4,5,6,7,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25],vectorhub_ref:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],verbos:29,veri:[7,9,12,44,53],verif:49,version:49,vgg:[14,51,53],vggish2vec:[26,44],vggish:[0,7,41,42],vi_encod:[6,15,24,41,42,50,55],via:[22,33,53,58],viaudio2vec:[26,45],viclient:[4,5,7,9,10,12,13,14,16,17,18,19,20,21,22,23,24,29,31,32,33,34,42,43,44,47,49,50,51,53,55,56,57,58,59,60],video:[7,9,29,41,44],view:[0,29],viimage2vec:[26,54],viindex:29,vision:[10,13,26,27,53],visual:[5,10,44,53],vitext2vec:[6,15,26,45,54,60],voic:[4,5,7,9,44],vtab:[10,53],walk:[16,17,18,19,20,21,22,23,32,56,57,58,59],warm:[7,9,44],warn:29,wav2vec2_vox_960h:43,wav2vec:[0,26,41,42],wav2vec_smal:43,wav2vec_small_100h:43,wav2vec_small_10m:43,wav2vec_small_960h:43,wav2vec_vox:43,wav2vec_vox_100h:43,wav2vec_vox_10m:43,wav:[4,5,6,7,9,44,45],waveform:[9,44],weak:33,weat:[22,58],weather:[33,34],websit:[6,45],weight:[10,12,13,14,49,50,51,53,58],well:[10,19,22,33,53,58,59],were:53,what:[25,26,47],when:[0,4,10,16,19,34,44,53,58,59],where:[9,19,34,44,47,49,58,59],whether:[12,47,53],which:[4,7,9,17,18,25,29,44,47,49,51,53,58,59],white:[10,12,13,14,49,50,51,53],who:33,why:[32,33,34],wide:[9,10,12,13,17,34,44,49,53,58],width:[10,12,13,14,49,50,51,53],wikihop:59,wikipedia:[20,56],wild:49,window:[16,17,18,22,58,59],wise:[13,53],with_traceback:29,without:[4,7,9,12,17,44,53,58],won:[14,51,53],word:[19,20,21,22,55,56,57,58,59],word_emb:58,work:[6,15,24,29,34,45,54,59,60],would:[4,44],write:25,written:25,www:[22,58],xlm:[20,56],yamnet2vec:[26,44],yamnet:[0,9,41,42],year:[12,53],yet:[5,44],yield:[12,53],you:[0,6,7,9,25,44,45],your:26,youtub:[7,9,44,49],zero:[33,47]},titles:["Guide to using 
Auto-Encoder","DPR2Vec","LAReQA2Vec","USEQA2Vec","SpeechEmbedding2Vec","Trill2Vec","ViAudio2Vec","Vggish2Vec","Wav2Vec","Yamnet2Vec","Bit2Vec","Inception2Vec","InceptionResnet2Vec","MobileNet2Vec","ResNet2Vec","ViImage2Vec","AlBert2Vec","Bert2Vec","LaBSE2Vec","LegalBert2Vec","SentenceTransformer2Vec","Transformer2Vec","USE2Vec","USEMulti2Vec","ViText2Vec","How To Add Your Model To Vector Hub","Welcome to VectorHub\u2019s documentation!","What is Vector Hub?","vectorhub","vectorhub package","vectorhub.bi_encoders package","vectorhub.bi_encoders.qa package","vectorhub.bi_encoders.qa.sentence_transformers package","vectorhub.bi_encoders.qa.tfhub package","vectorhub.bi_encoders.qa.torch_transformers package","vectorhub.bi_encoders.text_image package","vectorhub.bi_encoders.text_image.torch package","vectorhub.bi_encoders.text_text package","vectorhub.bi_encoders.text_text.sentence_transformers package","vectorhub.bi_encoders.text_text.tfhub package","vectorhub.bi_encoders.text_text.torch_transformers package","vectorhub.encoders package","vectorhub.encoders.audio package","vectorhub.encoders.audio.pytorch package","vectorhub.encoders.audio.tfhub package","vectorhub.encoders.audio.vectorai package","vectorhub.encoders.code package","vectorhub.encoders.code.transformers package","vectorhub.encoders.face package","vectorhub.encoders.face.tf package","vectorhub.encoders.image package","vectorhub.encoders.image.fastai package","vectorhub.encoders.image.tensorflow package","vectorhub.encoders.image.tfhub package","vectorhub.encoders.image.vectorai package","vectorhub.encoders.text package","vectorhub.encoders.text.sentence_transformers package","vectorhub.encoders.text.tf_transformers package","vectorhub.encoders.text.tfhub package","vectorhub.encoders.text.torch_transformers package","vectorhub.encoders.text.vectorai package","vectorhub.encoders.video 
package"],titleterms:{add:25,albert2vec:16,albert:58,audio:[42,43,44,45],auto:0,auto_encod:29,base:[29,31,37,42,50,51,55],bert2vec:17,bert:58,bi_encod:[30,31,32,33,34,35,36,37,38,39,40],bit2vec:10,bit:53,bit_medium:53,clip2vec:36,code:[46,47],codebert:47,content:[29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61],distilroberta_qa:[32,38],doc_util:29,document:26,dpr2vec:1,dpr:[34,40],elmo:58,encod:[0,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61],error:29,face2vec:[49,52],face:[48,49],fastai:51,guid:0,how:25,hub:[25,27],imag:[50,51,52,53,54],import_util:29,inception2vec:11,inception_resnet:53,inceptionresnet2vec:12,inceptionv1:53,inceptionv2:53,inceptionv3:53,indic:26,labs:58,labse2vec:18,lareqa2vec:2,lareqa_qa:[33,39],legal_bert:59,legalbert2vec:19,mobilenet2vec:13,mobilenet:53,mobilenetv2:53,model:25,models_dict:29,modul:[29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61],packag:[29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61],pytorch:[8,21,43],resnet2vec:14,resnet:[51,53],resnetv2:53,sampler:61,sentenc:20,sentence_auto_transform:56,sentence_transform:[32,38,56],sentencetransformer2vec:20,speech_embed:44,speechembedding2vec:4,submodul:[29,31,32,33,34,36,37,38,39,40,42,43,44,45,47,49,50,51,52,53,54,55,56,57,58,59,60,61],subpackag:[29,30,31,35,37,41,42,46,48,50,55],tabl:26,tensorflow:[21,52],text:[55,56,57,58,59,60],text_imag:[35,36],text_text:[37,38,39,40],tf_auto_transform:57,tf_transform:57,tfhub:[2,3,4,5,7,9,10,11,12,13,14,16,17,18,22,23,33,39,44,53,58],torch:36,torch_auto_transform:59,torch_longform:59,torch_transform:[34,40,59],transform:[1,19,20,21,47],transformer2vec:21,trill2vec:5,trill:44,trill_distil:44,use2vec:22,use:58,use_lit:58,use_multi:58,use_multi_qa:[33,39],use_qa:[33,39],usemulti2vec:23,useqa2vec:3,using:0,util:29,vector:[6,15,24,25,27],vectorai:[45,54,60],vectorhub:[26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61],vggish2vec:7,vggish:44,vi_encod:[45,54,60],viaudio2vec:6,video:61,viimage2vec:15,vitext2vec:24,wav2vec:[8,43],welcom:26,what:27,yamnet2vec:9,yamnet:44,your:25}}) \ No newline at end of file diff --git a/docs/vectorhub.bi_encoders.html b/docs/vectorhub.bi_encoders.html deleted file mode 100644 index 0f88ce07..00000000 --- a/docs/vectorhub.bi_encoders.html +++ /dev/null @@ -1,313 +0,0 @@ - - - - - - - - - - vectorhub.bi_encoders package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\ No newline at end of file diff --git a/docs/vectorhub.bi_encoders.qa.html b/docs/vectorhub.bi_encoders.qa.html deleted file mode 100644 index ca8d3c2f..00000000 --- a/docs/vectorhub.bi_encoders.qa.html +++ /dev/null @@ -1,406 +0,0 @@ - vectorhub.bi_encoders.qa package — VectorHub 0.1 documentation

vectorhub.bi_encoders.qa package

- -
-

Submodules

-
-
-

vectorhub.bi_encoders.qa.base module

-
-
-class vectorhub.bi_encoders.qa.base.BaseQA2Vec
-

Bases: vectorhub.encoders.text.base.BaseText2Vec, abc.ABC

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame.
:param lst: Python List
:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode()
-
- -
-
-abstract encode_answer()
-
- -
-
-abstract encode_question()
-
- -
-
-property encoder_type
-

The encoder type determines whether the model exposes 'encode' or the 'encode_question'/'encode_answer' pair. Currently supported encoder types:

-
-

Question-Answer
Text-Image
Encoder
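For illustration, a hedged sketch of how calling code might branch on this property, assuming model is an instance of a concrete subclass; the exact string values returned by encoder_type are not stated on this page, so the 'qa' comparison below is an assumption:
>>> # assumption: question-answer encoders report a dedicated encoder type such as 'qa'
>>> if model.encoder_type == 'qa':
...     vector = model.encode_question('How do I get a refund?')
... else:
...     vector = model.encode('How do I get a refund?')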

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method that specifies how the data should be read.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.
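A minimal usage sketch, assuming model is an instance of a concrete encoder and that the username and email below are placeholders for your own details (referral_code is left at its default):
>>> model.request_api_key(username='my_username', email='me@example.com')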

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI
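A short sketch, assuming model is a concrete encoder instance whose collection has already been populated with add_documents, and that the query text is arbitrary:
>>> results = model.search('How do I get a refund?', num_results=5)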

-
- -
-
-property test_word
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help users avoid mis-spelling the name of the model. See the usage sketch after the parameter list.

-

# TODO: Improve model URL validation so that it does not include the final number in the URL string.

-
-
Parameters
-
    -
  • model_url – The URL of the model in question

  • -
  • list_of_urls – The list of URLs for the model in question
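A usage sketch for this class method, with placeholder URLs; how a failed validation is reported (return value versus raised error) is not stated here:
>>> urls = ['https://tfhub.dev/example/qa-model/1', 'https://tfhub.dev/example/qa-model/2']
>>> BaseQA2Vec.validate_model_url('https://tfhub.dev/example/qa-model/1', urls)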

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
- - -
- -
-
- -
- -
-


-
-
-
- -
- -
\ No newline at end of file diff --git a/docs/vectorhub.bi_encoders.qa.sentence_transformers.html b/docs/vectorhub.bi_encoders.qa.sentence_transformers.html deleted file mode 100644 index 1d4218cb..00000000 --- a/docs/vectorhub.bi_encoders.qa.sentence_transformers.html +++ /dev/null @@ -1,441 +0,0 @@ - vectorhub.bi_encoders.qa.sentence_transformers package — VectorHub 0.1 documentation

vectorhub.bi_encoders.qa.sentence_transformers package

-
-

Submodules

-
-
-

vectorhub.bi_encoders.qa.sentence_transformers.distilroberta_qa module

-

Model Name: Distilled Roberta QA

-

Vector Length: 768 (default)

-
Description:
A distilled RoBERTa QA model trained on the MS MARCO dataset, from sbert.net by UKPLab.
-

Paper: https://arxiv.org/abs/1908.10084

-

Repository: https://github.com/UKPLab/sentence-transformers

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-08-27

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-sentence-transformers]

-

Example:

-

-
-
-
`
#pip install vectorhub[encoders-text-sentence-transformers]
from vectorhub.bi_encoders.qa.sentence_transformers import DistilRobertaQA2Vec
model = DistilRobertaQA2Vec('distilroberta-base-msmarco-v1')
model.encode("I enjoy taking long walks along the beach with my dog.")
`
-
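A common way to use a QA bi-encoder like this one is to embed a question and several candidate answers, then rank the candidates by cosine similarity. The sketch below is only indicative: it assumes the default 'distilroberta-base-msmarco-v1' weights listed under urls, that encode_question/encode_answer return plain lists of floats, and that numpy is available; the question and candidate strings are invented.

```python
# Hedged sketch: rank candidate answers for a question with DistilRobertaQA2Vec.
import numpy as np
from vectorhub.bi_encoders.qa.sentence_transformers import DistilRobertaQA2Vec

model = DistilRobertaQA2Vec()  # default: 'distilroberta-base-msmarco-v1'

question = "How do I keep my dog entertained?"
candidates = [
    "Long walks along the beach are a great way to tire out a dog.",
    "The stock market closed higher today.",
]

q_vec = np.array(model.encode_question(question))
answer_vecs = np.array([model.encode_answer(c) for c in candidates])

# Cosine similarity between the question and each candidate answer.
scores = answer_vecs @ q_vec / (
    np.linalg.norm(answer_vecs, axis=1) * np.linalg.norm(q_vec)
)
print(candidates[int(np.argmax(scores))])
```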
-
-class vectorhub.bi_encoders.qa.sentence_transformers.distilroberta_qa.DistilRobertaQA2Vec(model_url='distilroberta-base-msmarco-v1')
-

Bases: vectorhub.bi_encoders.qa.base.BaseQA2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(strings: List[str], context_strings: List[str] = None, string_type: str = 'answer')
-

Bulk encode question/answer using LAReQA model. -:param String: List of strings. -:param Context_string: List of context of the strings. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = LAReQA2Vec() ->>> model.bulk_encode(“Why?”, string_type=’answer’)

-
- -
-
-bulk_encode_answers(answers: List[str])
-
- -
-
-bulk_encode_question(questions: list)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(string: str, context_string: str = None, string_type: str = 'answer')
-

Encode question/answer using LAReQA model. -:param String: Any string -:param Context_string: The context of the string. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = LAReQA2Vec() ->>> model.encode_answer(“Why?”)

-
- -
-
-encode_answer(answer: str, context: str = None)
-
- -
-
-encode_question(question: str)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'distilroberta-base-msmarco-v1': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
diff --git a/docs/vectorhub.bi_encoders.qa.tfhub.html b/docs/vectorhub.bi_encoders.qa.tfhub.html deleted file mode 100644 index 6a42c713..00000000 (deleted Sphinx page: vectorhub.bi_encoders.qa.tfhub package)

vectorhub.bi_encoders.qa.tfhub package

-
-

Submodules

-
-
-

vectorhub.bi_encoders.qa.tfhub.lareqa_qa module

-

Model Name: LAReQA: Language-agnostic answer retrieval from a multilingual pool

-

Vector Length: 512 (default)

-
Description:
We present LAReQA, a challenging new benchmark for language-agnostic answer retrieval from a multilingual candidate pool. Unlike previous cross-lingual tasks, LAReQA tests for "strong" cross-lingual alignment, requiring semantically related cross-language pairs to be closer in representation space than unrelated same-language pairs. Building on multilingual BERT (mBERT), we study different strategies for achieving strong alignment. We find that augmenting training data via machine translation is effective, and improves significantly over using mBERT out-of-the-box. Interestingly, the embedding baseline that performs best on LAReQA falls short of competing baselines on zero-shot variants of our task that only target "weak" alignment. This finding underscores our claim that language-agnostic retrieval is a substantively new kind of cross-lingual evaluation.
-

Paper: https://arxiv.org/abs/2004.05484

-

Repository: https://tfhub.dev/google/LAReQA/mBERT_En_En/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-04-11

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-
`
#pip install vectorhub[encoders-text-tfhub]
from vectorhub.bi_encoders.qa.tfhub import LAReQA2Vec
model = LAReQA2Vec()
model.encode_question('How is the weather today?')
model.encode_answer('The weather is great today.')
`
-
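Because LAReQA is trained for language-agnostic retrieval, one typical use is to embed candidate answers from several languages once and then retrieve them with a question in any language. A rough sketch, assuming the default mBERT_En_En weights, list-like vectors, and numpy; the answer strings (including the French one) are invented and dot-product ranking is just one simple scoring choice:

```python
# Hedged sketch: language-agnostic answer retrieval with LAReQA2Vec.
import numpy as np
from vectorhub.bi_encoders.qa.tfhub import LAReQA2Vec

model = LAReQA2Vec()  # default: https://tfhub.dev/google/LAReQA/mBERT_En_En/1

answers = [
    "The weather is great today.",
    "Il fait tres beau aujourd'hui.",  # illustrative French candidate
    "The library closes at 9pm.",
]
answer_vecs = np.array(model.bulk_encode_answers(answers))

question_vec = np.array(model.encode_question("How is the weather today?"))

# Rank candidates by dot-product similarity in the shared embedding space.
ranking = np.argsort(-answer_vecs @ question_vec)
print([answers[i] for i in ranking])
```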
-
-class vectorhub.bi_encoders.qa.tfhub.lareqa_qa.LAReQA2Vec(model_url='https://tfhub.dev/google/LAReQA/mBERT_En_En/1', vector_length=512)
-

Bases: vectorhub.bi_encoders.qa.base.BaseQA2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(strings: List[str], context_strings: List[str] = None, string_type: str = 'answer')
-

Bulk encode question/answer using LAReQA model. -:param String: List of strings. -:param Context_string: List of context of the strings. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = LAReQA2Vec() ->>> model.bulk_encode(“Why?”, string_type=’answer’)

-
- -
-
-bulk_encode_answers(answers: List[str], contexts: List[str] = None)
-
- -
-
-bulk_encode_question(questions: list)
-

Encode questions using LAReQA model. -Example:

-
>>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import *
->>> model = LAReQA2Vec()
->>> model.encode_question(["Why?", "Who?"])
-
-
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(string: str, context_string: str = None, string_type: str = 'answer')
-

Encode question/answer using LAReQA model. -:param String: Any string -:param Context_string: The context of the string. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = LAReQA2Vec() ->>> model.encode_answer(“Why?”)

-
- -
-
-encode_answer(answer: str, context: str = None)
-

Encode answer using LAReQA model. -Example:

-
>>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import *
->>> model = LAReQA2Vec()
->>> model.encode_answer("Why?")
-
-
-
- -
-
-encode_question(question: str)
-

Encode the question using LAReQA model. -Example:

-
>>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import *
->>> model = LAReQA2Vec()
->>> model.encode_question("Why?")
-
-
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/LAReQA/mBERT_En_En/1': {}, 'https://tfhub.dev/google/LAReQA/mBERT_X_X/1': {}, 'https://tfhub.dev/google/LAReQA/mBERT_X_X_mono/1': {}, 'https://tfhub.dev/google/LAReQA/mBERT_X_Y/1': {}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.bi_encoders.qa.tfhub.use_multi_qa module

-
-
-class vectorhub.bi_encoders.qa.tfhub.use_multi_qa.USEMultiQA2Vec(model_url='https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3')
-

Bases: vectorhub.bi_encoders.qa.tfhub.use_qa.USEQA2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(strings: List[str], context_strings: List[str] = None, string_type: str = 'answer')
-

Bulk encode question/answer using LAReQA model. -:param String: List of strings. -:param Context_string: List of context of the strings. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = USEQA2Vec() ->>> model.bulk_encode(“Why?”, string_type=’answer’)

-
- -
-
-bulk_encode_answers(answers: List[str], contexts: List[str] = None)
-
- -
-
-bulk_encode_questions(questions: List[str])
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(string: str, context_string: str = None, string_type: str = 'answer')
-

Encode question/answer using LAReQA model. -:param String: Any string -:param Context_string: The context of the string. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = USEQA2Vec() ->>> model.encode_answer(“Why?”)

-
- -
-
-encode_answer(answer: str, context: str = None)
-
- -
-
-encode_question(question: str)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.bi_encoders.qa.tfhub.use_qa module

-

Model Name: Universal Sentence Encoder Question Answering

-

Vector Length: 512 (default)

-
Description:
- Developed by researchers at Google, 2019, v2 [1].
- It is trained on a variety of data sources and tasks, with the goal of learning text representations that are useful out-of-the-box to retrieve an answer given a question, as well as questions and answers across different languages.
- It can also be used in other applications, including any type of text classification, clustering, etc.
- The multi-task training setup is based on the paper [Learning Cross-lingual Sentence Representations via a Multi-task Dual Encoder](https://arxiv.org/pdf/1810.12836.pdf).
- Achieved 56.1 on the dev set and 46.2 on the train set in SQuAD Retrieval.
-

Paper:

-

Repository: https://tfhub.dev/google/universal-sentence-encoder-qa/3

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-03-11

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub-tftext]

-

Example:

-

-
-
-
`
#pip install vectorhub[encoders-text-tfhub]
from vectorhub.bi_encoders.qa.tfhub import USEQA2Vec
model = USEQA2Vec()
model.encode_question('How is the weather today?')
model.encode_answer('The weather is great today.')
`
-
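As with the other QA bi-encoders on this page, questions and answers are embedded into the same 512-dimensional space, so a batch of questions can be scored against a batch of answers with a single matrix product. A minimal sketch, assuming numpy and that the documented bulk methods return list-like 2-D arrays; the strings are invented.

```python
# Hedged sketch: score a batch of questions against a batch of answers with USEQA2Vec.
import numpy as np
from vectorhub.bi_encoders.qa.tfhub import USEQA2Vec

model = USEQA2Vec()

questions = ["How is the weather today?", "When does the office open?"]
answers = ["The weather is great today.", "Our office opens at 9am."]

q = np.array(model.bulk_encode_questions(questions))  # expected shape (2, 512)
a = np.array(model.bulk_encode_answers(answers))      # expected shape (2, 512)

scores = q @ a.T                # scores[i, j]: similarity of question i and answer j
print(scores.argmax(axis=1))    # best answer index for each question
```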
-
-class vectorhub.bi_encoders.qa.tfhub.use_qa.USEQA2Vec(model_url='https://tfhub.dev/google/universal-sentence-encoder-qa/3')
-

Bases: vectorhub.bi_encoders.qa.base.BaseQA2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(strings: List[str], context_strings: List[str] = None, string_type: str = 'answer')
-

Bulk encode question/answer using LAReQA model. -:param String: List of strings. -:param Context_string: List of context of the strings. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = USEQA2Vec() ->>> model.bulk_encode(“Why?”, string_type=’answer’)

-
- -
-
-bulk_encode_answers(answers: List[str], contexts: List[str] = None)
-
- -
-
-bulk_encode_questions(questions: List[str])
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(string: str, context_string: str = None, string_type: str = 'answer')
-

Encode question/answer using LAReQA model. -:param String: Any string -:param Context_string: The context of the string. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = USEQA2Vec() ->>> model.encode_answer(“Why?”)

-
- -
-
-encode_answer(answer: str, context: str = None)
-
- -
-
-encode_question(question: str)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/universal-sentence-encoder-qa/3': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
diff --git a/docs/vectorhub.bi_encoders.qa.torch_transformers.html b/docs/vectorhub.bi_encoders.qa.torch_transformers.html deleted file mode 100644 index 1634171b..00000000 (deleted Sphinx page: vectorhub.bi_encoders.qa.torch_transformers package)

vectorhub.bi_encoders.qa.torch_transformers package

-
-

Submodules

-
-
-

vectorhub.bi_encoders.qa.torch_transformers.dpr module

-

Model Name: Dense Passage Retrieval

-

Vector Length: 768 (default)

-

Description: -Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets, our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA benchmarks.

-

Paper: https://arxiv.org/abs/2004.04906

-

Repository:

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-10-04

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-torch-transformers]

-

Example:

-

-
-
-
`
#pip install vectorhub[encoders-text-torch-transformers]
from vectorhub.bi_encoders.qa.torch_transformers import DPR2Vec
model = DPR2Vec()
model.encode_question('How is the weather today?')
model.encode_answer('The weather is great today.')
`
-
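DPR is a dual encoder: questions and passages are embedded separately and compared with an inner product, which is how the sketch below ranks candidate passages. It is illustrative only; it assumes the DPR2Vec interface documented on this page, that bulk_encode_answers accepts a list of strings and returns list-like vectors, and that numpy is available. The passages and question are invented.

```python
# Hedged sketch: dense passage ranking with DPR2Vec (inner-product scoring).
import numpy as np
from vectorhub.bi_encoders.qa.torch_transformers import DPR2Vec

model = DPR2Vec()

passages = [
    "The weather is great today.",
    "Dense retrieval replaces sparse methods such as TF-IDF or BM25.",
    "The museum is closed on Mondays.",
]
# Assumption: bulk_encode_answers accepts a list of strings.
passage_vecs = np.array(model.bulk_encode_answers(passages))

question_vec = np.array(model.encode_question("What can replace BM25 for retrieval?"))

# DPR scores candidates by the inner product between question and passage vectors.
scores = passage_vecs @ question_vec
print(passages[int(scores.argmax())])
```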
-
-class vectorhub.bi_encoders.qa.torch_transformers.dpr.DPR2Vec
-

Bases: vectorhub.bi_encoders.qa.base.BaseQA2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(strings: List[str], string_type: str = 'answer')
-

Bulk encode question/answer using LAReQA model. -:param String: List of strings. -:param Context_string: List of context of the strings. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = LAReQA2Vec() ->>> model.bulk_encode(“Why?”, string_type=’answer’)

-
- -
-
-bulk_encode_answers(answers: str)
-

Bulk encode the answers with DPR.

-
- -
-
-bulk_encode_questions(questions: str)
-

Bulk encode the question

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(string: str, string_type: str = 'answer')
-

Encode question/answer using LAReQA model. -:param String: Any string -:param Context_string: The context of the string. -:param string_type: question/answer.

-

Example: ->>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * ->>> model = LAReQA2Vec() ->>> model.encode_answer(“Why?”)

-
- -
-
-encode_answer(answer: str)
-

Encode an answer with DPR.

-
- -
-
-encode_question(question)
-

Encode a question with DPR.

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
diff --git a/docs/vectorhub.bi_encoders.text_image.html b/docs/vectorhub.bi_encoders.text_image.html deleted file mode 100644 index 6090dd92..00000000 (deleted Sphinx page: vectorhub.bi_encoders.text_image package)

vectorhub.bi_encoders.text_image package

- -
-

Module contents

-
-
diff --git a/docs/vectorhub.bi_encoders.text_image.torch.html b/docs/vectorhub.bi_encoders.text_image.torch.html deleted file mode 100644 index 0313229a..00000000 (deleted Sphinx page: vectorhub.bi_encoders.text_image.torch package)

vectorhub.bi_encoders.text_image.torch package

-
-

Submodules

-
-
-

vectorhub.bi_encoders.text_image.torch.clip2vec module

-
-
-

Module contents

-
-
diff --git a/docs/vectorhub.bi_encoders.text_text.html b/docs/vectorhub.bi_encoders.text_text.html deleted file mode 100644 index cfa9678c..00000000 (deleted Sphinx page: vectorhub.bi_encoders.text_text package, navigation and package contents listing only)
diff --git a/docs/vectorhub.bi_encoders.text_text.sentence_transformers.html b/docs/vectorhub.bi_encoders.text_text.sentence_transformers.html deleted file mode 100644 index 046f6582..00000000 (deleted Sphinx page: vectorhub.bi_encoders.text_text.sentence_transformers package)

vectorhub.bi_encoders.text_text.sentence_transformers package

-
-

Submodules

-
-
-

vectorhub.bi_encoders.text_text.sentence_transformers.distilroberta_qa module

-
-
-

Module contents

-
-
diff --git a/docs/vectorhub.bi_encoders.text_text.tfhub.html b/docs/vectorhub.bi_encoders.text_text.tfhub.html deleted file mode 100644 index bc7dfb69..00000000 (deleted Sphinx page: vectorhub.bi_encoders.text_text.tfhub package)

vectorhub.bi_encoders.text_text.tfhub package

-
-

Submodules

-
-
-

vectorhub.bi_encoders.text_text.tfhub.lareqa_qa module

-
-
-

vectorhub.bi_encoders.text_text.tfhub.use_multi_qa module

-
-
-

vectorhub.bi_encoders.text_text.tfhub.use_qa module

-
-
-

Module contents

-
-
diff --git a/docs/vectorhub.bi_encoders.text_text.torch_transformers.html b/docs/vectorhub.bi_encoders.text_text.torch_transformers.html deleted file mode 100644 index 199892b8..00000000 (deleted Sphinx page: vectorhub.bi_encoders.text_text.torch_transformers package)

vectorhub.bi_encoders.text_text.torch_transformers package

-
-

Submodules

-
-
-

vectorhub.bi_encoders.text_text.torch_transformers.dpr module

-
-
-

Module contents

-
-
diff --git a/docs/vectorhub.encoders.audio.html b/docs/vectorhub.encoders.audio.html deleted file mode 100644 index 867df27f..00000000 (deleted Sphinx page: vectorhub.encoders.audio package)

vectorhub.encoders.audio package

- -
-

Submodules

-
-
-

vectorhub.encoders.audio.base module

-
-
-class vectorhub.encoders.audio.base.BaseAudio2Vec
-

Bases: vectorhub.base.Base2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-
A method that specifies how the audio data is read.
-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-
Validate that the model URL belongs to the list of URLs. This helps users avoid misspelling the name of the model.

# TODO: Improve model URL validation so that it does not include the final number in the URL string.

Parameters
  • model_url – The URL of the model in question
  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
- -
-
-

Module contents

-
-
diff --git a/docs/vectorhub.encoders.audio.pytorch.html b/docs/vectorhub.encoders.audio.pytorch.html deleted file mode 100644 index 50fa2a17..00000000 (deleted Sphinx page: vectorhub.encoders.audio.pytorch package)

vectorhub.encoders.audio.pytorch package

-
-

Submodules

-
-
-

vectorhub.encoders.audio.pytorch.wav2vec module

-
-
-class vectorhub.encoders.audio.pytorch.wav2vec.Wav2Vec(model_url: str = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean')
-

Example

-
>>> from vectorhub.encoders.audio import Wav2Vec
->>> encoder = Wav2Vec()
->>> encoder.encode("...")
-
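In practice the elided argument above is audio data; one workflow is to load a file with read(), which resamples to 16 kHz by default, and pass the result to encode(). The sketch below is a non-authoritative illustration: the file path is a made-up placeholder, the import assumes the pytorch subpackage re-exports Wav2Vec the way the tfhub examples elsewhere on these pages do, and 'mean' pooling is simply the documented default.

```python
# Hedged sketch: embed a local recording with Wav2Vec.
from vectorhub.encoders.audio.pytorch import Wav2Vec  # assumed re-export path

encoder = Wav2Vec()  # default checkpoint: wav2vec_small.pt

# 'my_recording.wav' is a placeholder path; read() resamples to 16 kHz by default.
waveform = encoder.read('my_recording.wav', new_sampling_rate=16000)

# 'mean' pools the frame-level features into a single fixed-length vector.
vector = encoder.encode(waveform, vector_operation='mean')
print(len(vector))
```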
-
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

An method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec2_vox_960h.pt': {}, 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt': {}, 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_100h.pt': {}, 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_10m.pt': {}, 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_960h.pt': {}, 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox.pt': {}, 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h.pt': {}, 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m.pt': {}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

Module contents

-
-
diff --git a/docs/vectorhub.encoders.audio.tfhub.html b/docs/vectorhub.encoders.audio.tfhub.html deleted file mode 100644 index dd8c4512..00000000 (deleted Sphinx page: vectorhub.encoders.audio.tfhub package)

vectorhub.encoders.audio.tfhub package

-
-

Submodules

-
-
-

vectorhub.encoders.audio.tfhub.speech_embedding module

-

Model Name: Speech Embedding

-

Vector Length: 96 (default)

-

Description: -With the rise of low power speech-enabled devices, there is a growing demand to quickly produce models for recognizing arbitrary sets of keywords. As with many machine learning tasks, one of the most challenging parts in the model creation process is obtaining a sufficient amount of training data. In this paper, we explore the effectiveness of synthesized speech data in training small spoken term detection models of around 400k parameters. Instead of training such models directly on the audio or low level features such as MFCCs, we use a pre-trained speech embedding model trained to extract useful features for keyword spotting models. Using this speech embedding, we show that a model which detects 10 keywords when trained on only synthetic speech is equivalent to a model trained on over 500 real examples. We also show that a model without our speech embeddings would need to be trained on over 4000 real examples to reach the same accuracy.

-

Paper: https://arxiv.org/abs/2002.01322

-

Repository: https://tfhub.dev/google/speech_embedding/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-01-31

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-
`
#pip install vectorhub[encoders-audio-tfhub]
from vectorhub.encoders.audio.tfhub import SpeechEmbedding2Vec
model = SpeechEmbedding2Vec()
vector = model.encode('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')
`
-
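For keyword-spotting style workloads you usually embed many short clips at once; bulk_encode accepts a list of audio inputs and uses the documented 'mean' pooling by default. The sketch below is a guess at a typical call pattern: it reuses the sample URL from the example above, while 'local_clip.wav' and the assumption that read() output can be passed straight to bulk_encode are mine.

```python
# Hedged sketch: embed several clips at once with SpeechEmbedding2Vec.
from vectorhub.encoders.audio.tfhub import SpeechEmbedding2Vec

model = SpeechEmbedding2Vec()

# Sample clip from the example above; 'local_clip.wav' is a placeholder path.
clips = [
    model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav'),
    model.read('local_clip.wav'),
]

# bulk_encode mirrors encode() over a list, pooling frames with 'mean' by default.
vectors = model.bulk_encode(clips, vector_operation='mean')
print(len(vectors), len(vectors[0]))  # expect 2 vectors of length 96
```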
-
-class vectorhub.encoders.audio.tfhub.speech_embedding.SpeechEmbedding2Vec(model_url: str = 'https://tfhub.dev/google/speech_embedding/1', signature: str = 'default')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean')
-

Encode the vector. -Example:

-
>>> from vectorhub.encoders.audio import SpeechEmbedding2Vec
->>> encoder = SpeechEmbedding2Vec()
->>> encoder.encode(...)
-
-
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

An method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/speech_embedding/1': {'vector_length': 96}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.audio.tfhub.trill module

-

Model Name: Trill - Triplet Loss Network

-

Vector Length: 512 (default)

-

Description: -The ultimate goal of transfer learning is to reduce labeled data requirements by exploiting a pre-existing embedding model trained for different datasets or tasks. The visual and language communities have established benchmarks to compare embeddings, but the speech community has yet to do so. This paper proposes a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. The proposed representation outperforms other representations on the benchmark, and even exceeds state-of-the-art performance on a number of transfer learning tasks. The embedding is trained on a publicly available dataset, and it is tested on a variety of low-resource downstream tasks, including personalization tasks and medical domain. The benchmark, models, and evaluation code are publicly released.

-

Paper: https://arxiv.org/abs/2002.12764

-

Repository: https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-02-25

-

Limitations: Not stated.

-

Installation: pip install vectorhub['encoders-audio-tfhub']

-

Example:

-

-
-
-
`python
#pip install vectorhub[encoders-audio-tfhub]
from vectorhub.encoders.audio.tfhub import Trill2Vec
model = Trill2Vec()
sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')
model.encode(sample)
`
-
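Since TRILL targets non-semantic speech tasks (for example speaker or paralinguistic similarity), a quick sanity check is to compare two clips by the cosine similarity of their embeddings. A sketch under stated assumptions: encode() returns a 512-length list, numpy is installed, and the second file path is a placeholder.

```python
# Hedged sketch: compare two clips with Trill2Vec embeddings.
import numpy as np
from vectorhub.encoders.audio.tfhub import Trill2Vec

model = Trill2Vec()

a = np.array(model.encode(model.read(
    'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')))
b = np.array(model.encode(model.read('another_clip.wav')))  # placeholder path

cosine = float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
print(f"cosine similarity: {cosine:.3f}")
```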
-
-class vectorhub.encoders.audio.tfhub.trill.Trill2Vec(model_url: str = 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3', layer: str = 'embedding')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean')
-

Example

-
>>> from encoders.audio.trill import Trill2Vec
->>> encoder = Trill2Vec()
->>> encoder.encode(...)
-
-
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

An method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.audio.tfhub.trill_distilled module

-

Model Name: Trill Distilled - Triplet Loss Network

-

Vector Length: 2048 (default)

-

Description: -The ultimate goal of transfer learning is to reduce labeled data requirements by exploiting a pre-existing embedding model trained for different datasets or tasks. The visual and language communities have established benchmarks to compare embeddings, but the speech community has yet to do so. This paper proposes a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. The proposed representation outperforms other representations on the benchmark, and even exceeds state-of-the-art performance on a number of transfer learning tasks. The embedding is trained on a publicly available dataset, and it is tested on a variety of low-resource downstream tasks, including personalization tasks and medical domain. The benchmark, models, and evaluation code are publicly released.

-

Paper: https://arxiv.org/abs/2002.12764

-

Repository: https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-02-25

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-
`python
#pip install vectorhub[encoders-audio-tfhub]
from vectorhub.encoders.audio.tfhub import TrillDistilled2Vec
model = TrillDistilled2Vec()
sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')
model.encode(sample)
`
-
-
-class vectorhub.encoders.audio.tfhub.trill_distilled.TrillDistilled2Vec(model_url: str = 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3', layer: str = 'embedding')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean', sample_rate=16000)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

An method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3': {'vector_length': 2048}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.audio.tfhub.vggish module

-

Model Name: VGGish

-

Vector Length: 128 (default)

-
Description:
An audio event embedding model trained on the YouTube-8M dataset. VGGish should be used:
- as a high-level feature extractor: the 128-D embedding output of VGGish can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows quickly creating specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end.
- as a warm start: the VGGish model parameters can be used to initialize part of a larger model which allows faster fine-tuning and model exploration.
-

Paper:

-

Repository: https://tfhub.dev/google/vggish/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-03-11

-

Limitations: -VGGish has been trained on millions of YouTube videos and although these are very diverse, there can still be a domain -mismatch between the average YouTube video and the audio inputs expected for any given task. You should expect to do some -amount of fine-tuning and calibration to make VGGish usable in any system that you build.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-
`python
#pip install vectorhub[encoders-audio-tfhub]
from vectorhub.encoders.audio.tfhub import Vggish2Vec
model = Vggish2Vec()
sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')
model.encode(sample)
`
-
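The description above positions VGGish as a feature extractor for small downstream models; the sketch below feeds its 128-D embeddings into a scikit-learn classifier to illustrate that pattern. Everything here is illustrative: the file paths and labels are invented, encode() is assumed to return a 128-length list, and scikit-learn is an extra dependency not required by vectorhub.

```python
# Hedged sketch: use VGGish embeddings as features for a small audio classifier.
import numpy as np
from sklearn.linear_model import LogisticRegression
from vectorhub.encoders.audio.tfhub import Vggish2Vec

model = Vggish2Vec()

# Placeholder dataset: (audio file, label) pairs you would supply yourself.
files = ['dog_bark_1.wav', 'dog_bark_2.wav', 'siren_1.wav', 'siren_2.wav']
labels = [0, 0, 1, 1]

features = np.array([model.encode(model.read(f)) for f in files])  # expected shape (4, 128)

clf = LogisticRegression(max_iter=1000).fit(features, labels)
print(clf.predict(features[:1]))
```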
-
-class vectorhub.encoders.audio.tfhub.vggish.Vggish2Vec(model_url: str = 'https://tfhub.dev/google/vggish/1')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean')
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

An method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/vggish/1': {'vector_length': 128}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.audio.tfhub.yamnet module

-

Model Name: Yamnet

-

Vector Length: 1024 (default)

-
Description:
YAMNet is an audio event classifier that takes an audio waveform as input and makes independent predictions for each of 521 audio events from the AudioSet ontology. The model uses the MobileNet v1 architecture and was trained on the AudioSet corpus. This model was originally released in the TensorFlow Model Garden, where we have the model source code, the original model checkpoint, and more detailed documentation. This model can be used:
-
    -
  • as a stand-alone audio event classifier that provides a reasonable baseline across a wide variety of audio events.

  • -
  • as a high-level feature extractor: the 1024-D embedding output of YAMNet can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows quickly creating specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end.

  • -
  • as a warm start: the YAMNet model parameters can be used to initialize part of a larger model which allows faster fine-tuning and model exploration.

  • -
-

Paper:

-

Repository: https://tfhub.dev/google/yamnet/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-03-11

-

Limitations: -YAMNet’s classifier outputs have not been calibrated across classes, so you cannot directly treat -the outputs as probabilities. For any given task, you will very likely need to perform a calibration with task-specific data -which lets you assign proper per-class score thresholds and scaling. -YAMNet has been trained on millions of YouTube videos and although these are very diverse, there can still be a domain mismatch -between the average YouTube video and the audio inputs expected for any given task. You should expect to do some amount of -fine-tuning and calibration to make YAMNet usable in any system that you build.

-

Installation: pip install vectorhub[encoders-audio-tfhub]

-

Example:

-

-
-
-
`
#pip install vectorhub[encoders-audio-tfhub]
from vectorhub.encoders.audio.tfhub import Yamnet2Vec
model = Yamnet2Vec()
sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')
model.encode(sample)
`
-
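Beyond classification, the 1024-D YAMNet embedding can back a simple nearest-neighbour audio search. The sketch below is indicative only: the clip corpus and query path are placeholders, numpy is assumed, and Euclidean distance is just one reasonable scoring choice (cosine similarity would work as well).

```python
# Hedged sketch: nearest-neighbour audio search over YAMNet embeddings.
import numpy as np
from vectorhub.encoders.audio.tfhub import Yamnet2Vec

model = Yamnet2Vec()

library = ['clip_a.wav', 'clip_b.wav', 'clip_c.wav']               # placeholder corpus
index = np.array([model.encode(model.read(p)) for p in library])   # expected shape (3, 1024)

query = np.array(model.encode(model.read('query_clip.wav')))       # placeholder query

# Smallest Euclidean distance wins.
nearest = library[int(np.linalg.norm(index - query, axis=1).argmin())]
print(nearest)
```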
-
-class vectorhub.encoders.audio.tfhub.yamnet.Yamnet2Vec(model_url: str = 'https://tfhub.dev/google/yamnet/1')
-

Bases: vectorhub.encoders.audio.base.BaseAudio2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(audios, vector_operation='mean', layer='embeddings')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(audio, vector_operation='mean', layer='embeddings')
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(audio: str, new_sampling_rate: int = 16000)
-

An method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-urls = {'https://tfhub.dev/google/yamnet/1': {'vector_length': 1024}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

Module contents

-
-
diff --git a/docs/vectorhub.encoders.audio.vectorai.html b/docs/vectorhub.encoders.audio.vectorai.html deleted file mode 100644 index 3a490e21..00000000 (deleted Sphinx page: vectorhub.encoders.audio.vectorai package)

vectorhub.encoders.audio.vectorai package

-
-

Submodules

-
-
-

vectorhub.encoders.audio.vectorai.vi_encoder module

-
Vector AI's deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves how the embedding works. These models are selected to work out-of-the-box after testing for their success on our end.

To get access to Vector AI, request a username and API key (see the request_api_key method), then pass them to the encoder.
-

Example

-
>>> from vectorhub.encoders.audio.vectorai import ViAudio2Vec
->>> model = ViAudio2Vec(username, api_key)
->>> model.encode("audio_file.wav")
-
-
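Spelled out as a script rather than a doctest, the same flow looks roughly like the sketch below. The credentials are placeholders you would obtain through request_api_key / gh.vctr.ai, and the import path assumes the vectorai subpackage re-exports ViAudio2Vec from vi_encoder.

```python
# Hedged sketch: encode audio through Vector AI's hosted model.
from vectorhub.encoders.audio.vectorai import ViAudio2Vec  # assumed re-export path

# Placeholder credentials; request real ones as described below.
model = ViAudio2Vec(username="my_username", api_key="my_api_key")

vector = model.encode("audio_file.wav")
print(model.vector_length)
```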
-
-
-class vectorhub.encoders.audio.vectorai.vi_encoder.ViAudio2Vec(username, api_key, url: str = 'https://api.vctr.ai', collection_name='base')
-

Bases: object

-
Request a username and API key from gh.vctr.ai.
:param username, api_key: You can request a username and API key from the Vector AI Github package using the request_api_key method.
:param url: URL for the Vector AI website.
:param collection_name: Not necessary for users.
-
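Putting the constructor parameters above together, a plausible end-to-end setup is sketched below; the credentials and file name are placeholders, and the exact entry point for requesting a key is not shown in this section.

```python
from vectorhub.encoders.audio.vectorai import ViAudio2Vec

# Placeholders: obtain these via the request_api_key flow described above.
username, api_key = 'my_username', 'my_api_key'

model = ViAudio2Vec(username, api_key)
vector = model.encode('audio_file.wav')
print(len(vector))   # expected to match model.vector_length
```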
-
-encode(audio)
-
- -
-
-property vector_length
-
- -
- -
-
-

Module contents

-

The Vector AI deployed model

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.code.html b/docs/vectorhub.encoders.code.html deleted file mode 100644 index 21c2b7d8..00000000 --- a/docs/vectorhub.encoders.code.html +++ /dev/null @@ -1,261 +0,0 @@ - - - - - - - - - - vectorhub.encoders.code package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.code package

- -
-

Module contents

-
-
- - -
- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.code.transformers.html b/docs/vectorhub.encoders.code.transformers.html deleted file mode 100644 index f880eaaa..00000000 --- a/docs/vectorhub.encoders.code.transformers.html +++ /dev/null @@ -1,418 +0,0 @@ - - - - - - - - - - vectorhub.encoders.code.transformers package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
-
-
-
- -
-

vectorhub.encoders.code.transformers package

-
-

Submodules

-
-
-

vectorhub.encoders.code.transformers.codebert module

-

Model Name: CodeBert

-

Vector Length: 768 (default)

-

Description: We present CodeBERT, a bimodal pre-trained model for programming language (PL) and natural language (NL). CodeBERT learns general-purpose representations that support downstream NL-PL applications such as natural language code search, code documentation generation, etc. We develop CodeBERT with Transformer-based neural architecture, and train it with a hybrid objective function that incorporates the pre-training task of replaced token detection, which is to detect plausible alternatives sampled from generators. This enables us to utilize both bimodal data of NL-PL pairs and unimodal data, where the former provides input tokens for model training while the latter helps to learn better generators. We evaluate CodeBERT on two NL-PL applications by fine-tuning model parameters. Results show that CodeBERT achieves state-of-the-art performance on both natural language code search and code documentation generation tasks. Furthermore, to investigate what type of knowledge is learned in CodeBERT, we construct a dataset for NL-PL probing, and evaluate in a zero-shot setting where parameters of pre-trained models are fixed. Results show that CodeBERT performs better than previous pre-trained models on NL-PL probing.

-

Paper: https://arxiv.org/abs/2002.08155

-

Repository: https://github.com/microsoft/CodeBERT

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-02-19

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-code-transformers]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-code-transformers]
from vectorhub.encoders.code.transformers import Code2Vec
model = Code2Vec()
sample = model.encode('import pandas as pd')
`

-
-
-class vectorhub.encoders.code.transformers.codebert.Code2Vec(model_name='microsoft/codebert-base')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(descriptions: List[str], codes: List[str] = None, pooling_method: str = 'mean', truncation=True)
-

Pooling method is either ‘pooler_output’ or ‘mean’.
Note: if it is ‘mean’, the last hidden state is mean-pooled.
:param descriptions: The descriptions of what the code snippets are doing.
:param codes: The code snippets themselves.
:param pooling_method: Pooling method, either ‘mean’ or ‘pooler_output’.
:param truncation: Whether the sentences should be truncated.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(description: str, code: str = None, pooling_method='mean', truncation=True)
-

Pooling method is either ‘pooler_output’ or ‘mean’.
Note: if it is ‘mean’, the last hidden state is mean-pooled.
:param description: The description of what the code is doing.
:param code: The code itself.
:param pooling_method: Pooling method, either ‘mean’ or ‘pooler_output’.
:param truncation: Whether the sentence should be truncated.

-
- -
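To make the description/code split above concrete, here is a hedged example; the snippet, description, and pooling choice are illustrative.

```python
from vectorhub.encoders.code.transformers import Code2Vec

model = Code2Vec()

# Encode a natural-language description together with the code it describes.
vector = model.encode(
    description='read a csv file into a dataframe',
    code='import pandas as pd\ndf = pd.read_csv("data.csv")',
    pooling_method='mean',   # per the docstring: 'mean' or 'pooler_output'
)
```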
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method specifying how to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'microsoft/codebert-base': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-

Module for all transformers-based Code2Vec models.

-
-
- - -
- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.face.html b/docs/vectorhub.encoders.face.html deleted file mode 100644 index 4a8ad75c..00000000 --- a/docs/vectorhub.encoders.face.html +++ /dev/null @@ -1,261 +0,0 @@ - - - - - - - - - - vectorhub.encoders.face package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.face package

- -
-

Module contents

-
-
- - -
- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.face.tf.html b/docs/vectorhub.encoders.face.tf.html deleted file mode 100644 index 64fa60a5..00000000 --- a/docs/vectorhub.encoders.face.tf.html +++ /dev/null @@ -1,456 +0,0 @@ - - - - - - - - - - vectorhub.encoders.face.tf package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
-
-
-
- -
-

vectorhub.encoders.face.tf package

-
-

Submodules

-
-
-

vectorhub.encoders.face.tf.face2vec module

-

Model Name: Face2Vec (FaceNet)

-

Vector Length: 512 (default)

-

Description: Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.

-

Paper: https://arxiv.org/pdf/1503.03832.pdf

-

Repository: N/A

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2015-03-12

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tf-face-detection]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tf-face-detection]
from vectorhub.encoders.face.tf import Face2Vec
model = Face2Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.face.tf.face2vec.Face2Vec(model_url: str = 'https://drive.google.com/u/0/uc?id=1PZ_6Zsy1Vb0s0JmjEmVd8FS99zoMCiN1&export=download', redownload=True)
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-

Bulk encode a list of images. Chunking should be handled outside of this method.

-
- -
-
-property cache_dir
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-extract_face(image_input, reshape_size=None)
-
- -
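A hedged sketch tying the read, extract_face, and encode methods above into one pipeline; the sample URL is the one used in the module-level example, and the idea of inspecting the extracted face before encoding is an assumption, not something stated in these docs.

```python
from vectorhub.encoders.face.tf import Face2Vec

model = Face2Vec()
image = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')

# Assumed: extract_face returns the detected/aligned face pixels for inspection.
face_pixels = model.extract_face(image)

vector = model.encode(image)   # the module-level example encodes the read image directly
```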
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-static is_url_working(url)
-
- -
-
-property model_path
-
- -
-
-read(image: str)
-

A method to read images.
:param image: An image link, bytes, or io.BytesIO data format.
:param as_gray: Read the image in black and white (grayscale).

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_face_landmarks(image_filename: str)
-

Show face landmarks

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-standardise_image(face_pixels)
-

Standardise the image for face pixels.

-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-property urls
-

A simple dictionary with urls and their vector lengths

-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

Module contents

-
-
- - -
- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.html b/docs/vectorhub.encoders.html deleted file mode 100644 index 4cd47096..00000000 --- a/docs/vectorhub.encoders.html +++ /dev/null @@ -1,405 +0,0 @@ - - - - - - - - - - vectorhub.encoders package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders package

-
-

Subpackages

-
- -
-
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.image.fastai.html b/docs/vectorhub.encoders.image.fastai.html deleted file mode 100644 index f3636a46..00000000 --- a/docs/vectorhub.encoders.image.fastai.html +++ /dev/null @@ -1,620 +0,0 @@ - - - - - - - - - - vectorhub.encoders.image.fastai package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.image.fastai package

-
-

Submodules

-
-
-

vectorhub.encoders.image.fastai.base module

-

The base class for FastAI encoders, since much of the logic can be reused simply by swapping the model.

-
-
-class vectorhub.encoders.image.fastai.base.FastAIBase(databunch=None, architecture=None)
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-abstract property extraction_layer
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.image.fastai.resnet module

-

Model Name: ResNet

-

Vector Length: 1024 (default)

-

Description: -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers—8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers.

-

Paper: https://arxiv.org/abs/1512.03385

-

Repository:

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2015-12-10

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-fastai]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-fastai]
from vectorhub.encoders.image.fastai import FastAIResnet2Vec
model = FastAIResnet2Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.image.fastai.resnet.FastAIResnet2Vec(architecture='resnet34', databunch=None)
-

Bases: vectorhub.encoders.image.fastai.base.FastAIBase

-

Refer to the possible_architectures property for the architectures that can be instantiated.
:param architecture: The name of the architecture.
:param databunch: A FastAI DataBunch that is used to instantiate a learner object.

-
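Since the docstring points at possible_architectures for valid backbone names, a hedged sketch of picking a non-default architecture is below; 'resnet50' is assumed to be among the returned names and should be checked against the property first.

```python
from vectorhub.encoders.image.fastai import FastAIResnet2Vec

model = FastAIResnet2Vec()              # defaults to architecture='resnet34'
print(model.possible_architectures)     # names accepted by the `architecture` argument

# Assumption: 'resnet50' appears in possible_architectures; substitute any listed name.
model50 = FastAIResnet2Vec(architecture='resnet50')
vector = model50.encode(model50.read('https://getvectorai.com/assets/hub-logo-with-text.png'))
```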
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-property architecture_mappings
-

Architecture mappings

-
- -
-
-bulk_encode(images)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-property extraction_layer
-

Here we selected the default to be layer_num 1 to extract the layer with the highest number of dimensions after it has been flattened.

-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-static is_url_working(url)
-
- -
-
-property possible_architectures
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.image.html b/docs/vectorhub.encoders.image.html deleted file mode 100644 index 39c8a938..00000000 --- a/docs/vectorhub.encoders.image.html +++ /dev/null @@ -1,448 +0,0 @@ - - - - - - - - - - vectorhub.encoders.image package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.image package

- -
-

Submodules

-
-
-

vectorhub.encoders.image.base module

-
-
-class vectorhub.encoders.image.base.BaseImage2Vec
-

Bases: vectorhub.base.Base2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.image.tensorflow.html b/docs/vectorhub.encoders.image.tensorflow.html deleted file mode 100644 index ba89d6e4..00000000 --- a/docs/vectorhub.encoders.image.tensorflow.html +++ /dev/null @@ -1,254 +0,0 @@ - - - - - - - - - - vectorhub.encoders.image.tensorflow package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- -
-
-
-
- -
-

vectorhub.encoders.image.tensorflow package

-
-

Submodules

-
-
-

vectorhub.encoders.image.tensorflow.face2vec module

-
-
-

Module contents

-
-
- - -
- -
-
- -
- -
-

-
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.image.tfhub.html b/docs/vectorhub.encoders.image.tfhub.html deleted file mode 100644 index e9177dcc..00000000 --- a/docs/vectorhub.encoders.image.tfhub.html +++ /dev/null @@ -1,2110 +0,0 @@ - - - - - - - - - - vectorhub.encoders.image.tfhub package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.image.tfhub package

-
-

Submodules

-
-
-

vectorhub.encoders.image.tfhub.bit module

-

Model Name: BiT - Big Transfer, General Visual Representation Learning (Small)

-

Vector Length: 2048 (default)

-

Description: Transfer of pre-trained representations improves sample efficiency and simplifies hyperparameter tuning when training deep neural networks for vision. We revisit the paradigm of pre-training on large supervised datasets and fine-tuning the model on a target task. We scale up pre-training, and propose a simple recipe that we call Big Transfer (BiT). By combining a few carefully selected components, and transferring using a simple heuristic, we achieve strong performance on over 20 datasets. BiT performs well across a surprisingly wide range of data regimes – from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark (VTAB). On small datasets, BiT attains 76.8% on ILSVRC-2012 with 10 examples per class, and 97.0% on CIFAR-10 with 10 examples per class. We conduct detailed analysis of the main components that lead to high transfer performance.

-

Paper: https://arxiv.org/abs/1912.11370

-

Repository: https://github.com/google-research/big_transfer

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-12-24

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import BitSmall2Vec
model = BitSmall2Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
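Beyond the single-image example above, BiT embeddings are usually compared with a similarity measure downstream; the cosine-similarity computation below is an assumption about usage, not part of the library.

```python
import numpy as np
from vectorhub.encoders.image.tfhub import BitSmall2Vec

model = BitSmall2Vec()
url = 'https://getvectorai.com/assets/hub-logo-with-text.png'
vec_a = np.array(model.encode(model.read(url)))
vec_b = np.array(model.encode(model.read(url)))

# Cosine similarity between the two 2048-d embeddings (identical inputs should score ~1.0).
similarity = float(vec_a @ vec_b / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b)))
print(similarity)
```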
-
-class vectorhub.encoders.image.tfhub.bit.BitSmall2Vec(model_url: str = 'https://tfhub.dev/google/bit/s-r50x1/1')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-

Bulk encode. Chunk size should be specified outside of the images.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/bit/s-r101x1/1': {'vector_length': 2048}, 'https://tfhub.dev/google/bit/s-r101x3/1': {'vector_length': 6144}, 'https://tfhub.dev/google/bit/s-r152x4/1': {'vector_length': 8192}, 'https://tfhub.dev/google/bit/s-r50x1/1': {'vector_length': 2048}, 'https://tfhub.dev/google/bit/s-r50x3/1': {'vector_length': 6144}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
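A small hedged example of the validation helper, using the urls mapping documented for BitSmall2Vec above; the exact failure behaviour for an unknown URL is an assumption (the docstring only says it helps catch mis-spellings).

```python
from vectorhub.encoders.image.tfhub import BitSmall2Vec

# A URL present in BitSmall2Vec.urls should validate cleanly; a mis-spelled one should not.
BitSmall2Vec.validate_model_url(
    'https://tfhub.dev/google/bit/s-r101x1/1',
    list(BitSmall2Vec.urls),
)
```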
- -
-
-

vectorhub.encoders.image.tfhub.bit_medium module

-

Model Name: BiT Medium - Big Transfer, General Visual Representation Learning (Medium)

-

Vector Length: 2048 (default)

-

Description: Transfer of pre-trained representations improves sample efficiency and simplifies hyperparameter tuning when training deep neural networks for vision. We revisit the paradigm of pre-training on large supervised datasets and fine-tuning the model on a target task. We scale up pre-training, and propose a simple recipe that we call Big Transfer (BiT). By combining a few carefully selected components, and transferring using a simple heuristic, we achieve strong performance on over 20 datasets. BiT performs well across a surprisingly wide range of data regimes – from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark (VTAB). On small datasets, BiT attains 76.8% on ILSVRC-2012 with 10 examples per class, and 97.0% on CIFAR-10 with 10 examples per class. We conduct detailed analysis of the main components that lead to high transfer performance.

-

Paper: https://arxiv.org/abs/1912.11370

-

Repository: https://github.com/google-research/big_transfer

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-12-24

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import BitMedium2Vec
model = BitMedium2Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.image.tfhub.bit_medium.BitMedium2Vec(model_url: str = 'https://tfhub.dev/google/bit/m-r50x1/1')
-

Bases: vectorhub.encoders.image.tfhub.bit.BitSmall2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-

Bulk encode. Chunk size should be specified outside of the images.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/bit/m-r101x1/1': {'vector_length': 2048}, 'https://tfhub.dev/google/bit/m-r101x3/1': {'vector_length': 6144}, 'https://tfhub.dev/google/bit/m-r152x4/1': {'vector_length': 8192}, 'https://tfhub.dev/google/bit/m-r50x1/1': {'vector_length': 2048}, 'https://tfhub.dev/google/bit/m-r50x3/1': {'vector_length': 6144}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.inception_resnet module

-

Model Name: Inception Resnet

-

Vector Length: 1536 (default)

-

Description: Very deep convolutional networks have been central to the largest advances in image recognition performance in recent years. One example is the Inception architecture that has been shown to achieve very good performance at relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest generation Inception-v3 network. This raises the question of whether there are any benefit in combining the Inception architecture with residual connections. Here we give clear empirical evidence that training with residual connections accelerates the training of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual Inception networks. With an ensemble of three residual and one Inception-v4, we achieve 3.08 percent top-5 error on the test set of the ImageNet classification (CLS) challenge.

-

Paper: https://arxiv.org/abs/1602.07261

-

Repository: https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2016-02-23

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import InceptionResnet2Vec
model = InceptionResnet2Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.image.tfhub.inception_resnet.InceptionResnet2Vec(model_url='https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-

Encode an image using InceptionResnet.

-

Example

-
>>> from vectorhub.encoders.image.tfhub import InceptionResnet2Vec
->>> model = InceptionResnet2Vec()
->>> model.encode(model.read('https://getvectorai.com/assets/hub-logo-with-text.png'))
-
-
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4': {'vector_length': 1536}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.inceptionv1 module

-

Model Name: Inception V1

-

Vector Length: 1024 (default)

-

Description: -We propose a deep convolutional neural network architecture codenamed “Inception”, which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection.

-

Paper: https://arxiv.org/abs/1409.4842

-

Repository: https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2014-09-17

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import InceptionV12Vec
model = InceptionV12Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.image.tfhub.inceptionv1.InceptionV12Vec(model_url: str = 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4': {'vector_length': 1024}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.inceptionv2 module

-

Model Name: Inception

-

Vector Length: 1024 (default)

-

Description: -We propose a deep convolutional neural network architecture codenamed “Inception”, which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection.

-

Paper: https://arxiv.org/abs/1409.4842

-

Repository: https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2015-12-11

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import InceptionV22Vec
model = InceptionV22Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.image.tfhub.inceptionv2.InceptionV22Vec(model_url: str = 'https://tfhub.dev/google/imagenet/inception_v2/feature_vector/4')
-

Bases: vectorhub.encoders.image.tfhub.inceptionv1.InceptionV12Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/inception_v2/feature_vector/4': {'vector_length': 1024}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.inceptionv3 module

-

Model Name: Inception

-

Vector Length: 2048 (default)

-

Description: -We propose a deep convolutional neural network architecture codenamed “Inception”, which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection.

-

Paper:

-

Repository: https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2015-12-11

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import InceptionV32Vec
model = InceptionV32Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.image.tfhub.inceptionv3.InceptionV32Vec(model_url: str = 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/4')
-

Bases: vectorhub.encoders.image.tfhub.inceptionv1.InceptionV12Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/inaturalist/inception_v3/feature_vector/4': {'vector_length': 2048}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: -Improve model URL validation to not include final number in URl string.

-
-
Parameters
-
    -
  • model_url – The URl of the the model in question

  • -
  • list_of_urls – The list of URLS for the model in question

  • -
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.mobilenet module

-

Model Name: MobileNet

-

Vector Length: 1024 (default)

-

Description: -We present a class of efficient models called MobileNets for mobile and embedded vision applications. MobileNets are based on a streamlined architecture that uses depth-wise separable convolutions to build light weight deep neural networks. We introduce two simple global hyper-parameters that efficiently trade off between latency and accuracy. These hyper-parameters allow the model builder to choose the right sized model for their application based on the constraints of the problem. We present extensive experiments on resource and accuracy tradeoffs and show strong performance compared to other popular models on ImageNet classification. We then demonstrate the effectiveness of MobileNets across a wide range of applications and use cases including object detection, finegrain classification, face attributes and large scale geo-localization.

-

Paper: https://arxiv.org/abs/1704.04861

-

Repository: https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2017-04-17

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

`python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import MobileNetV12Vec
model = MobileNetV12Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
`

-
-
-class vectorhub.encoders.image.tfhub.mobilenet.MobileNetV12Vec(model_url: str = 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4', resize_mode: str = 'symmetric')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-

Bulk encode. Chunk size should be specified outside of the images.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

An method to read images. -:param image: An image link/bytes/io Bytesio data format. -:param as_gray: read in the image as black and white

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/mobilenet_v1_025_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 256}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 512}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 768}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 1024}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 1024}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 1024}, 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1024}}
-
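The urls mapping above pairs each TF Hub URL with its expected image_dimensions and vector_length. The sketch below shows one hedged way to use that metadata together with validate_model_url, image_resize and encode; the import path and the positional model_url argument are assumptions based on the signatures documented on this page.

```python
from vectorhub.encoders.image.tfhub import MobileNetV12Vec  # assumed import path

model_url = 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/feature_vector/4'
# Guard against typos in the TF Hub URL before instantiating the model.
MobileNetV12Vec.validate_model_url(model_url, list(MobileNetV12Vec.urls))

model = MobileNetV12Vec(model_url)
meta = MobileNetV12Vec.urls[model_url]  # {'image_dimensions': 160, 'vector_length': 512}
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
sample = model.image_resize(sample, width=meta['image_dimensions'],
                            height=meta['image_dimensions'])
vector = model.encode(sample)
print(len(vector))  # expected to match meta['vector_length'], i.e. 512
```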
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.mobilenetv2 module

-

Model Name: MobileNet V2

-

Vector Length: 1792 (default)

-

Description: -We present a class of efficient models called MobileNets for mobile and embedded vision applications. MobileNets are based on a streamlined architecture that uses depth-wise separable convolutions to build light weight deep neural networks. We introduce two simple global hyper-parameters that efficiently trade off between latency and accuracy. These hyper-parameters allow the model builder to choose the right sized model for their application based on the constraints of the problem. We present extensive experiments on resource and accuracy tradeoffs and show strong performance compared to other popular models on ImageNet classification. We then demonstrate the effectiveness of MobileNets across a wide range of applications and use cases including object detection, finegrain classification, face attributes and large scale geo-localization.

-

Paper: https://arxiv.org/abs/1704.04861

-

Repository: https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2018-01-13

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import MobileNetV22Vec
model = MobileNetV22Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
```

-
-
-class vectorhub.encoders.image.tfhub.mobilenetv2.MobileNetV22Vec(model_url: str = 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4', resize_mode: str = 'symmetric')
-

Bases: vectorhub.encoders.image.tfhub.mobilenet.MobileNetV12Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images)
-

Bulk encode images. The chunk size should be specified separately, outside of the images argument.

-
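A hedged usage sketch of the point above: since bulk_encode does not chunk for you, split the list of images with chunk first and encode batch by batch. The repeated image URL below is illustrative only.

```python
from vectorhub.encoders.image.tfhub import MobileNetV22Vec

model = MobileNetV22Vec()
image_urls = ['https://getvectorai.com/assets/hub-logo-with-text.png'] * 5  # illustrative
images = [model.read(url) for url in image_urls]

vectors = []
for batch in MobileNetV22Vec.chunk(images, chunk_size=2):
    vectors.extend(model.bulk_encode(batch))
print(len(vectors))  # 5
```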
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

A method to read images.
:param image: An image link, bytes, or io.BytesIO data format.
:param as_gray: Read in the image as black and white.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
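As a rough illustration of the weighted RGB-to-grayscale conversion described above (the specific weights are the common ITU-R BT.601 luminance coefficients, an assumption; the library's rgb_weights property may use different values):

```python
import numpy as np

def to_grayscale(sample: np.ndarray, rgb_weights=None) -> np.ndarray:
    # Weighted sum over the channel axis of an H x W x 3 image array.
    if rgb_weights is None:
        rgb_weights = [0.299, 0.587, 0.114]  # assumed default weights
    return np.dot(sample[..., :3], rgb_weights)

image = np.random.rand(224, 224, 3)
print(to_grayscale(image).shape)  # (224, 224)
```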
- -
-
-urls = {'https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_96/feature_vector/4': {'image_dimensions': 96, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_96/feature_vector/4': {'image_dimensions': 96, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_96/feature_vector/4': {'image_dimensions': 96, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_128/feature_vector/4': {'image_dimensions': 128, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_160/feature_vector/4': {'image_dimensions': 160, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_192/feature_vector/4': {'image_dimensions': 192, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_96/feature_vector/4': {'image_dimensions': 96, 'vector_length': 1280}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1664}, 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4': {'image_dimensions': 224, 'vector_length': 1792}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.resnet module

-

Model Name: ResNet

-

Vector Length: 2048 (default)

-

Description: -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers—8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers.

-

Paper: https://arxiv.org/abs/1512.03385

-

Repository:

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2015-12-10

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-image-tfhub]

-

Example:

-

-
-
-

This is an example

-

```python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import ResnetV12Vec
model = ResnetV12Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
```

-
-
-class vectorhub.encoders.image.tfhub.resnet.ResnetV12Vec(model_url: str = 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4')
-

Bases: vectorhub.encoders.image.base.BaseImage2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images: List[str])
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

A method to read images.
:param image: An image link, bytes, or io.BytesIO data format.
:param as_gray: Read in the image as black and white.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-urls = {'https://tfhub.dev/google/imagenet/resnet_v1_101/feature_vector/4': {'vector_length': 2048}, 'https://tfhub.dev/google/imagenet/resnet_v1_152/feature_vector/4': {'vector_length': 2048}, 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4': {'vector_length': 2048}}
-
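All three ResNet v1 feature-vector URLs above map to 2048-dimensional vectors, so switching depth only changes the backbone, not the output size. A hedged sketch, assuming model_url can be passed positionally as in the class signature above:

```python
from vectorhub.encoders.image.tfhub import ResnetV12Vec

model = ResnetV12Vec('https://tfhub.dev/google/imagenet/resnet_v1_101/feature_vector/4')
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
vector = model.encode(sample)
print(len(vector))  # expected: 2048, per the urls mapping above
```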
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
- -
-
-

vectorhub.encoders.image.tfhub.resnetv2 module

-

Model Name: ResNet

-

Vector Length: 2048 (default)

-

Description: -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers—8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers.

-

Paper: https://arxiv.org/abs/1512.03385

-

Repository:

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2016-03-16

-

Limitations: Not stated.

-

Installation: pip install vectorhub['encoders-image-tfhub']

-

Example:

-

-
-
-

```python
#pip install vectorhub['encoders-image-tfhub']
from vectorhub.encoders.image.tfhub import ResnetV22Vec
model = ResnetV22Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
```

-
-
-class vectorhub.encoders.image.tfhub.resnetv2.ResnetV22Vec(model_url: str = 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4')
-

Bases: vectorhub.encoders.image.tfhub.resnet.ResnetV12Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(images: List[str])
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(image)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-image_resize(image_array, width=0, height=0, rescale=0, resize_mode='symmetric')
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(image: str)
-

A method to read images.
:param image: An image link, bytes, or io.BytesIO data format.
:param as_gray: Read in the image as black and white.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-property rgb_weights
-

Get RGB weights for grayscaling.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-show_image(sample, cmap=None, is_grayscale=True)
-

Show an image once it is read. -Arg:

-
-

sample: Image that is read (numpy array)

-
-
- -
-
-to_grayscale(sample, rgb_weights: Optional[list] = None)
-

Converting an image from RGB to Grayscale

-
- -
-
-property urls
-

dict() -> new empty dictionary
dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs
dict(iterable) -> new dictionary initialized as if via:
    d = {}
    for k, v in iterable:
        d[k] = v
dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2)

-
-
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
- -
-
-

Module contents

-
-
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.image.vectorai.html b/docs/vectorhub.encoders.image.vectorai.html deleted file mode 100644 index 115c7c26..00000000 --- a/docs/vectorhub.encoders.image.vectorai.html +++ /dev/null @@ -1,308 +0,0 @@ - - - - - - - - - - vectorhub.encoders.image.vectorai package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.image.vectorai package

-
-

Submodules

-
-
-

vectorhub.encoders.image.vectorai.vi_encoder module

-

Vector AI’s deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves how the embedding works. These models are selected to work out-of-the-box after testing for their success on our end.

-

To get access to Vector AI, request a username and API key from gh.vctr.ai.

-

Example

-
>>> from vectorhub.encoders.image.vectorai.vi_encoder import ViImage2Vec
>>> model = ViImage2Vec(username, api_key)
>>> model.encode("sample.jpg")
-
-
-
-
-class vectorhub.encoders.image.vectorai.vi_encoder.ViImage2Vec(username, api_key, url=None, collection_name='base')
-

Bases: object

-

Request a username and API key from gh.vctr.ai.

-
-
-encode(image)
-
- -
- -
-
-

Module contents

-
-
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.text.html b/docs/vectorhub.encoders.text.html deleted file mode 100644 index 3241713f..00000000 --- a/docs/vectorhub.encoders.text.html +++ /dev/null @@ -1,446 +0,0 @@ - - - - - - - - - - vectorhub.encoders.text package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.text package

- -
-

Submodules

-
-
-

vectorhub.encoders.text.base module

-

Base Text2Vec Model

-
-
-class vectorhub.encoders.text.base.BaseText2Vec
-

Bases: vectorhub.base.Base2Vec, abc.ABC

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-abstract encode(words: List[str])
-
- -
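Because encode is abstract here, any concrete Text2Vec must override it. Below is a minimal illustrative subclass; the character-code "encoder" is purely a toy stand-in to show the shape of the interface, and it assumes BaseText2Vec can be instantiated without further constructor arguments.

```python
from typing import List
from vectorhub.encoders.text.base import BaseText2Vec  # module path as documented on this page

class Toy2Vec(BaseText2Vec):
    # Toy subclass: folds character codes into a fixed-length vector.
    def __init__(self, vector_length: int = 8):
        self._vector_length = vector_length

    def encode(self, words: List[str]) -> List[float]:
        vec = [0.0] * self._vector_length
        for word in words:
            for i, ch in enumerate(word):
                vec[i % self._vector_length] += float(ord(ch))
        return vec

print(len(Toy2Vec().encode(["hello", "world"])))  # 8
```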
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.text.sentence_transformers.html b/docs/vectorhub.encoders.text.sentence_transformers.html deleted file mode 100644 index 25b45894..00000000 --- a/docs/vectorhub.encoders.text.sentence_transformers.html +++ /dev/null @@ -1,449 +0,0 @@ - - - - - - - - - - vectorhub.encoders.text.sentence_transformers package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.text.sentence_transformers package

-
-

Submodules

-
-
-

vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers module

-

Model Name: Sentence Transformer Models

-

Vector Length: Depends on model.

-

Description: -These are Sentence Transformer models from sbert.net by UKPLab.

-

Paper: https://arxiv.org/abs/1908.10084

-

Repository: https://github.com/UKPLab/sentence-transformers

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-08-27

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-sentence-transformers]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-sentence-transformers]
from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec
model = SentenceTransformer2Vec('distilroberta-base-paraphrase-v1')
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers.SentenceTransformer2Vec(model_name: str)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str])List[List[float]]
-

Bulk encode words from transformers.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str)List[float]
-

Encode text with transformers.
This takes the beginning set of tokens, turns them into vectors,
and returns the mean pooling of the tokens.
:param text: string

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_list_of_urls()
-

Return list of URLS.

-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'LaBSE': {'vector_length': 768}, 'average_word_embeddings_glove.6B.300d': {'vector_length': 300}, 'average_word_embeddings_glove.840B.300d': {'vector_length': 300}, 'average_word_embeddings_komninos': {'vector_length': 300}, 'average_word_embeddings_levy_dependency': {'vector_length': 768}, 'bert-base-wikipedia-sections-mean-tokens': {'vector_length': 768}, 'bert-large-nli-stsb-mean-tokens': {'vector_length': 1024}, 'distilbert-base-nli-stsb-mean-tokens': {'vector_length': 768}, 'distilbert-base-nli-stsb-quora-ranking': {'vector_length': 768}, 'distilbert-multilingual-nli-stsb-quora-ranking': {'vector_length': 768}, 'distilroberta-base-msmarco-v1': {'vector_length': 768}, 'distilroberta-base-paraphrase-v1': {'vector_length': 768}, 'distiluse-base-multilingual-cased-v2': {'vector_length': 512}, 'roberta-base-nli-stsb-mean-tokens': {'vector_length': 768}, 'roberta-large-nli-stsb-mean-tokens': {'vector_length': 1024}, 'xlm-r-bert-base-nli-stsb-mean-tokens': {'vector_length': 768}, 'xlm-r-distilroberta-base-paraphrase-v1': {'vector_length': 768}}
-
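The urls mapping above doubles as a quick reference for each model's vector length. A hedged sketch of choosing a multilingual model and batch-encoding with it; the printed lengths follow from the mapping, assuming encode and bulk_encode return plain lists as their type hints indicate.

```python
from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec

model = SentenceTransformer2Vec('distiluse-base-multilingual-cased-v2')  # 512-d per the mapping
single = model.encode("I enjoy taking long walks along the beach with my dog.")
batch = model.bulk_encode(["first sentence", "second sentence"])
print(len(single), len(batch), len(batch[0]))  # expected: 512 2 512
```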
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.text.tf_transformers.html b/docs/vectorhub.encoders.text.tf_transformers.html deleted file mode 100644 index 35ca913a..00000000 --- a/docs/vectorhub.encoders.text.tf_transformers.html +++ /dev/null @@ -1,438 +0,0 @@ - - - - - - - - - - vectorhub.encoders.text.tf_transformers package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.text.tf_transformers package

-
-

Submodules

-
-
-

vectorhub.encoders.text.tf_transformers.tf_auto_transformers module

-

Model Name: Transformer Models

-

Vector Length: Depends on model.

-

Description: -These are Tensorflow Automodels from HuggingFace.

-

Paper: https://arxiv.org/abs/1910.03771

-

Repository: https://huggingface.co/transformers/pretrained_models.html

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: None

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tf-transformers]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-tf-transformers]
from vectorhub.encoders.text.tf_transformers import TFTransformer2Vec
model = TFTransformer2Vec('bert-base-uncased')
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.tf_transformers.tf_auto_transformers.TFTransformer2Vec(model_name: str, config=None)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str])List[List[float]]
-

Bulk encode words from transformers.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str)List[float]
-

Encode text with transformers.
This takes the beginning set of tokens, turns them into vectors,
and returns the mean pooling of the tokens.
:param text: string

-
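A hedged sketch of the mean-pooling idea described above, written directly against the HuggingFace TF auto classes; the exact preprocessing and pooling inside TFTransformer2Vec may differ, and 'bert-base-uncased' is simply the model name used in the module example.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = TFAutoModel.from_pretrained("bert-base-uncased")

tokens = tokenizer("I enjoy taking long walks along the beach with my dog.", return_tensors="tf")
hidden = model(**tokens).last_hidden_state          # [batch, seq_len, hidden]
vector = tf.reduce_mean(hidden, axis=1)[0].numpy()  # mean pool over the token axis
print(vector.shape)                                 # (768,) for bert-base-uncased
```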
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.text.tfhub.html b/docs/vectorhub.encoders.text.tfhub.html deleted file mode 100644 index a3101bbe..00000000 --- a/docs/vectorhub.encoders.text.tfhub.html +++ /dev/null @@ -1,1427 +0,0 @@ - - - - - - - - - - vectorhub.encoders.text.tfhub package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.text.tfhub package

-
-

Submodules

-
-
-

vectorhub.encoders.text.tfhub.albert module

-

Model Name: Albert - A Lite Bert

-

Vector Length: 768 (albert_en_base)

-

Description: -Increasing model size when pretraining natural language representations often results in improved performance on downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations and longer training times. To address these problems, we present two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows that our proposed methods lead to models that scale much better compared to the original BERT. We also use a self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and squad benchmarks while having fewer parameters compared to BERT-large.

-

Paper: https://arxiv.org/abs/1909.11942

-

Repository: https://tfhub.dev/tensorflow/albert_en_base/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2019-09-26

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import Albert2Vec
model = Albert2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.tfhub.albert.Albert2Vec(model_url: str = 'https://tfhub.dev/tensorflow/albert_en_base/2', max_seq_length: int = 228, normalize: bool = True, preprocessor_url: str = 'http://tfhub.dev/tensorflow/albert_en_preprocess/1')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: list, pooling_strategy='pooled_output')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str, pooling_strategy='pooled_output')
-
- -
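A hedged usage sketch with the pooling_strategy parameter spelled out; only the default 'pooled_output' value shown in the signatures is used, and the 768-dimensional output follows from the urls mapping below for the default albert_en_base model.

```python
from vectorhub.encoders.text.tfhub import Albert2Vec

model = Albert2Vec()  # defaults to albert_en_base/2
single = model.encode("I enjoy taking long walks along the beach with my dog.",
                      pooling_strategy='pooled_output')
batch = model.bulk_encode(["first sentence", "second sentence"],
                          pooling_strategy='pooled_output')
print(len(single), len(batch))  # expected: 768 2
```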
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url)
-
- -
-
-init_tokenizer(preprocessor_url)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/tensorflow/albert_en_base/1': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/albert_en_base/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/albert_en_large/1': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/albert_en_large/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/albert_en_xlarge/1': {'vector_length': 2048}, 'https://tfhub.dev/tensorflow/albert_en_xlarge/2': {'vector_length': 2048}, 'https://tfhub.dev/tensorflow/albert_en_xxlarge/1': {'vector_length': 4096}, 'https://tfhub.dev/tensorflow/albert_en_xxlarge/2': {'vector_length': 4096}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.encoders.text.tfhub.bert module

-

Model Name: BERT - Bidirectional Encoder Representations

-

Vector Length: 1024 (Bert Large)

-

Description: -We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.

-

![Bert Image](https://miro.medium.com/max/619/1*iJqlhZz-g6ZQJ53-rE9VvA.png)

-

Paper: https://arxiv.org/abs/1810.04805v2

-

Repository: https://tfhub.dev/google/collections/bert/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2018-10-11

-

Limitations: -* NA

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import Bert2Vec
model = Bert2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.tfhub.bert.Bert2Vec(model_url: str = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3', max_seq_length: int = 64, normalize: bool = True)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
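A hedged sketch of the cloud workflow implied by add_documents and search; the username and API key values are placeholders, and the exact return format of search is not documented on this page.

```python
from vectorhub.encoders.text.tfhub import Bert2Vec

model = Bert2Vec()
documents = ["the beach was sunny", "the dog chased the ball"]

# Index the documents in the Vector AI cloud, then run a simple query against them.
model.add_documents('my_username', 'my_api_key', documents,
                    collection_name='bert_demo_collection')
results = model.search("sunny coastline", num_results=2)
```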
- -
-
-bulk_encode(texts: list, pooling_strategy='pooled_output')
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str, pooling_strategy='pooled_output')
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-init_tokenizer()
-
- -
-
-static is_url_working(url)
-
- -
-
-process(input_strings: str)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/2': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/3': {'vector_length': 1024}, 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/2': {'vector_length': 768}, 'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/3': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.encoders.text.tfhub.elmo module

-

Model Name: Elmo (Embeddings From Language Models)

-

Vector Length: 1024 (default)

-

Description: -Computes contextualized word representations using character-based word representations and bidirectional LSTMs, as described in the paper “Deep contextualized word representations” [1].

-

ELMo (Embeddings from Language Models) representations are deep as they are a function of all of the internal layers of the biLM. More specifically, we learn a linear combination of the vectors stacked above each input word for each end task.

-

Paper: https://arxiv.org/abs/1802.05365

-

Repository: https://tfhub.dev/google/elmo/3

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-07-03

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import Elmo2Vec
model = Elmo2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.tfhub.elmo.Elmo2Vec(model_url: str = 'https://tfhub.dev/google/elmo/3', trainable_model=True)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts, output_layer: str = 'elmo')
-
-
The output layer can be one of the following:

lstm_outputs1: the first LSTM hidden state with shape [batch_size, max_length, 1024].
lstm_outputs2: the second LSTM hidden state with shape [batch_size, max_length, 1024].
elmo: the weighted sum of the 3 layers, where the weights are trainable. This tensor has shape [batch_size, max_length, 1024].
default: a fixed mean-pooling of all contextualized word representations with shape [batch_size, 1024].

-
-
-

Note: The output layer word_emb is character-based and is not supported by VectorHub.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text, output_layer: str = 'elmo')
-

The output layer can be one of the following:
lstm_outputs1: the first LSTM hidden state with shape [batch_size, max_length, 1024].
lstm_outputs2: the second LSTM hidden state with shape [batch_size, max_length, 1024].
elmo: the weighted sum of the 3 layers, where the weights are trainable. This tensor has shape [batch_size, max_length, 1024].
default: a fixed mean-pooling of all contextualized word representations with shape [batch_size, 1024].
Note: The output layer word_emb is character-based and is not supported by VectorHub.

-
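A hedged sketch of switching between the output layers listed above: 'default' yields a fixed mean-pooled 1024-dimensional sentence vector, while 'elmo' keeps a per-token representation. The shapes in the comments are taken from the docstring, not re-verified here.

```python
from vectorhub.encoders.text.tfhub import Elmo2Vec

model = Elmo2Vec()
sentence = "I enjoy taking long walks along the beach with my dog."
sentence_vec = model.encode(sentence, output_layer='default')  # fixed mean-pooling, 1024-d
token_vecs = model.encode(sentence, output_layer='elmo')       # per-token, [max_length, 1024]
```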
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/elmo/3': {'vector_length': 1024}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.encoders.text.tfhub.labse module

-

Model Name: LaBSE - Language-agnostic BERT Sentence Embedding

-

Vector Length: 768 (default)

-

Description: -The language-agnostic BERT sentence embedding encodes text into high dimensional vectors. The model is trained and optimized to produce similar representations exclusively for bilingual sentence pairs that are translations of each other. So it can be used for mining for translations of a sentence in a larger corpus. -In “Language-agnostic BERT Sentence Embedding”, we present a multilingual BERT embedding model, called LaBSE, that produces language-agnostic cross-lingual sentence embeddings for 109 languages. The model is trained on 17 billion monolingual sentences and 6 billion bilingual sentence pairs using MLM and TLM pre-training, resulting in a model that is effective even on low-resource languages for which there is no data available during training. Further, the model establishes a new state of the art on multiple parallel text (a.k.a. bitext) retrieval tasks. We have released the pre-trained model to the community through tfhub, which includes modules that can be used as-is or can be fine-tuned using domain-specific data.

-

Paper: https://arxiv.org/pdf/2007.01852v1.pdf

-

Repository: https://tfhub.dev/google/LaBSE/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-07-03

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import LaBSE2Vec
model = LaBSE2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.tfhub.labse.LaBSE2Vec(model_url: str = 'https://tfhub.dev/google/LaBSE/1', max_seq_length: int = 128, normalize: bool = True)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: list)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: str)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-init_tokenizer()
-
- -
-
-static is_url_working(url)
-
- -
-
-process(input_strings: str)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/LaBSE/1': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.encoders.text.tfhub.use module

-

Model Name: USE - Universal Sentence Encoder

-

Vector Length: 512 (Base model)

-

Description:
We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.

-

![USE Image](https://www.gstatic.com/aihub/tfhub/universal-sentence-encoder/example-similarity.png)

-

Image from [Google](https://tfhub.dev/google/universal-sentence-encoder/1).

-

Paper: https://arxiv.org/abs/1803.11175

-

Repository: https://tfhub.dev/google/collections/universal-sentence-encoder/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2018-03-29

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import USE2Vec
model = USE2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.tfhub.use.USE2Vec(model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder/4')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts, threads=10, chunks=100)
-
- -
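A hedged usage sketch of bulk_encode with its threads and chunks parameters; the reading that these control parallelism and batch splitting is an assumption based on the parameter names, and the 512-dimensional output follows from the urls mapping below.

```python
from vectorhub.encoders.text.tfhub import USE2Vec

model = USE2Vec()  # defaults to universal-sentence-encoder/4
texts = ["first sentence", "second sentence", "third sentence"]
vectors = model.bulk_encode(texts, threads=2, chunks=2)
print(len(vectors), len(vectors[0]))  # expected: 3 512
```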
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/universal-sentence-encoder-large/5': {'vector_length': 512}, 'https://tfhub.dev/google/universal-sentence-encoder/4': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.encoders.text.tfhub.use_lite module

-
-
-class vectorhub.encoders.text.tfhub.use_lite.USELite2Vec(model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-lite/2')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts, threads=10, chunks=100)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init()
-
- -
-
-static is_url_working(url)
-
- -
-
-process_texts(texts)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/universal-sentence-encoder-lite/2': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate that the model URL belongs to the list of URLs. This is to help
users avoid misspelling the name of the model.

# TODO:
Improve model URL validation to not include the final number in the URL string.

Parameters

  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.encoders.text.tfhub.use_multi module

-

Model Name: USE Multi - Universal Sentence Encoder Multilingual

-

Vector Length: 512 (Base model)

-

Description: -The Universal Sentence Encoder Multilingual module is an extension of the Universal Sentence Encoder Large that includes training on multiple tasks across languages. Supports 16 languages (Arabic, Chinese-simplified, Chinese-traditional, English, French, German, Italian, Japanese, Korean, Dutch, Polish, Portuguese, Spanish, Thai, Turkish, Russian) text encoder.

-

Paper: https://arxiv.org/abs/1803.11175

-

Repository: https://tfhub.dev/google/collections/universal-sentence-encoder/1

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2018-03-29

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-tfhub]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-tfhub]
from vectorhub.encoders.text.tfhub import USEMulti2Vec
model = USEMulti2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.tfhub.use_multi.USEMulti2Vec(model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3')
-

Bases: vectorhub.encoders.text.tfhub.use.USE2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts, threads=10, chunks=100)
-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python (but not a pandas DataFrame).
:param lst: Python list
:param chunk_size: The size of each chunk.

Example

>>> documents = [{...}]
>>> ViClient.chunk(documents, chunk_size=100)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-init(model_url: str)
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3': {'vector_length': 512}, 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3': {'vector_length': 512}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
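As a rough sketch of how this validation helper might be exercised (the call pattern is an assumption; only the signature and the urls mapping above are documented):

```python
# Sketch: check a TFHub URL against the keys of the class-level `urls` mapping above.
from vectorhub.encoders.text.tfhub import USEMulti2Vec

model_url = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3'
USEMulti2Vec.validate_model_url(model_url, list(USEMulti2Vec.urls))
# Behaviour on an unknown URL (warning vs. exception) is not documented here.
```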
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.text.torch_transformers.html b/docs/vectorhub.encoders.text.torch_transformers.html deleted file mode 100644 index 0ea80c03..00000000 --- a/docs/vectorhub.encoders.text.torch_transformers.html +++ /dev/null @@ -1,772 +0,0 @@ - - - - - - - - - - vectorhub.encoders.text.torch_transformers package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.text.torch_transformers package

-
-

Submodules

-
-
-

vectorhub.encoders.text.torch_transformers.legal_bert module

-

Model Name: Legal Bert

-

Vector Length: 768 (default)

-

Description: -BERT has achieved impressive performance in several NLP tasks. However, there has been limited investigation on its adaptation guidelines in specialised domains. Here we focus on the legal domain, where we explore several approaches for applying BERT models to downstream legal tasks, evaluating on multiple datasets. Our findings indicate that the previous guidelines for pre-training and fine-tuning, often blindly followed, do not always generalize well in the legal domain. Thus we propose a systematic investigation of the available strategies when applying BERT in specialised domains. These are: (a) use the original BERT out of the box, (b) adapt BERT by additional pre-training on domain-specific corpora, and (c) pre-train BERT from scratch on domain-specific corpora. We also propose a broader hyper-parameter search space when fine-tuning for downstream tasks and we release LEGAL-BERT, a family of BERT models intended to assist legal NLP research, computational law, and legal technology applications.

-

Paper: https://arxiv.org/abs/2010.02559

-

Repository: https://huggingface.co/nlpaueb/legal-bert-base-uncased

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-10-06

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-torch-transformers]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-torch-transformers]
from vectorhub.encoders.text.torch_transformers import LegalBert2Vec
model = LegalBert2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.torch_transformers.legal_bert.LegalBert2Vec(model_name: str = 'nlpaueb/legal-bert-base-uncased')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str])List[List[float]]
-

Encode multiple sentences using transformers. -:param texts: List[str]

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: Union[str, List[str]])List[float]
-

Encode words using transformers. -:param text: str

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'nlpaueb/bert-base-uncased-contracts': {'data': 'Trained on US contracts'}, 'nlpaueb/bert-base-uncased-echr\t': {'data': 'Trained on ECHR cases'}, 'nlpaueb/bert-base-uncased-eurlex': {'data': 'Trained on EU legislation'}, 'nlpaueb/legal-bert-base-uncased': {'data': 'Trained on all the above'}, 'nlpaueb/legal-bert-small-uncased': {'data': 'Trained on all the above'}}
-
- -
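The urls mapping above lists several pretrained variants. As a hedged sketch (reusing the package-level import from the example above), a variant can be selected by passing its HuggingFace model name to the constructor:

```python
# Sketch only: picks the contracts-specific variant listed in `urls` above.
from vectorhub.encoders.text.torch_transformers import LegalBert2Vec

model = LegalBert2Vec(model_name='nlpaueb/bert-base-uncased-contracts')
vector = model.encode("The lessee shall pay rent on the first day of each month.")
print(len(vector))
```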
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

vectorhub.encoders.text.torch_transformers.torch_auto_transformers module

-

Model Name: Transformer Models

-

Vector Length: Depends on model.

-

Description: -These are PyTorch AutoModels from HuggingFace.

-

Paper: https://arxiv.org/abs/1910.03771

-

Repository: https://huggingface.co/transformers/pretrained_models.html

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: None

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-torch-transformers-auto]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-torch-transformers-auto]
from vectorhub.encoders.text.torch_transformers import Transformer2Vec
model = Transformer2Vec('bert-base-uncased')
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.torch_transformers.torch_auto_transformers.Transformer2Vec(model_name: str)
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str])List[List[float]]
-

Encode multiple sentences using transformers. -:param texts: List[str]

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: Union[str, List[str]])List[float]
-

Encode words using transformers. -:param text: str

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'bert-base-uncased': {'vector_length': 768}, 'distilbert-base-uncased': {'vector_length': 768}, 'facebook/bart-base': {'vector_length': 768}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-vectorhub.encoders.text.torch_transformers.torch_auto_transformers.list_tested_transformer_models()
-

List the tested transformer models.

-
- -
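A minimal sketch of calling this helper (assuming it returns the model names rather than printing them):

```python
# Sketch: show which transformer model names have been tested with Transformer2Vec.
from vectorhub.encoders.text.torch_transformers.torch_auto_transformers import list_tested_transformer_models

print(list_tested_transformer_models())
```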
-
-

vectorhub.encoders.text.torch_transformers.torch_longformers module

-

Model Name: Longformer

-

Vector Length: 768 (default)

-

Description: -From the abstract of the paper:

-

Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.

-

Novelties in the Longformer paper come from its use of the following attention patterns:

-
    -
  • Sliding Window: The attention pattern employs fixed-size window attention surrounding each token on both sides.

  • -
  • Dilated Sliding Window: Similar to CNN dilation, sliding windows can be dilated (i.e. have gaps)

  • -
  • Global Attention: Certain tokens attend to all tokens (e.g. for classification, global attention is used for CLS)

  • -
  • Linear Projections for Global Attention: Two separate sets of projections are used to compute attention scores for sliding window attention and for global attention.

  • -
-

Paper: https://arxiv.org/abs/2004.05150

-

Repository: https://huggingface.co/allenai/longformer-base-4096

-

Architecture: Not stated.

-

Tasks: Not stated.

-

Release Date: 2020-04-10

-

Limitations: Not stated.

-

Installation: pip install vectorhub[encoders-text-torch-transformers]

-

Example:

-

-
-
-

```python
#pip install vectorhub[encoders-text-torch-transformers]
from vectorhub.encoders.text.torch_transformers import Longformer2Vec
model = Longformer2Vec()
model.encode("I enjoy taking long walks along the beach with my dog.")
```

-
-
-class vectorhub.encoders.text.torch_transformers.torch_longformers.Longformer2Vec(model_name: str = 'allenai/longformer-base-4096')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str], pooling_method='mean')List[List[float]]
-

Encode multiple sentences using transformers. -:param texts: List[str]

-
- -
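A hedged sketch of batch encoding with the pooling_method parameter from the signature above:

```python
# Hedged sketch; 'mean' simply repeats the documented default pooling_method.
from vectorhub.encoders.text.torch_transformers import Longformer2Vec

model = Longformer2Vec()
documents = [
    "A first long document that would normally run to thousands of tokens...",
    "A second long document...",
]
vectors = model.bulk_encode(documents, pooling_method='mean')
print(len(vectors))
```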
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-definition = <vectorhub.doc_utils.ModelDefinition object>
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: Union[str, List[str]])List[float]
-

Encode words using transformers. -:param text: str

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-property test_word
-
- -
-
-urls = {'allenai/longformer-base-4096': {'vector_length': 4096}, 'allenai/longformer-large-4096': {'vector_length': 4096}}
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.text.vectorai.html b/docs/vectorhub.encoders.text.vectorai.html deleted file mode 100644 index 9044174d..00000000 --- a/docs/vectorhub.encoders.text.vectorai.html +++ /dev/null @@ -1,420 +0,0 @@ - - - - - - - - - - vectorhub.encoders.text.vectorai package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.text.vectorai package

-
-

Submodules

-
-
-

vectorhub.encoders.text.vectorai.vi_encoder module

-

Vector AI’s deployed models. The purpose of these models is to allow developers to easily build encodings and see for themselves -how the embeddings work. These models are selected to work out-of-the-box after testing for their success on our end.

-

To get access to Vector AI, request a username and API key from gh.vctr.ai (see request_api_key below).

-

Example

-
>>> from vectorhub.encoders.text.vectorai import ViText2Vec
->>> model = ViText2Vec(username, api_key)
->>> model.encode("Hey!")
->>> model.bulk_encode(["hey", "stranger"])
-
-
-
-
-class vectorhub.encoders.text.vectorai.vi_encoder.ViText2Vec(username, api_key, url=None, collection_name='base')
-

Bases: vectorhub.encoders.text.base.BaseText2Vec

-

Request a username and API key from gh.vctr.ai!

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-bulk_encode(texts: List[str])
-

Bulk convert text to vectors

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-encode(text: Union[str, List[str]])
-

Convert text to vectors.

-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-read(text: str)
-

An abstract method to specify the read method to read the data.

-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
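Combining add_documents and search from above, a rough end-to-end sketch (all credentials, items and names below are placeholders, and the package-level import is assumed):

```python
# Sketch only: index a few texts into a collection, then query them.
from vectorhub.encoders.text.vectorai import ViText2Vec

username, api_key = 'my_username', 'my_api_key'  # placeholder credentials
model = ViText2Vec(username, api_key, collection_name='demo_texts')
model.add_documents(username, api_key, items=["hey", "stranger", "hello world"])
results = model.search("greeting", num_results=3)
print(results)
```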
-
-property test_word
-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
-
-property vector_length
-

Set the vector length of the model.

-
- -
- -
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.encoders.video.html b/docs/vectorhub.encoders.video.html deleted file mode 100644 index c432a20f..00000000 --- a/docs/vectorhub.encoders.video.html +++ /dev/null @@ -1,324 +0,0 @@ - - - - - - - - - - vectorhub.encoders.video package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub.encoders.video package

-
-

Submodules

-
-
-

vectorhub.encoders.video.sampler module

-
-
-class vectorhub.encoders.video.sampler.FrameSamplingFilter(every=None, hertz=None, top_n=None)
-

Bases: object

-
-
-get_audio_sampling_rate(filename: str)
-
- -
-
-get_audio_vector(new_sampling_rate: int = 16000)
-
- -
-
-get_frame(index: int)
-
- -
-
-initialize_video(filename: str)
-
- -
-
-iter_frames()
-
- -
-
-load_clip(filename: str)
-
- -
-
-transform(filename: str)
-
- -
- -
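None of the methods above carry docstrings, so the following is only an illustrative sketch; the meaning of the constructor arguments and the behaviour of transform are assumptions:

```python
# Illustrative sketch only; parameter semantics are assumed, not documented above.
from vectorhub.encoders.video.sampler import FrameSamplingFilter

# Assumption: `hertz` controls how many frames per second are kept.
sampler = FrameSamplingFilter(hertz=1)
# Assumption: transform() loads the clip and returns the sampled frames.
frames = sampler.transform('my_video.mp4')  # placeholder filename
```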
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/vectorhub.html b/docs/vectorhub.html deleted file mode 100644 index af5f075b..00000000 --- a/docs/vectorhub.html +++ /dev/null @@ -1,714 +0,0 @@ - - - - - - - - - - vectorhub package — VectorHub 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

vectorhub package

- -
-

Submodules

-
-
-

vectorhub.auto_encoder module

-

Class for AutoEncoders.

-
-
-class vectorhub.auto_encoder.AutoBiEncoder
-

Bases: object

-
-
-static from_model(model_id, *args, **kwargs)
-
- -
- -
-
-class vectorhub.auto_encoder.AutoEncoder
-

Bases: object

-

The AutoEncoder class. To view the models available to the AutoEncoder, use list_all_auto_models().

-

Example

-
>>> from vectorhub.auto_encoder import *
->>> model = AutoEncoder.from_model('text/bert')
->>> # To view a list of models, use the following
->>> list_all_auto_models()
-
-
-
-
-static from_model(model_id, *args, **kwargs)
-

The AutoEncoder class. To view the models available to the AutoEncoder, use list_all_auto_models(). -:param model_id: The ID of the model - these can be found in list_all_auto_models() -:param args: The arguments for the model instantiation -:param kwargs: The keyword arguments for the model instantiation

-

Example

-
>>> from vectorhub.auto_encoder import *
->>> model = AutoEncoder.from_model('text/bert')
->>> # To view a list of models, use the following
->>> list_all_auto_models()
-
-
-
- -
- -
-
-vectorhub.auto_encoder.get_model_definitions(json_fn='models.json')
-

Get a list of dictionaries with the definitions of the models.

-
- -
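A small sketch of inspecting the packaged definitions (that the function returns a list of dictionaries is documented above; the 'model_name' key is an assumption based on ModelDefinition below):

```python
# Sketch: load the bundled model definitions and print their names.
from vectorhub.auto_encoder import get_model_definitions

definitions = get_model_definitions()  # reads the packaged models.json by default
for definition in definitions:
    print(definition.get('model_name'))  # 'model_name' key assumed from ModelDefinition
```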
-
-vectorhub.auto_encoder.list_all_auto_models()
-

List all models available for use as auto models.

-
- -
-
-

vectorhub.base module

-
-
-class vectorhub.base.Base2Vec
-

Bases: vectorhub.indexer.ViIndexer

-

Base class for vector models.

-
-
-add_documents(username: str, api_key: str, items: List[Any], metadata: Optional[List[Any]] = None, collection_name: Optional[str] = None)
-

Add documents to the Vector AI cloud.

-
- -
-
-classmethod chunk(lst: List, chunk_size: int)
-

Chunk an iterable object in Python but not a pandas DataFrame. -:param lst: Python List -:param chunk_size: The chunk size of an object.

-

Example

-
>>> documents = [{...}]
->>> ViClient.chunk(documents)
-
-
-
- -
-
-delete_collection(collection_name=None)
-
- -
-
-property encoder_type
-

The encoder type ensures it uses either the ‘encode’ or ‘encode_question’/’encode_answer’ -Currently supported encoder types:

-
-

Question-Answer -Text-Image -Encoder

-
-
- -
-
-get_vector_field_name()
-
- -
-
-static is_url_working(url)
-
- -
-
-request_api_key(username: str, email: str, referral_code='vectorhub_referred')
-

Requesting an API key.

-
- -
-
-retrieve_all_documents()
-

Retrieve all documents.

-
- -
-
-retrieve_documents(num_of_documents: int)
-

Get all the documents in our package.

-
- -
-
-search(item: Any, num_results: int = 10)
-

Simple search with Vector AI

-
- -
-
-classmethod validate_model_url(model_url: str, list_of_urls: List[str])
-

Validate the model url belongs in the list of urls. This is to help -users to avoid mis-spelling the name of the model.

-

# TODO: Improve model URL validation to not include the final number in the URL string.

-
-
Parameters
-
  • model_url – The URL of the model in question

  • list_of_urls – The list of URLs for the model in question
-
-
-
- -
- -
-
-vectorhub.base.catch_vector_errors(func)
-

Decorate a function to avoid vector errors.

Example:

    class A:
        @catch_vector_errors
        def encode(self):
            return [1, 2, 3]
-
-
-
-
- -
-
-

vectorhub.doc_utils module

-
-
-class vectorhub.doc_utils.ModelDefinition(model_id: str = '', model_name: str = '', vector_length: int = '', description: str = '', paper: str = '', repo: str = '', architecture: str = 'Not stated.', tasks: str = 'Not stated.', release_date: datetime.date = '', limitations: str = 'Not stated.', installation: str = 'Not stated.', example: str = 'Not stated.', markdown_filepath: str = '', **kwargs)
-

Bases: object

-

Model definition. -:param model_id: the identity of the model. Required for AutoEncoder. -:param model_name: The name of the model -:param vector_length: The length of the vector -:param description: The description of the encoder -:param paper: The paper which describes the encoder -:param repo: The repository of the model -:param architecture: The architecture of the model. -:param tasks: The downstream tasks that the model was trained on -:param limitations: The limitations of the encoder -:param installation: How to install the encoder. -:param example: The example of the encoder

-
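As a hedged sketch (all field values below are invented for illustration), a definition can be built directly from the constructor above and serialised with to_dict:

```python
# Sketch only: every value here is an illustrative placeholder.
from vectorhub.doc_utils import ModelDefinition

definition = ModelDefinition(
    model_id='text/my-model',
    model_name='My Model',
    vector_length=768,
    description='An illustrative placeholder definition.',
)
print(definition.to_dict())
```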
-
-property DATA_TYPE_TO_EXAMPLE
-
- -
-
-property audio_items_examples
-
- -
-
-property audio_metadata_examples
-
- -
-
-property audio_search_example
-
- -
-
-create_docs()
-

Return a string with the RST documentation of the model.

-
- -
-
-property data_type
-

Returns text/audio/image/qa

-
- -
-
-from_markdown(markdown_filepath: str, encoding='UTF-8', splitter='(\\#\\#+\\ +)|(\\n)', verbose=False)
-

Reads definitions from the markdown. -:param markdown_filepath: The path of the markdown file. -:param encoding: The encoding used to open the Markdown file

-
- -
-
-property image_items_examples
-
- -
-
-property image_metadata_examples
-
- -
-
-property image_search_example
-
- -
-
-property item_examples
-
- -
-
-property metadata_examples
-
- -
-
-property qa_items_examples
-
- -
-
-property qa_metadata_examples
-
- -
-
-property qa_search_example
-
- -
-
-property search_example
-
- -
-
-property text_image_search_example
-
- -
-
-property text_items_examples
-
- -
-
-property text_metadata_examples
-
- -
-
-property text_search_example
-
- -
-
-to_dict(return_base_dictionary=False)
-

Create a dictionary with all the attributes of the model.

-
- -
-
-property vectorai_integration
-
- -
- -
-
-

vectorhub.errors module

-

Errors

-
-
-exception vectorhub.errors.ModelError(message: str)
-

Bases: Exception

-

Base error class for all errors in library

-

The main Vector Hub base error. -:param message: The error message

-

Example

-
>>> raise ModelError("Missing ____.")
-
-
-
-
-args
-
- -
-
-with_traceback()
-

Exception.with_traceback(tb) – -set self.__traceback__ to tb and return self.

-
- -
- -
-
-

vectorhub.import_utils module

-

Utilities for importing libraries.

-
-
-vectorhub.import_utils.get_package_requirements(requirement_type: str)
-

Load in extra_requirements.json from the package

-
- -
-
-vectorhub.import_utils.is_all_dependency_installed(requirement_type: str, raise_warning=True)
-

Returns True/False if the dependency is installed -:param requirement_type: The type of requirement. This can be found in the values in extra_requirements.json -:param raise_warning: Raise warning if True

-
- -
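A minimal sketch of guarding optional imports with this helper (the requirement key is assumed to match an extras name such as the one used in the install instructions earlier in these docs):

```python
# Sketch: only import heavy optional dependencies when they are all installed.
from vectorhub.import_utils import is_all_dependency_installed

# 'encoders-text-tfhub' is assumed to be a valid key into extra_requirements.json.
if is_all_dependency_installed('encoders-text-tfhub'):
    import tensorflow_hub as hub
```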
-
-vectorhub.import_utils.is_dependency_installed(dependency: str)
-

Returns True if the dependency is installed else False.

-
- -
-
-

vectorhub.models_dict module

-

Dictionary For Models

-
-
-

vectorhub.utils module

-

Various utilities for VectorHub.

-
-
-vectorhub.utils.list_installed_models(extra_requirements_file: str = '/home/runner/work/vectorhub/vectorhub/vectorhub/../extra_requirements.json')
-

List models that are installed. -We use resource_filename to resolve relative directory issues.

-
- -
-
-vectorhub.utils.list_models(return_names_only=False)
-

List available models. -:param return_names_only: Return the model names

-
- -
-
-

Module contents

-
-
- - -
- -
- -
-
- -
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/docsrc/Makefile b/docsrc/Makefile deleted file mode 100644 index a7ce0aa9..00000000 --- a/docsrc/Makefile +++ /dev/null @@ -1,21 +0,0 @@ - -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docsrc/README.md b/docsrc/README.md deleted file mode 100644 index cd92da50..00000000 --- a/docsrc/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Making Documentation - -To make documentation, run the following: - -``` -make clean && make html -``` diff --git a/docsrc/auto_encoder.rst b/docsrc/auto_encoder.rst deleted file mode 100644 index 37b09863..00000000 --- a/docsrc/auto_encoder.rst +++ /dev/null @@ -1,31 +0,0 @@ -Guide to using Auto-Encoder -===================================== - -Inspired by transformers' adoption of the auto-models, we created an -AutoEncoder class that allows you to easily get the relevant models. Not to be confused with the autoencoder architecture. - -The relevant models can be found here: - -.. code-block:: python - - from vectorhub import AutoEncoder - encoder = AutoEncoder('text/bert') - encoder.encode("Hi...") - - -To view the list of available models, you can call: - - -.. code-block:: python - - import vectorhub as vh - vh.list_available_auto_models() - -When you instantiate the autoencoder, you will need to pip install -the relevant module. The requirements here can be given here. - -The list of supported models are: - -.. code-block:: python - - ['text/albert', 'text/bert', 'text/labse', 'text/use', 'text/use-multi', 'text/use-lite', 'text/legal-bert', 'audio/fairseq', 'audio/speech-embedding', 'audio/trill', 'audio/trill-distilled', 'audio/vggish', 'audio/yamnet', 'audio/wav2vec', 'image/bit', 'image/bit-medium', 'image/inception', 'image/inception-v2', 'image/inception-v3', 'image/inception-resnet', 'image/mobilenet', 'image/mobilenet-v2', 'image/resnet', 'image/resnet-v2', 'text_text/use-multi-qa', 'text_text/use-qa', 'text_text/dpr', 'text_text/lareqa-qa] diff --git a/docsrc/bi_encoders.text_text.dpr2vec.rst b/docsrc/bi_encoders.text_text.dpr2vec.rst deleted file mode 100644 index 5c98ae01..00000000 --- a/docsrc/bi_encoders.text_text.dpr2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -DPR2Vec ----------------------------------------- - -Transformers -========================================== - -.. automodule:: vectorhub.bi_encoders.text_text.torch_transformers.dpr - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/bi_encoders.text_text.lareqa_qa2vec.rst b/docsrc/bi_encoders.text_text.lareqa_qa2vec.rst deleted file mode 100644 index f34523de..00000000 --- a/docsrc/bi_encoders.text_text.lareqa_qa2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -LAReQA2Vec ----------------------------------------- - -TFHub -========================================== - -.. 
automodule:: vectorhub.bi_encoders.text_text.tfhub.lareqa_qa - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/bi_encoders.text_text.use_qa2vec.rst b/docsrc/bi_encoders.text_text.use_qa2vec.rst deleted file mode 100644 index cf33504c..00000000 --- a/docsrc/bi_encoders.text_text.use_qa2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -USEQA2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.use_qa - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/conf.py b/docsrc/conf.py deleted file mode 100644 index 58f439bc..00000000 --- a/docsrc/conf.py +++ /dev/null @@ -1,67 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys -sys.path.insert(0, os.path.abspath('..')) -import vectorhub - - -# -- Project information ----------------------------------------------------- - -project = 'VectorHub' -copyright = '2020, Vector AI' -author = 'Vector AI' - -# The full version, including alpha/beta/rc tags -release = '0.1' #aix360.__version__ - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', -] - -# document __init__ methods -autoclass_content = 'both' - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -master_doc = 'index' - -autodoc_default_options = { - 'inherited-members': True -} \ No newline at end of file diff --git a/docsrc/encoders.audio.speech_embedding2vec.rst b/docsrc/encoders.audio.speech_embedding2vec.rst deleted file mode 100644 index d29bcfda..00000000 --- a/docsrc/encoders.audio.speech_embedding2vec.rst +++ /dev/null @@ -1,12 +0,0 @@ -SpeechEmbedding2Vec ----------------------------------------- - - - -TFHub -========================================== - -.. 
automodule:: vectorhub.encoders.audio.tfhub.speech_embedding - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.audio.trill2vec.rst b/docsrc/encoders.audio.trill2vec.rst deleted file mode 100644 index b210d44a..00000000 --- a/docsrc/encoders.audio.trill2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -Trill2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.audio.tfhub.trill - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.audio.vectorai2vec.rst b/docsrc/encoders.audio.vectorai2vec.rst deleted file mode 100644 index 478d456d..00000000 --- a/docsrc/encoders.audio.vectorai2vec.rst +++ /dev/null @@ -1,12 +0,0 @@ -ViAudio2Vec ----------------------------------------- - -Vector AI -========================================== - -For Vector AI users to access to our deployed vector models and evaluate embeddings. - -.. automodule:: vectorhub.encoders.audio.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.audio.vggish2vec.rst b/docsrc/encoders.audio.vggish2vec.rst deleted file mode 100644 index e62d94e0..00000000 --- a/docsrc/encoders.audio.vggish2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vggish2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.audio.tfhub.vggish - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.audio.wav2vec.rst b/docsrc/encoders.audio.wav2vec.rst deleted file mode 100644 index 732092d0..00000000 --- a/docsrc/encoders.audio.wav2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -Wav2Vec ----------------------------------------- - -PyTorch -========================================== - -.. automodule:: vectorhub.encoders.audio.pytorch.fairseq - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.audio.yamnet2vec.rst b/docsrc/encoders.audio.yamnet2vec.rst deleted file mode 100644 index 19b7a8f1..00000000 --- a/docsrc/encoders.audio.yamnet2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -Yamnet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.audio.tfhub.yamnet - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.image.bit2vec.rst b/docsrc/encoders.image.bit2vec.rst deleted file mode 100644 index b97fb90f..00000000 --- a/docsrc/encoders.image.bit2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -Bit2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.bit - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.image.inception2vec.rst b/docsrc/encoders.image.inception2vec.rst deleted file mode 100644 index d35052fd..00000000 --- a/docsrc/encoders.image.inception2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -Inception2Vec ----------------------------------------- - -TFHub -========================================== - -.. 
automodule:: vectorhub.encoders.image.tfhub.inception - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.image.inception_resnet2vec.rst b/docsrc/encoders.image.inception_resnet2vec.rst deleted file mode 100644 index 7e2b04a7..00000000 --- a/docsrc/encoders.image.inception_resnet2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -InceptionResnet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.inception_resnet - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.image.mobilenet2vec.rst b/docsrc/encoders.image.mobilenet2vec.rst deleted file mode 100644 index 2f6dc3f0..00000000 --- a/docsrc/encoders.image.mobilenet2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -MobileNet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.mobilenet - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.image.resnet2vec.rst b/docsrc/encoders.image.resnet2vec.rst deleted file mode 100644 index 6a2302e6..00000000 --- a/docsrc/encoders.image.resnet2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -ResNet2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.image.tfhub.resnet - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.image.vectorai2vec.rst b/docsrc/encoders.image.vectorai2vec.rst deleted file mode 100644 index 9ba974a0..00000000 --- a/docsrc/encoders.image.vectorai2vec.rst +++ /dev/null @@ -1,12 +0,0 @@ -ViImage2Vec ----------------------------------------- - -Vector AI -========================================== - -For Vector AI users to access to our deployed vector models and evaluate embeddings. - -.. automodule:: vectorhub.encoders.image.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.albert2vec.rst b/docsrc/encoders.text.albert2vec.rst deleted file mode 100644 index ba44abac..00000000 --- a/docsrc/encoders.text.albert2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -AlBert2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.albert - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.bert2vec.rst b/docsrc/encoders.text.bert2vec.rst deleted file mode 100644 index da2a9ceb..00000000 --- a/docsrc/encoders.text.bert2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -Bert2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.bert - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.labse2vec.rst b/docsrc/encoders.text.labse2vec.rst deleted file mode 100644 index 2e09a5a0..00000000 --- a/docsrc/encoders.text.labse2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -LaBSE2Vec ----------------------------------------- - -TFHub -========================================== - -.. 
automodule:: vectorhub.encoders.text.tfhub.labse - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.legalbert2vec.rst b/docsrc/encoders.text.legalbert2vec.rst deleted file mode 100644 index 85a3906e..00000000 --- a/docsrc/encoders.text.legalbert2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -LegalBert2Vec ----------------------------------------- - -Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.torch_transformers.legal_bert - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.sentencetransformer2vec.rst b/docsrc/encoders.text.sentencetransformer2vec.rst deleted file mode 100644 index 663a7a20..00000000 --- a/docsrc/encoders.text.sentencetransformer2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -SentenceTransformer2Vec ----------------------------------------- - -Sentence-Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.transformer2vec.rst b/docsrc/encoders.text.transformer2vec.rst deleted file mode 100644 index d134efb5..00000000 --- a/docsrc/encoders.text.transformer2vec.rst +++ /dev/null @@ -1,18 +0,0 @@ -Transformer2Vec ----------------------------------------- - -PyTorch Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.torch_transformers.torch_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -Tensorflow Transformers -========================================== - -.. automodule:: vectorhub.encoders.text.tf_transformers.tf_auto_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.use2vec.rst b/docsrc/encoders.text.use2vec.rst deleted file mode 100644 index 8e91f555..00000000 --- a/docsrc/encoders.text.use2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -USE2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.use - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.use_multi2vec.rst b/docsrc/encoders.text.use_multi2vec.rst deleted file mode 100644 index b241643b..00000000 --- a/docsrc/encoders.text.use_multi2vec.rst +++ /dev/null @@ -1,10 +0,0 @@ -USEMulti2Vec ----------------------------------------- - -TFHub -========================================== - -.. automodule:: vectorhub.encoders.text.tfhub.use_multi - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/encoders.text.vectorai2vec.rst b/docsrc/encoders.text.vectorai2vec.rst deleted file mode 100644 index 3bb50aab..00000000 --- a/docsrc/encoders.text.vectorai2vec.rst +++ /dev/null @@ -1,12 +0,0 @@ -ViText2Vec ----------------------------------------- - -Vector AI -========================================== - -For Vector AI users to access to our deployed vector models and evaluate embeddings. - -.. automodule:: vectorhub.encoders.text.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/how_to_add_a_model.rst b/docsrc/how_to_add_a_model.rst deleted file mode 100644 index 2a1b00ed..00000000 --- a/docsrc/how_to_add_a_model.rst +++ /dev/null @@ -1,60 +0,0 @@ - -How To Add Your Model To Vector Hub -===================================== - -We have written a simple 7-step guide to help you add your models here if you have trained them! 
-This should take approximately 30 minutes - 1 hour. Let us know at dev@vctr.ai if you need any help. - -* 1. Fork the project. - -* 2. Identify the minimum requirements for your model, identify the associated module and then add them to the MODEL_REQUIREMENTS in vectorhub/model_dict. - -* 3. Write a brief description about what your model involves. - -* 4. Create a new branch called new_model/____2vec, replace ___ with the model/domain etc. - -* 5. Identify which directory your model should fall under. Here is a basic directory outline. - -.. code-block:: - - |____ encoders - |________ audio - |________ image - |________ text - |____ bi_encoders - |________ text_text - -If you believe your model falls under a new category than we recommend making a new directory! - -* 6. Once you identify the requirements, find the associated module or create a new one if required. -Use the following code as a base for any new models and add an -`encode` and `bulk_encode` method. Both should return lists. - -.. code-block:: python - - from ....import_utils import * - # Import dictionary for model requirements - from ....models_dict import MODEL_REQUIREMENTS - # Add dependencies in if-statement to avoid import breaks in the library - if is_all_dependency_installed(MODEL_REQUIREMENTS['text-bi-encoder-tfhub-use-qa']): - # add imports here - import bert - import numpy as np - import tensorflow as tf - import tensorflow_hub as hub - import tensorflow_text - - from typing import List - # This decorator returns a default vector in case of an error - from ....base import catch_vector_errors - # Base class that provides basic utilities - from ..base import BaseTextText2Vec - - class USEMultiQA2Vec(BaseTextText2Vec): - ... - # Add decorator in case encoding errors and we need a dummy vector. - @catch_vector_errors - def encode(self, text): - pass - -* 7. Submit a PR! diff --git a/docsrc/index.rst b/docsrc/index.rst deleted file mode 100644 index 93a3b598..00000000 --- a/docsrc/index.rst +++ /dev/null @@ -1,94 +0,0 @@ - -Welcome to VectorHub's documentation! -===================================== - -Vector Hub is your home for ___2Vec models! - - - -The rise of deep learning and encoding has meant that there are now explosion of -open-source and proprietary models and techniques that have allowed for distributed -representation of entities. This means the rise of new ____2Vec models that are: - -1) Model-specific - New architecture is introduced. -2) Domain-specific - Architecture is trained on new domain. -3) Language-specific - Architecture is trained in new language. -4) Task-specific - Architecture is trained on new task. - -In order to allow people to understand what these models do and mean, we aim to provide -a hub for these __2vec models. - -Our vision to build a hub that allows people to store these ____2Vec models and provide explanations -for how to best use these encodings while building a flexible framework that allows these -different models to be used easily. - - - - -.. toctree:: - :maxdepth: 2 - :caption: Contents - - intro - how_to_add_a_model - auto_encoder - -.. toctree:: - :maxdepth: 4 - :caption: Text Encoders - - encoders.text.bert2vec - encoders.text.albert2vec - encoders.text.labse2vec - encoders.text.use2vec - encoders.text.use_multi2vec - encoders.text.legalbert2vec - encoders.text.transformer2vec - encoders.text.sentencetransformer2vec - encoders.text.vectorai2vec - - -.. 
toctree:: - :maxdepth: 2 - :caption: Image Encoders - - encoders.image.bit2vec - encoders.image.inception2vec - encoders.image.resnet2vec - encoders.image.inception_resnet2vec - encoders.image.mobilenet2vec - encoders.image.vectorai2vec - -.. toctree:: - :maxdepth: 2 - :caption: Audio Encoders - - encoders.audio.speech_embedding2vec - encoders.audio.trill2vec - encoders.audio.vggish2vec - encoders.audio.yamnet2vec - encoders.audio.wav2vec - encoders.audio.vectorai2vec - - -.. toctree:: - :maxdepth: 4 - :caption: Text Bi-Encoders - - bi_encoders.text_text.use_qa2vec - bi_encoders.text_text.lareqa_qa2vec - bi_encoders.text_text.dpr2vec - -.. toctree:: - :maxdepth: 4 - :caption: Modules - - modules - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docsrc/intro.rst b/docsrc/intro.rst deleted file mode 100644 index a8895187..00000000 --- a/docsrc/intro.rst +++ /dev/null @@ -1,20 +0,0 @@ -What is Vector Hub? -============================== - -The home of ___2Vec Models. - -The rise of deep learning and encoding has meant that there are now explosion of -open-source and proprietary models and techniques that have allowed for distributed -representation of entities. This means the rise of new ____2Vec models that are: - -1) Model-specific - New architecture is introduced. -2) Domain-specific - Architecture is trained on new domain. -3) Language-specific - Architecture is trained in new language. -4) Task-specific - Architecture is trained on new task. - -In order to allow people to understand what these models do and mean, we aim to provide -a hub for these __2vec models. - -Our vision to build a hub that allows people to store these ____2Vec models and provide explanations -for how to best use these encodings while building a flexible framework that allows these -different models to be used easily. diff --git a/docsrc/modules.rst b/docsrc/modules.rst deleted file mode 100644 index b457194d..00000000 --- a/docsrc/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -vectorhub -========= - -.. toctree:: - :maxdepth: 4 - - vectorhub diff --git a/docsrc/vectorhub.bi_encoders.qa.rst b/docsrc/vectorhub.bi_encoders.qa.rst deleted file mode 100644 index 6730e43d..00000000 --- a/docsrc/vectorhub.bi_encoders.qa.rst +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.bi\_encoders.qa package -================================= - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.qa.sentence_transformers - vectorhub.bi_encoders.qa.tfhub - vectorhub.bi_encoders.qa.torch_transformers - -Submodules ----------- - -vectorhub.bi\_encoders.qa.base module -------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.qa - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.qa.sentence_transformers.rst b/docsrc/vectorhub.bi_encoders.qa.sentence_transformers.rst deleted file mode 100644 index 09245b91..00000000 --- a/docsrc/vectorhub.bi_encoders.qa.sentence_transformers.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.qa.sentence\_transformers package -======================================================== - -Submodules ----------- - -vectorhub.bi\_encoders.qa.sentence\_transformers.distilroberta\_qa module -------------------------------------------------------------------------- - -.. 
automodule:: vectorhub.bi_encoders.qa.sentence_transformers.distilroberta_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.qa.sentence_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.qa.tfhub.rst b/docsrc/vectorhub.bi_encoders.qa.tfhub.rst deleted file mode 100644 index 7628dc6c..00000000 --- a/docsrc/vectorhub.bi_encoders.qa.tfhub.rst +++ /dev/null @@ -1,37 +0,0 @@ -vectorhub.bi\_encoders.qa.tfhub package -======================================= - -Submodules ----------- - -vectorhub.bi\_encoders.qa.tfhub.lareqa\_qa module -------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.tfhub.lareqa_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.qa.tfhub.use\_multi\_qa module ------------------------------------------------------ - -.. automodule:: vectorhub.bi_encoders.qa.tfhub.use_multi_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.qa.tfhub.use\_qa module ----------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.tfhub.use_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.qa.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.qa.torch_transformers.rst b/docsrc/vectorhub.bi_encoders.qa.torch_transformers.rst deleted file mode 100644 index 876b24df..00000000 --- a/docsrc/vectorhub.bi_encoders.qa.torch_transformers.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.qa.torch\_transformers package -===================================================== - -Submodules ----------- - -vectorhub.bi\_encoders.qa.torch\_transformers.dpr module --------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.qa.torch_transformers.dpr - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.qa.torch_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.rst b/docsrc/vectorhub.bi_encoders.rst deleted file mode 100644 index 01b15752..00000000 --- a/docsrc/vectorhub.bi_encoders.rst +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.bi\_encoders package -============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.text_text - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.text_image.rst b/docsrc/vectorhub.bi_encoders.text_image.rst deleted file mode 100644 index ea446af9..00000000 --- a/docsrc/vectorhub.bi_encoders.text_image.rst +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.bi\_encoders.text\_image package -========================================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.text_image.torch - -Module contents ---------------- - -.. 
automodule:: vectorhub.bi_encoders.text_image - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.text_image.torch.rst b/docsrc/vectorhub.bi_encoders.text_image.torch.rst deleted file mode 100644 index b1627ee4..00000000 --- a/docsrc/vectorhub.bi_encoders.text_image.torch.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.text\_image.torch package -================================================ - -Submodules ----------- - -vectorhub.bi\_encoders.text\_image.torch.clip2vec module --------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_image.torch.clip2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_image.torch - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.text_text.rst b/docsrc/vectorhub.bi_encoders.text_text.rst deleted file mode 100644 index 843d4c67..00000000 --- a/docsrc/vectorhub.bi_encoders.text_text.rst +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.bi\_encoders.text\_text package -========================================= - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders.text_text.sentence_transformers - vectorhub.bi_encoders.text_text.tfhub - vectorhub.bi_encoders.text_text.torch_transformers - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.base module ---------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_text - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.text_text.sentence_transformers.rst b/docsrc/vectorhub.bi_encoders.text_text.sentence_transformers.rst deleted file mode 100644 index 1f198dc1..00000000 --- a/docsrc/vectorhub.bi_encoders.text_text.sentence_transformers.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.text\_text.sentence\_transformers package -================================================================ - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.sentence\_transformers.distilroberta\_qa module ---------------------------------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.sentence_transformers.distilroberta_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_text.sentence_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.text_text.tfhub.rst b/docsrc/vectorhub.bi_encoders.text_text.tfhub.rst deleted file mode 100644 index 6f4e811d..00000000 --- a/docsrc/vectorhub.bi_encoders.text_text.tfhub.rst +++ /dev/null @@ -1,37 +0,0 @@ -vectorhub.bi\_encoders.text\_text.tfhub package -=============================================== - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.tfhub.lareqa\_qa module ---------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.lareqa_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.text\_text.tfhub.use\_multi\_qa module -------------------------------------------------------------- - -.. 
automodule:: vectorhub.bi_encoders.text_text.tfhub.use_multi_qa - :members: - :undoc-members: - :show-inheritance: - -vectorhub.bi\_encoders.text\_text.tfhub.use\_qa module ------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub.use_qa - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_text.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.bi_encoders.text_text.torch_transformers.rst b/docsrc/vectorhub.bi_encoders.text_text.torch_transformers.rst deleted file mode 100644 index bcb8cdd7..00000000 --- a/docsrc/vectorhub.bi_encoders.text_text.torch_transformers.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.bi\_encoders.text\_text.torch\_transformers package -============================================================= - -Submodules ----------- - -vectorhub.bi\_encoders.text\_text.torch\_transformers.dpr module ----------------------------------------------------------------- - -.. automodule:: vectorhub.bi_encoders.text_text.torch_transformers.dpr - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.bi_encoders.text_text.torch_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.audio.pytorch.rst b/docsrc/vectorhub.encoders.audio.pytorch.rst deleted file mode 100644 index 8861664b..00000000 --- a/docsrc/vectorhub.encoders.audio.pytorch.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.audio.pytorch package -======================================== - -Submodules ----------- - -vectorhub.encoders.audio.pytorch.wav2vec module ------------------------------------------------ - -.. automodule:: vectorhub.encoders.audio.pytorch.wav2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio.pytorch - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.audio.rst b/docsrc/vectorhub.encoders.audio.rst deleted file mode 100644 index db186037..00000000 --- a/docsrc/vectorhub.encoders.audio.rst +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.encoders.audio package -================================ - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.audio.pytorch - vectorhub.encoders.audio.tfhub - vectorhub.encoders.audio.vectorai - -Submodules ----------- - -vectorhub.encoders.audio.base module ------------------------------------- - -.. automodule:: vectorhub.encoders.audio.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.audio.tfhub.rst b/docsrc/vectorhub.encoders.audio.tfhub.rst deleted file mode 100644 index 8a7f9cef..00000000 --- a/docsrc/vectorhub.encoders.audio.tfhub.rst +++ /dev/null @@ -1,53 +0,0 @@ -vectorhub.encoders.audio.tfhub package -====================================== - -Submodules ----------- - -vectorhub.encoders.audio.tfhub.speech\_embedding module -------------------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.speech_embedding - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.trill module -------------------------------------------- - -.. 
automodule:: vectorhub.encoders.audio.tfhub.trill - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.trill\_distilled module ------------------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.trill_distilled - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.vggish module --------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.vggish - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.audio.tfhub.yamnet module --------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.tfhub.yamnet - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.audio.vectorai.rst b/docsrc/vectorhub.encoders.audio.vectorai.rst deleted file mode 100644 index 061ef40b..00000000 --- a/docsrc/vectorhub.encoders.audio.vectorai.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.audio.vectorai package -========================================= - -Submodules ----------- - -vectorhub.encoders.audio.vectorai.vi\_encoder module ----------------------------------------------------- - -.. automodule:: vectorhub.encoders.audio.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.audio.vectorai - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.code.rst b/docsrc/vectorhub.encoders.code.rst deleted file mode 100644 index 70612ffc..00000000 --- a/docsrc/vectorhub.encoders.code.rst +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.encoders.code package -=============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.code.transformers - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.code - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.code.transformers.rst b/docsrc/vectorhub.encoders.code.transformers.rst deleted file mode 100644 index f02c9aa2..00000000 --- a/docsrc/vectorhub.encoders.code.transformers.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.code.transformers package -============================================ - -Submodules ----------- - -vectorhub.encoders.code.transformers.codebert module ----------------------------------------------------- - -.. automodule:: vectorhub.encoders.code.transformers.codebert - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.code.transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.face.rst b/docsrc/vectorhub.encoders.face.rst deleted file mode 100644 index 5322a010..00000000 --- a/docsrc/vectorhub.encoders.face.rst +++ /dev/null @@ -1,18 +0,0 @@ -vectorhub.encoders.face package -=============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.face.tf - -Module contents ---------------- - -.. 
automodule:: vectorhub.encoders.face - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.face.tf.rst b/docsrc/vectorhub.encoders.face.tf.rst deleted file mode 100644 index f766ba0a..00000000 --- a/docsrc/vectorhub.encoders.face.tf.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.face.tf package -================================== - -Submodules ----------- - -vectorhub.encoders.face.tf.face2vec module ------------------------------------------- - -.. automodule:: vectorhub.encoders.face.tf.face2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.face.tf - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.image.fastai.rst b/docsrc/vectorhub.encoders.image.fastai.rst deleted file mode 100644 index 3787eb39..00000000 --- a/docsrc/vectorhub.encoders.image.fastai.rst +++ /dev/null @@ -1,29 +0,0 @@ -vectorhub.encoders.image.fastai package -======================================= - -Submodules ----------- - -vectorhub.encoders.image.fastai.base module -------------------------------------------- - -.. automodule:: vectorhub.encoders.image.fastai.base - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.fastai.resnet module ---------------------------------------------- - -.. automodule:: vectorhub.encoders.image.fastai.resnet - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image.fastai - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.image.rst b/docsrc/vectorhub.encoders.image.rst deleted file mode 100644 index 59b7b5ce..00000000 --- a/docsrc/vectorhub.encoders.image.rst +++ /dev/null @@ -1,31 +0,0 @@ -vectorhub.encoders.image package -================================ - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.image.fastai - vectorhub.encoders.image.tfhub - vectorhub.encoders.image.vectorai - -Submodules ----------- - -vectorhub.encoders.image.base module ------------------------------------- - -.. automodule:: vectorhub.encoders.image.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.image.tensorflow.rst b/docsrc/vectorhub.encoders.image.tensorflow.rst deleted file mode 100644 index 537a0cfd..00000000 --- a/docsrc/vectorhub.encoders.image.tensorflow.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.image.tensorflow package -=========================================== - -Submodules ----------- - -vectorhub.encoders.image.tensorflow.face2vec module ---------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tensorflow.face2vec - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image.tensorflow - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.image.tfhub.rst b/docsrc/vectorhub.encoders.image.tfhub.rst deleted file mode 100644 index 638c8809..00000000 --- a/docsrc/vectorhub.encoders.image.tfhub.rst +++ /dev/null @@ -1,93 +0,0 @@ -vectorhub.encoders.image.tfhub package -====================================== - -Submodules ----------- - -vectorhub.encoders.image.tfhub.bit module ------------------------------------------ - -.. 
automodule:: vectorhub.encoders.image.tfhub.bit - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.bit\_medium module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.bit_medium - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inception\_resnet module -------------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inception_resnet - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inceptionv1 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inceptionv1 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inceptionv2 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inceptionv2 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.inceptionv3 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.inceptionv3 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.mobilenet module ------------------------------------------------ - -.. automodule:: vectorhub.encoders.image.tfhub.mobilenet - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.mobilenetv2 module -------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.mobilenetv2 - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.resnet module --------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.resnet - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.image.tfhub.resnetv2 module ----------------------------------------------- - -.. automodule:: vectorhub.encoders.image.tfhub.resnetv2 - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.image.vectorai.rst b/docsrc/vectorhub.encoders.image.vectorai.rst deleted file mode 100644 index 2d954655..00000000 --- a/docsrc/vectorhub.encoders.image.vectorai.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.image.vectorai package -========================================= - -Submodules ----------- - -vectorhub.encoders.image.vectorai.vi\_encoder module ----------------------------------------------------- - -.. automodule:: vectorhub.encoders.image.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.image.vectorai - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.rst b/docsrc/vectorhub.encoders.rst deleted file mode 100644 index d3ded0ab..00000000 --- a/docsrc/vectorhub.encoders.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders package -========================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.audio - vectorhub.encoders.image - vectorhub.encoders.text - vectorhub.encoders.video - -Module contents ---------------- - -.. 
automodule:: vectorhub.encoders - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.text.rst b/docsrc/vectorhub.encoders.text.rst deleted file mode 100644 index a6ac652d..00000000 --- a/docsrc/vectorhub.encoders.text.rst +++ /dev/null @@ -1,33 +0,0 @@ -vectorhub.encoders.text package -=============================== - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.encoders.text.sentence_transformers - vectorhub.encoders.text.tf_transformers - vectorhub.encoders.text.tfhub - vectorhub.encoders.text.torch_transformers - vectorhub.encoders.text.vectorai - -Submodules ----------- - -vectorhub.encoders.text.base module ------------------------------------ - -.. automodule:: vectorhub.encoders.text.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.text.sentence_transformers.rst b/docsrc/vectorhub.encoders.text.sentence_transformers.rst deleted file mode 100644 index 8a3e1b17..00000000 --- a/docsrc/vectorhub.encoders.text.sentence_transformers.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.text.sentence\_transformers package -====================================================== - -Submodules ----------- - -vectorhub.encoders.text.sentence\_transformers.sentence\_auto\_transformers module ----------------------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.sentence_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.text.tf_transformers.rst b/docsrc/vectorhub.encoders.text.tf_transformers.rst deleted file mode 100644 index 26b8d6dd..00000000 --- a/docsrc/vectorhub.encoders.text.tf_transformers.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.text.tf\_transformers package -================================================ - -Submodules ----------- - -vectorhub.encoders.text.tf\_transformers.tf\_auto\_transformers module ----------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tf_transformers.tf_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.tf_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.text.tfhub.rst b/docsrc/vectorhub.encoders.text.tfhub.rst deleted file mode 100644 index ff2e5ea2..00000000 --- a/docsrc/vectorhub.encoders.text.tfhub.rst +++ /dev/null @@ -1,69 +0,0 @@ -vectorhub.encoders.text.tfhub package -===================================== - -Submodules ----------- - -vectorhub.encoders.text.tfhub.albert module -------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.albert - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.bert module ------------------------------------------ - -.. automodule:: vectorhub.encoders.text.tfhub.bert - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.elmo module ------------------------------------------ - -.. 
automodule:: vectorhub.encoders.text.tfhub.elmo - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.labse module ------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.labse - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.use module ----------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.use - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.use\_lite module ----------------------------------------------- - -.. automodule:: vectorhub.encoders.text.tfhub.use_lite - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.tfhub.use\_multi module ------------------------------------------------ - -.. automodule:: vectorhub.encoders.text.tfhub.use_multi - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.tfhub - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.text.torch_transformers.rst b/docsrc/vectorhub.encoders.text.torch_transformers.rst deleted file mode 100644 index 65b05a97..00000000 --- a/docsrc/vectorhub.encoders.text.torch_transformers.rst +++ /dev/null @@ -1,37 +0,0 @@ -vectorhub.encoders.text.torch\_transformers package -=================================================== - -Submodules ----------- - -vectorhub.encoders.text.torch\_transformers.legal\_bert module --------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.torch_transformers.legal_bert - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.torch\_transformers.torch\_auto\_transformers module ----------------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.torch_transformers.torch_auto_transformers - :members: - :undoc-members: - :show-inheritance: - -vectorhub.encoders.text.torch\_transformers.torch\_longformers module ---------------------------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.torch_transformers.torch_longformers - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.torch_transformers - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.text.vectorai.rst b/docsrc/vectorhub.encoders.text.vectorai.rst deleted file mode 100644 index c6463a14..00000000 --- a/docsrc/vectorhub.encoders.text.vectorai.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.text.vectorai package -======================================== - -Submodules ----------- - -vectorhub.encoders.text.vectorai.vi\_encoder module ---------------------------------------------------- - -.. automodule:: vectorhub.encoders.text.vectorai.vi_encoder - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.text.vectorai - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.encoders.video.rst b/docsrc/vectorhub.encoders.video.rst deleted file mode 100644 index a68e2bbc..00000000 --- a/docsrc/vectorhub.encoders.video.rst +++ /dev/null @@ -1,21 +0,0 @@ -vectorhub.encoders.video package -================================ - -Submodules ----------- - -vectorhub.encoders.video.sampler module ---------------------------------------- - -.. 
automodule:: vectorhub.encoders.video.sampler - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub.encoders.video - :members: - :undoc-members: - :show-inheritance: diff --git a/docsrc/vectorhub.rst b/docsrc/vectorhub.rst deleted file mode 100644 index 7442ecb6..00000000 --- a/docsrc/vectorhub.rst +++ /dev/null @@ -1,78 +0,0 @@ -vectorhub package -================= - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - vectorhub.bi_encoders - vectorhub.encoders - -Submodules ----------- - -vectorhub.auto\_encoder module ------------------------------- - -.. automodule:: vectorhub.auto_encoder - :members: - :undoc-members: - :show-inheritance: - -vectorhub.base module ---------------------- - -.. automodule:: vectorhub.base - :members: - :undoc-members: - :show-inheritance: - -vectorhub.doc\_utils module ---------------------------- - -.. automodule:: vectorhub.doc_utils - :members: - :undoc-members: - :show-inheritance: - -vectorhub.errors module ------------------------ - -.. automodule:: vectorhub.errors - :members: - :undoc-members: - :show-inheritance: - -vectorhub.import\_utils module ------------------------------- - -.. automodule:: vectorhub.import_utils - :members: - :undoc-members: - :show-inheritance: - -vectorhub.models\_dict module ------------------------------ - -.. automodule:: vectorhub.models_dict - :members: - :undoc-members: - :show-inheritance: - -vectorhub.utils module ----------------------- - -.. automodule:: vectorhub.utils - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: vectorhub - :members: - :undoc-members: - :show-inheritance: diff --git a/examples/Quickstart.ipynb b/examples/Quickstart.ipynb deleted file mode 100644 index 28725f75..00000000 --- a/examples/Quickstart.ipynb +++ /dev/null @@ -1,5855 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "VectorHub.ipynb", - "provenance": [], - "collapsed_sections": [], - "machine_shape": "hm" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "10a7a326a9d24d72a0e912a02b07dc7b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_e8d18ff4879647288637fe05bcfcd326", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_8c7b1b6de51640f2baccfb0c6613c3f6", - "IPY_MODEL_f74d902ee7914d9e92e3ee1499b95f9b" - ] - } - }, - "e8d18ff4879647288637fe05bcfcd326": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": 
"LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "8c7b1b6de51640f2baccfb0c6613c3f6": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_eba2ff9d10a849a4bd67ac768d39eeda", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 433, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 433, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_3a6c1bb3c8334654963c2cde9ae78e86" - } - }, - "f74d902ee7914d9e92e3ee1499b95f9b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_12e86dc0691c4c4f8110069ebccacbd8", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 433/433 [00:10<00:00, 42.4B/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_e62e753741ac499cb50597d0cb14136b" - } - }, - "eba2ff9d10a849a4bd67ac768d39eeda": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "3a6c1bb3c8334654963c2cde9ae78e86": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "12e86dc0691c4c4f8110069ebccacbd8": { 
- "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "e62e753741ac499cb50597d0cb14136b": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "11043cb2bde147fba0bab6ceef446a35": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_6aed205579554f818283128a75b630b6", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_5093f3fca112492f815574f75a059afc", - "IPY_MODEL_f9f33ccfcb254fccadf70eb33a9fb244" - ] - } - }, - "6aed205579554f818283128a75b630b6": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "5093f3fca112492f815574f75a059afc": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - 
"_view_name": "ProgressView", - "style": "IPY_MODEL_ff36308704064ae7a41464bc4c407e65", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 440473133, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 440473133, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_532600a5a4f446de8af1f8dbe503caa2" - } - }, - "f9f33ccfcb254fccadf70eb33a9fb244": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_4bfc0f8acb2f4c068561c743eae5e67e", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 440M/440M [00:06<00:00, 66.2MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_a8d1f129e80b411aabae0dae733584ca" - } - }, - "ff36308704064ae7a41464bc4c407e65": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "532600a5a4f446de8af1f8dbe503caa2": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "4bfc0f8acb2f4c068561c743eae5e67e": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "a8d1f129e80b411aabae0dae733584ca": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - 
"_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "59f65145880f4c3ea975643978c1544a": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_6119357bc98245bab1a9f8b7724e97b5", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_c2678746f9a04e0fa1968833318ff25e", - "IPY_MODEL_7f85413cfdc4424a9fb18f4d279f8dc6" - ] - } - }, - "6119357bc98245bab1a9f8b7724e97b5": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "c2678746f9a04e0fa1968833318ff25e": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_e9d93263cf984496b52cf2280c85bdc4", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 231508, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 231508, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_bff44fc5c6194fa19343cda7bfea99ad" - } - }, - "7f85413cfdc4424a9fb18f4d279f8dc6": { - 
"model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_04d0c10e9f2644dd99b2e484b80621ce", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 232k/232k [00:00<00:00, 927kB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_a4c3f97532d24f349af8f145c997ab4d" - } - }, - "e9d93263cf984496b52cf2280c85bdc4": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "bff44fc5c6194fa19343cda7bfea99ad": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "04d0c10e9f2644dd99b2e484b80621ce": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "a4c3f97532d24f349af8f145c997ab4d": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": 
null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "27b4458473054050a1997b160d502d8d": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_689844b256034e14a6ea9aaaeaad34b4", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_fd428e37db8741afaa5a9805dcd29e5d", - "IPY_MODEL_31f65c6982954df8a00f2ff17872e5cb" - ] - } - }, - "689844b256034e14a6ea9aaaeaad34b4": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "fd428e37db8741afaa5a9805dcd29e5d": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_c7668fa4508849c7bef7e15165f89826", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 536063208, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 536063208, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_728a6cd994d34503890f634cd4670c56" - } - }, - "31f65c6982954df8a00f2ff17872e5cb": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_a3ca4abf8e684658b1f1ed9c1dfa8f03", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 536M/536M [00:07<00:00, 67.6MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_ad049ccdefd74201ae674782a16cbadc" - } - }, - 
"c7668fa4508849c7bef7e15165f89826": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "728a6cd994d34503890f634cd4670c56": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "a3ca4abf8e684658b1f1ed9c1dfa8f03": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "ad049ccdefd74201ae674782a16cbadc": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "6c0756b756814056a98b6fea42067eb7": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": 
"@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_6aad6086cec746ecb35e3ee9686e9ecb", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_00deaf3ae9d14416a0c1c3af2b4ed892", - "IPY_MODEL_b46e2e27e6da427bba35d37fa07c56cb" - ] - } - }, - "6aad6086cec746ecb35e3ee9686e9ecb": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "00deaf3ae9d14416a0c1c3af2b4ed892": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_69cda9906f0b44e7bf077de9069198b1", - "_dom_classes": [], - "description": "100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 950500491, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 950500491, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_09142704d53248938668d64d44920f88" - } - }, - "b46e2e27e6da427bba35d37fa07c56cb": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_a8599cb5c4974d6799d72435337ac40e", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 906M/906M [00:09<00:00, 96.3MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_faf957de7821413c94538361daf1b155" - } - }, - "69cda9906f0b44e7bf077de9069198b1": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "09142704d53248938668d64d44920f88": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - 
"grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "a8599cb5c4974d6799d72435337ac40e": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "faf957de7821413c94538361daf1b155": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "468ffb10cdee4cddb44c4d391e2331d7": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_99a03679273848dc890e7aadde50d358", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_03bb795fb145491ca6585aff703d02bb", - "IPY_MODEL_4f337836c475426ba59049432e3ccfbd" - ] - } - }, - "99a03679273848dc890e7aadde50d358": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": 
"@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "03bb795fb145491ca6585aff703d02bb": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_0cefb6cc80f846129dae724baf6377ed", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 466062, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 466062, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_059caadbf542453cae0cf54b8076fd09" - } - }, - "4f337836c475426ba59049432e3ccfbd": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_504bf105d52f4892bb6fd47934609d55", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 466k/466k [00:00<00:00, 1.37MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_55f3f97c2bd544abb0d51a70de53ead2" - } - }, - "0cefb6cc80f846129dae724baf6377ed": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "059caadbf542453cae0cf54b8076fd09": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - 
"max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "504bf105d52f4892bb6fd47934609d55": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "55f3f97c2bd544abb0d51a70de53ead2": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "a3dceaeb4dc24736a71e556ace34e8b7": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_e36128b755d543d7972b5db7a4514252", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_88ae11f8dfa44216bca6b8e99fbf2ff9", - "IPY_MODEL_1089b49caf6d4d7cb39ab4221b510564" - ] - } - }, - "e36128b755d543d7972b5db7a4514252": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": 
null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "88ae11f8dfa44216bca6b8e99fbf2ff9": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_e059e18a12634bb184cff8d10276ab3b", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 492, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 492, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_fc48e1fcdd9e40dd8f167c8f6c49c43b" - } - }, - "1089b49caf6d4d7cb39ab4221b510564": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_686d89d247a6430c885cb8d905abd152", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 492/492 [00:06<00:00, 72.6B/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_78ee3448ea56418ca3a8c923dffa2e69" - } - }, - "e059e18a12634bb184cff8d10276ab3b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "fc48e1fcdd9e40dd8f167c8f6c49c43b": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "686d89d247a6430c885cb8d905abd152": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": 
"DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "78ee3448ea56418ca3a8c923dffa2e69": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "54675ff29cf24d948721d5444fb3991b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_71023333f39a4ef2a39924d659b3c2dd", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_1913b874f4cc42b59756598ee2b880ee", - "IPY_MODEL_9fdf013a3aea40dd80ba7b63c8c34236" - ] - } - }, - "71023333f39a4ef2a39924d659b3c2dd": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "1913b874f4cc42b59756598ee2b880ee": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_cbcb794b99334009b58d11a38eaf30be", - "_dom_classes": [], - "description": "Downloading: 100%", - 
"_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 437983985, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 437983985, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_c1e01112215443908508793186d59a1b" - } - }, - "9fdf013a3aea40dd80ba7b63c8c34236": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_90b7271078064895b05e3d131e436478", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 438M/438M [00:06<00:00, 66.2MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_0127827e68c6474e84c8b36c629801ea" - } - }, - "cbcb794b99334009b58d11a38eaf30be": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "c1e01112215443908508793186d59a1b": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "90b7271078064895b05e3d131e436478": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "0127827e68c6474e84c8b36c629801ea": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - 
"width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "6bd5b04a04c54b3e9fc243435f45b54f": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_b4d1a3eff5464d55a4ba35fa2b9fa8d2", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_0e2d496b16b14df8980a28ead333beb2", - "IPY_MODEL_40afc45883c5440a8e12b140b86a2acc" - ] - } - }, - "b4d1a3eff5464d55a4ba35fa2b9fa8d2": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "0e2d496b16b14df8980a28ead333beb2": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_2ef07b02c6cd41b7addd14e3a59cc469", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 493, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 493, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_07ff7c7abacc43cbba21b6ca74d35b81" - } - }, - "40afc45883c5440a8e12b140b86a2acc": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": 
"IPY_MODEL_8ede83b5635f4983a41d2fdac2f16f47", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 493/493 [00:06<00:00, 73.0B/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_1c109a0a990644f7a9e061e9dbbf99a8" - } - }, - "2ef07b02c6cd41b7addd14e3a59cc469": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "07ff7c7abacc43cbba21b6ca74d35b81": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "8ede83b5635f4983a41d2fdac2f16f47": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "1c109a0a990644f7a9e061e9dbbf99a8": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, 
- "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "1d3e1828b1174221835589f86d2c4435": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_c99999cdf5104c4093bab35a58d74164", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_70358afa3edf4693b243f5e25034a824", - "IPY_MODEL_cbfd90f162e24d5b95c7f2048e732202" - ] - } - }, - "c99999cdf5104c4093bab35a58d74164": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "70358afa3edf4693b243f5e25034a824": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_1950b7a29fe04faa858921f1c1a8d17d", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 437986065, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 437986065, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_ea3f4f587199405e83da54bc225050ef" - } - }, - "cbfd90f162e24d5b95c7f2048e732202": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_ebcb2b2a0e124960b39e75615efab973", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 438M/438M [00:06<00:00, 66.3MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_7cd24fc74ecf4cb9b1c0093aa8c9e36e" - } - }, - "1950b7a29fe04faa858921f1c1a8d17d": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": 
"StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "ea3f4f587199405e83da54bc225050ef": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "ebcb2b2a0e124960b39e75615efab973": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "7cd24fc74ecf4cb9b1c0093aa8c9e36e": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "3adf17fa083f454b9405ee91b197cf51": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": 
"IPY_MODEL_40729fd817904e1fbf04e66ca3c1a5f3", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_b6561082a18943578cf4f3049a6dc032", - "IPY_MODEL_f8dcfb84ac8e43aa9dcb2456e631c0de" - ] - } - }, - "40729fd817904e1fbf04e66ca3c1a5f3": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "b6561082a18943578cf4f3049a6dc032": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_5caa05889472498e9de1ab80d5caced3", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 484, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 484, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_244050e4d61c414a92cd42a2e9e9d866" - } - }, - "f8dcfb84ac8e43aa9dcb2456e631c0de": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_7f4fca8b171044a083c7201c4c20499e", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 484/484 [00:07<00:00, 65.9B/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_78d1423b47b045d1968b30f3d43a6f52" - } - }, - "5caa05889472498e9de1ab80d5caced3": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "244050e4d61c414a92cd42a2e9e9d866": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": 
"1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "7f4fca8b171044a083c7201c4c20499e": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "78d1423b47b045d1968b30f3d43a6f52": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "da00be1bbbef4e3482e4c89756ecb473": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_ce390e69a3c24766ad1cf1b4edaa5a50", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_1e9f0f242e2649d284fcfe805efef50b", - "IPY_MODEL_b17aa131e1b24942815d1fa3412b6fc4" - ] - } - }, - "ce390e69a3c24766ad1cf1b4edaa5a50": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": 
null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "1e9f0f242e2649d284fcfe805efef50b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_afbb11a6437b4ac885532a274310772e", - "_dom_classes": [], - "description": "Downloading: 100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 437998572, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 437998572, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_dc8dceabae6b499d8978469269946f4b" - } - }, - "b17aa131e1b24942815d1fa3412b6fc4": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_2fcb76bc3e9c41758067530b8523b97f", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 438M/438M [00:07<00:00, 62.4MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_efd7ee1d9d3b42db81d1e6153e614810" - } - }, - "afbb11a6437b4ac885532a274310772e": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "dc8dceabae6b499d8978469269946f4b": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - 
"grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "2fcb76bc3e9c41758067530b8523b97f": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - "efd7ee1d9d3b42db81d1e6153e614810": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - } - } - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "1OjKDSQdl1DU" - }, - "source": [ - "# Install Vector Hub\n", - "Feel free to contact the vector hub dev team at dev@vctr.ai for support" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jWvYdKHZsAbW" - }, - "source": [ - "# #for installing vectorhub from source\n", - "# %%capture\n", - "# !git clone https://github.com/vector-ai/vectorhub\n", - "\n", - "# %cd vectorhub\n", - "# !pip install -e .[all]" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "fOYg9Hhgy9uM" - }, - "source": [ - "%%capture\n", - "!pip install vectorhub[all]" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "Wl-c-nfD_mxH", - "outputId": "5f932e5c-562a-427c-88dc-0ceca7dcc13b", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - } - }, - "source": [ - "import vectorhub\n", - "vectorhub.__version__" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - }, - "text/plain": [ - "'1.0.5'" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 4 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2cF3fOdPj9qU" - }, - "source": [ - "# Text2Vec - Turn Text to Vectors" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SOyrTKAXkDkH" - }, - "source": [ - "## TFHub Models\n", - "!pip install vectorhub[encoders-text-tfhub]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": 
"qfq5gOkpkGtq" - }, - "source": [ - "### BERT2Vec\n", - "Bidirectional Encoder Representations from Transformers" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "f0NQEzXJkJ6Y", - "outputId": "560f297e-0d29-437a-e90f-96167f42b84b", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.text.tfhub import Bert2Vec\n", - "model = Bert2Vec()\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Using /tmp/tfhub_modules to cache modules.\n", - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2'.\n", - "INFO:absl:Downloaded https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2, Total size: 1.26GB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2'.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(1024,\n", - " [-0.032492902129888535,\n", - " -0.02965797670185566,\n", - " 0.03366885706782341,\n", - " -0.03354906663298607,\n", - " -0.03365884721279144,\n", - " 0.011130149476230145,\n", - " -0.03366675600409508,\n", - " 0.03364669159054756,\n", - " 0.033499013632535934,\n", - " -0.03342165797948837])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 5 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ewWbLuoBkU02" - }, - "source": [ - "### Albert2Vec\n", - "A Lite Bert" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "zLslSstkjVdc", - "outputId": "c7d15239-188d-45bc-8156-4fab7e543529", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.text.tfhub import Albert2Vec\n", - "model = Albert2Vec()\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/tensorflow/albert_en_base/1'.\n", - "INFO:absl:Downloaded https://tfhub.dev/tensorflow/albert_en_base/1, Total size: 48.10MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/tensorflow/albert_en_base/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [0.03405428305268288,\n", - " -0.03524768725037575,\n", - " 0.031237035989761353,\n", - " -0.03513311222195625,\n", - " -0.02884356677532196,\n", - " -0.03916792944073677,\n", - " 0.03403925895690918,\n", - " -0.03415806218981743,\n", - " 0.03527441993355751,\n", - " -0.04166470468044281])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 6 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "dk2blUVakwtx" - }, - "source": [ - "### LaBSE2Vec\n", - "Language-agnostic BERT Sentence Embedding" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "oJ_D7lFyjl7j", - "outputId": "012f49ce-25c6-4ede-ecb8-e53c2b7f2356", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.text.tfhub import LaBSE2Vec\n", - "model = LaBSE2Vec()\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - 
"INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/LaBSE/1'.\n", - "INFO:absl:Downloading https://tfhub.dev/google/LaBSE/1: 1.16GB\n", - "INFO:absl:Downloaded https://tfhub.dev/google/LaBSE/1, Total size: 1.76GB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/LaBSE/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [0.027167297899723053,\n", - " -0.005929017439484596,\n", - " 0.019955776631832123,\n", - " -0.06078488379716873,\n", - " -0.026142442598938942,\n", - " 0.03975269943475723,\n", - " 0.029897591099143028,\n", - " -0.04460829496383667,\n", - " -0.037575941532850266,\n", - " 0.058584559708833694])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 7 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "W4zQF22zlC2k" - }, - "source": [ - "### USE2Vec\n", - "Universal Sentence Encoder" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "yDX_7qfllCPe", - "outputId": "1bf9074a-6596-43df-dbfb-9f2085480960", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.text.tfhub import USE2Vec\n", - "model = USE2Vec()\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/universal-sentence-encoder/4, Total size: 987.47MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:5 out of the last 5 calls to .restored_function_body at 0x7faf9317cd90> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:5 out of the last 5 calls to .restored_function_body at 0x7faf9317cd90> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(512,\n", - " [-0.021693523973226547,\n", - " -0.003992983140051365,\n", - " 0.06716524064540863,\n", - " -0.028918830677866936,\n", - " -0.008241272531449795,\n", - " 0.03175508603453636,\n", - " -0.09062724560499191,\n", - " -0.018945898860692978,\n", - " 0.0801311507821083,\n", - " 0.0006432731752283871])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 8 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BVIipsAqlPva" - }, - "source": [ - "### USEMulti2Vec\n", - "Universal Sentence Encoder Multilingual" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "rfQQl6JjlOgt", - "outputId": "e3ce3898-f8bf-4d0f-e362-18f557ed7a6f", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.text.tfhub import USEMulti2Vec\n", - "model = USEMulti2Vec()\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/universal-sentence-encoder-multilingual/3, Total size: 266.88MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:6 out of the last 6 calls to .restored_function_body at 0x7fad8bfe5400> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:6 out of the last 6 calls to .restored_function_body at 0x7fad8bfe5400> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:7 out of the last 7 calls to .restored_function_body at 0x7fad8bee7268> triggered tf.function retracing. 
Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:7 out of the last 7 calls to .restored_function_body at 0x7fad8bee7268> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:8 out of the last 8 calls to .restored_function_body at 0x7faf955247b8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:8 out of the last 8 calls to .restored_function_body at 0x7faf955247b8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(512,\n", - " [-0.0055238655768334866,\n", - " 0.0059953066520392895,\n", - " -0.014805448241531849,\n", - " 0.010989478789269924,\n", - " -0.09355521947145462,\n", - " -0.08445048332214355,\n", - " -0.028026795014739037,\n", - " -0.05219321325421333,\n", - " -0.0675998106598854,\n", - " 0.031273119151592255])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 9 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "QVVsLapQqst8" - }, - "source": [ - "### USELite2Vec (Requires disabling tf2)\n", - "Universal Sentence Encoder Lite (uncomment to run, the rest of the code might not work though)" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "98pIIjC3p3CW" - }, - "source": [ - "# from vectorhub.encoders.text.tfhub import USELite2Vec\n", - "# model = USELite2Vec()\n", - "# model.init()\n", - "# vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "# len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "tLx8z-i-ru94" - }, - "source": [ - "## Transformers Models\n", - "NLP Models made by hugging face\n", - "\n", - "For Pytorch\n", - "\n", - "!pip install vectorhub[encoders-text-torch-transformers]\n", - "\n", - "For Tensorflow\n", - "\n", - "!pip install vectorhub[encoders-text-tf-transformers]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1Ihm8m40wYOT" - }, - "source": [ - "### PyTorch Transformers Bert" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "9iZtNarertqO", - "outputId": "9671b995-8e34-4a01-c111-1ffee3b2d9af", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 439, - "referenced_widgets": [ - "10a7a326a9d24d72a0e912a02b07dc7b", - "e8d18ff4879647288637fe05bcfcd326", - "8c7b1b6de51640f2baccfb0c6613c3f6", - "f74d902ee7914d9e92e3ee1499b95f9b", - "eba2ff9d10a849a4bd67ac768d39eeda", - "3a6c1bb3c8334654963c2cde9ae78e86", - "12e86dc0691c4c4f8110069ebccacbd8", - "e62e753741ac499cb50597d0cb14136b", - "11043cb2bde147fba0bab6ceef446a35", - "6aed205579554f818283128a75b630b6", - "5093f3fca112492f815574f75a059afc", - "f9f33ccfcb254fccadf70eb33a9fb244", - "ff36308704064ae7a41464bc4c407e65", - "532600a5a4f446de8af1f8dbe503caa2", - "4bfc0f8acb2f4c068561c743eae5e67e", - "a8d1f129e80b411aabae0dae733584ca", - "59f65145880f4c3ea975643978c1544a", - "6119357bc98245bab1a9f8b7724e97b5", - "c2678746f9a04e0fa1968833318ff25e", - "7f85413cfdc4424a9fb18f4d279f8dc6", - "e9d93263cf984496b52cf2280c85bdc4", - "bff44fc5c6194fa19343cda7bfea99ad", - "04d0c10e9f2644dd99b2e484b80621ce", - "a4c3f97532d24f349af8f145c997ab4d" - ] - } - }, - "source": [ - "from vectorhub.encoders.text.torch_transformers import Transformer2Vec\n", - "model = Transformer2Vec('bert-base-uncased')\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "10a7a326a9d24d72a0e912a02b07dc7b", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=433.0, 
style=ProgressStyle(description_…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "11043cb2bde147fba0bab6ceef446a35", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=440473133.0, style=ProgressStyle(descri…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "59f65145880f4c3ea975643978c1544a", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=231508.0, style=ProgressStyle(descripti…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Transformer2Vec uses the AutoModel to allow for easier models.\n", - "Therefore, not all models will worked but most do. Call the list of tested transformer models using list_tested_models.\n", - "WARNING:tensorflow:9 out of the last 9 calls to .restored_function_body at 0x7faf0c6ee9d8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:9 out of the last 9 calls to .restored_function_body at 0x7faf0c6ee9d8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [0.4650784134864807,\n", - " 0.11635451763868332,\n", - " 0.08455784618854523,\n", - " 0.17221440374851227,\n", - " 0.12946262955665588,\n", - " -0.4637109637260437,\n", - " -0.14737677574157715,\n", - " 0.7618239521980286,\n", - " -0.2946079671382904,\n", - " 0.1236734613776207])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 11 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hjPfQtFdwbSr" - }, - "source": [ - "### Tensorflow Transformers Bert" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "FOuZzgdOrtt2", - "outputId": "95b01751-f1c5-436b-a267-a460d090384d", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 358, - "referenced_widgets": [ - "27b4458473054050a1997b160d502d8d", - "689844b256034e14a6ea9aaaeaad34b4", - "fd428e37db8741afaa5a9805dcd29e5d", - "31f65c6982954df8a00f2ff17872e5cb", - "c7668fa4508849c7bef7e15165f89826", - "728a6cd994d34503890f634cd4670c56", - "a3ca4abf8e684658b1f1ed9c1dfa8f03", - "ad049ccdefd74201ae674782a16cbadc" - ] - } - }, - "source": [ - "from vectorhub.encoders.text.tf_transformers import TFTransformer2Vec\n", - "model = TFTransformer2Vec('bert-base-uncased')\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "27b4458473054050a1997b160d502d8d", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=536063208.0, style=ProgressStyle(descri…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "Some layers from the model checkpoint at bert-base-uncased were not used when initializing TFBertModel: ['nsp___cls', 'mlm___cls']\n", - "- This IS expected if you are initializing TFBertModel from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPretraining model).\n", - "- This IS NOT expected if you are initializing TFBertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", - "All the layers of TFBertModel were initialized from the model checkpoint at bert-base-uncased.\n", - "If your task is similar to the task the model of the checkpoint was trained on, you can already use TFBertModel for predictions without further training.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [0.4650786519050598,\n", - " 0.11635440587997437,\n", - " 0.08455801010131836,\n", - " 0.17221422493457794,\n", - " 0.12946268916130066,\n", - " -0.463710755109787,\n", - " -0.1473766714334488,\n", - " 0.7618241906166077,\n", - " -0.29460790753364563,\n", - " 0.12367347627878189])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 12 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "EteWJa7AspHW" - }, - "source": [ - "## Sentence Transformers Models\n", - "NLP Models made by UKPLab\n", - "\n", - "!pip install vectorhub[encoders-text-sentence-transformers]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SeF_PuWWtXfD" - }, - "source": [ - "### XLM DistilRoberta \n", - "Trained on Paraphrase data" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "rR76yNN9szQY", - "outputId": "c3e2a836-e811-4ab0-feac-fd7e9c962144", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec\n", - "\n", - "model = SentenceTransformer2Vec('xlm-r-distilroberta-base-paraphrase-v1')\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [0.031084515154361725,\n", - " 0.6847583055496216,\n", - " 0.11592256277799606,\n", - " 0.273422509431839,\n", - " -0.15308502316474915,\n", - " 0.3989627957344055,\n", - " 0.008972018957138062,\n", - " -0.23263610899448395,\n", - " 0.07519139349460602,\n", - " 0.2651992440223694])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 13 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "851pmkW6tfV_" - }, - "source": [ - "### Roberta Large\n", - "Trained on Semantic Textual Similarity" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "AxwAeV9hszio", - "outputId": "11207992-b957-4cc8-fcc9-1bc1188e59ac", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec\n", - "\n", - "model = SentenceTransformer2Vec('roberta-large-nli-stsb-mean-tokens')\n", - "vector = model.encode(\"I enjoy taking long walks along the beach with my dog.\")\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(1024,\n", - " [-1.050469160079956,\n", - " -0.3698076605796814,\n", - " -1.1695324182510376,\n", - " 0.5883575081825256,\n", - " 0.6294163465499878,\n", - " -0.5596891641616821,\n", - " -0.1414034068584442,\n", - " 0.5867344737052917,\n", - " -1.0417426824569702,\n", - " 0.5338972210884094])" - ] - }, - "metadata": { - 
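A common next step with these sentence vectors is measuring semantic similarity. Here is a minimal sketch, assuming numpy and reusing the SentenceTransformer2Vec encoder shown above; the `cosine` helper is hypothetical glue code for this example, not part of vectorhub.

```python
# Sketch: compare sentences with cosine similarity over their embeddings.
import numpy as np
from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec

def cosine(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

model = SentenceTransformer2Vec('xlm-r-distilroberta-base-paraphrase-v1')
v1 = model.encode("I enjoy taking long walks along the beach with my dog.")
v2 = model.encode("Strolling on the sand with my puppy is great fun.")
v3 = model.encode("Quarterly earnings fell short of expectations.")
print(cosine(v1, v2), cosine(v1, v3))  # the paraphrase pair should score higher
```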
"tags": [] - }, - "execution_count": 14 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "AGaoMs8_xsH-" - }, - "source": [ - "# Image2Vec - Turn Image to Vectors" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Eel8tptfx0F0" - }, - "source": [ - "## TFHub Models\n", - "!pip install vectorhub[encoders-image-tfhub]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3fEuiI8jx0LX" - }, - "source": [ - "### BIT2Vec\n", - "BiT - Big Transfer, General Visual Representation Learning" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "ppo-2-8HxqcL", - "outputId": "036edb11-6c9f-4a49-f911-e7539518ef17", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import BitSmall2Vec\n", - "model = BitSmall2Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/bit/s-r50x1/1'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/bit/s-r50x1/1, Total size: 99.69MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/bit/s-r50x1/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:10 out of the last 10 calls to .restored_function_body at 0x7fad4291c6a8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:10 out of the last 10 calls to .restored_function_body at 0x7fad4291c6a8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(2048,\n", - " [0.2968096137046814,\n", - " 0.04607969522476196,\n", - " 0.01748301088809967,\n", - " 0.0,\n", - " 0.13056597113609314,\n", - " 1.5501508712768555,\n", - " 0.0008884340641088784,\n", - " 0.623866617679596,\n", - " 0.17756488919258118,\n", - " 0.1549355536699295])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 15 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "QH9uutquyq3K", - "outputId": "b79899b8-d045-4edc-c0fd-67f9cb9276d5", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import BitMedium2Vec\n", - "model = BitMedium2Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/bit/m-r50x1/1'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/bit/m-r50x1/1, Total size: 99.69MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/bit/m-r50x1/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad30400d90> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad30400d90> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(2048,\n", - " [0.30790072679519653,\n", - " 0.0,\n", - " 0.7188968658447266,\n", - " 0.07674463093280792,\n", - " 0.13097591698169708,\n", - " 1.5629138946533203,\n", - " 0.9586008787155151,\n", - " 0.0,\n", - " 0.0033455684315413237,\n", - " 0.026440966874361038])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 16 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "z7Lh6K4xysan" - }, - "source": [ - "### Inception2Vec\n", - "Inception" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "IWGEffZOysap", - "outputId": "e42a99ac-72b5-4264-a3f7-73e3510b0ad5", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import InceptionV12Vec\n", - "model = InceptionV12Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4, Total size: 22.81MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad4293c6a8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad4293c6a8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(1024,\n", - " [23.6610050201416,\n", - " 0.33202308416366577,\n", - " 4.330678939819336,\n", - " 29.95806884765625,\n", - " 4.646924018859863,\n", - " 0.0,\n", - " 44.74129104614258,\n", - " 2.7335867881774902,\n", - " 11.655862808227539,\n", - " 0.007075734436511993])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 17 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "WuTr3vv1ysD-", - "outputId": "8f0434ef-62a0-4e82-a8cf-9f4a57d5795d", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import InceptionV22Vec\n", - "model = InceptionV22Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_v2/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/inception_v2/feature_vector/4, Total size: 40.51MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_v2/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad302b3730> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad302b3730> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(1024,\n", - " [0.0009709870209917426,\n", - " 0.0,\n", - " 2.7877917289733887,\n", - " 23.551044464111328,\n", - " 0.0,\n", - " 57.676780700683594,\n", - " 23.372207641601562,\n", - " 27.082447052001953,\n", - " 1.4271756410598755,\n", - " 4.103132247924805])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 18 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Ffa-U63XysKS", - "outputId": "cca5b52d-acce-4875-db6f-33787a78fddf", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import InceptionV32Vec\n", - "model = InceptionV32Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/inception_v3/feature_vector/4, Total size: 85.49MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad303642f0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad303642f0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(2048,\n", - " [0.7830013036727905,\n", - " 5.030923366546631,\n", - " 7.418567180633545,\n", - " 50.96066665649414,\n", - " 46.93928146362305,\n", - " 19.61231231689453,\n", - " 5.568574905395508,\n", - " 32.246246337890625,\n", - " 7.484476566314697,\n", - " 21.118955612182617])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 19 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fC4kpr6gzxf2" - }, - "source": [ - "### Resnet2Vec\n", - "Resnet" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "2Iwf-oPgzxf4", - "outputId": "d8a5ea53-ae76-4a78-ba9d-b7ee5a7c015b", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import ResnetV12Vec\n", - "model = ResnetV12Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4, Total size: 91.36MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2ebcc1e0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2ebcc1e0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(2048,\n", - " [0.0,\n", - " 0.0,\n", - " 44.77119827270508,\n", - " 2.9527504444122314,\n", - " 0.0,\n", - " 3.1913535594940186,\n", - " 0.2675448954105377,\n", - " 31.209884643554688,\n", - " 0.0,\n", - " 0.06867393106222153])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 20 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "GgxYSsBlz9Hp", - "outputId": "1a665832-9b7b-403f-e7e0-ad1682741089", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import ResnetV22Vec\n", - "model = ResnetV22Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4, Total size: 91.35MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e0e76a8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e0e76a8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(2048,\n", - " [15.045275688171387,\n", - " 4.418089866638184,\n", - " 7.827340602874756,\n", - " 77.42793273925781,\n", - " 20.45443344116211,\n", - " 0.0009433465311303735,\n", - " 12.800081253051758,\n", - " 0.0,\n", - " 6.376217842102051,\n", - " 0.0])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 21 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "GH-vWB9zz4v1" - }, - "source": [ - "### InceptionResnet2Vec\n", - "Inception Resnet" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "KtdTT1k2z4v1", - "outputId": "6515e90d-4991-4124-a136-6d55f45f3f25", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import InceptionResnet2Vec\n", - "model = InceptionResnet2Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4, Total size: 213.55MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad3039c488> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad3039c488> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(1536,\n", - " [1.4016010761260986,\n", - " 0.5517287850379944,\n", - " 11.911657333374023,\n", - " 380.9639587402344,\n", - " 26.46406364440918,\n", - " 33.08391189575195,\n", - " 345.525146484375,\n", - " 0.7065975666046143,\n", - " 135.16529846191406,\n", - " 107.48884582519531])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 22 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Z4fiXY2r0GCE" - }, - "source": [ - "### MobileNet2Vec\n", - "MobileNet" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "iZ9m22zm0GCF", - "outputId": "b385467e-4709-4e8f-a763-b24a3a9a3a7a", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import MobileNetV12Vec\n", - "model = MobileNetV12Vec('https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/4')\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/4, Total size: 1.50MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad3378a400> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad3378a400> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(256,\n", - " [1.1166342496871948,\n", - " 5.3948184358887374e-05,\n", - " 0.6680822372436523,\n", - " 0.15086999535560608,\n", - " 0.5920364856719971,\n", - " 0.21992206573486328,\n", - " 1.2151236534118652,\n", - " 0.8825837969779968,\n", - " 0.03866421431303024,\n", - " 5.821349620819092])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 23 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "kV0Kc50U0Liu", - "outputId": "c5a21b51-1edb-44cc-fc0a-efb89f78453a", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.image.tfhub import MobileNetV22Vec\n", - "model = MobileNetV22Vec()\n", - "sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4, Total size: 17.94MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad42cb5268> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad42cb5268> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
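All of the image encoders in this section follow the same `read` / `encode` pattern, so a small retrieval sketch works with any of them. This is illustrative only (assumes numpy; ResnetV22Vec is picked arbitrarily, and the second candidate URL is a placeholder to replace with your own image).

```python
# Sketch: score candidate images against a query image by cosine similarity.
import numpy as np
from vectorhub.encoders.image.tfhub import ResnetV22Vec

def cosine(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

model = ResnetV22Vec()
logo = 'https://getvectorai.com/assets/hub-logo-with-text.png'
query = model.encode(model.read(logo))
candidates = [
    logo,                                  # identical image, should score ~1.0
    'https://example.com/your_image.png',  # placeholder - replace with your own image URL
]
for url in candidates:
    print(url, cosine(query, model.encode(model.read(url))))
```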
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(1792,\n", - " [0.02413051947951317,\n", - " 1.7939774990081787,\n", - " 0.0,\n", - " 0.38433465361595154,\n", - " 0.012855364009737968,\n", - " 0.0,\n", - " 0.0,\n", - " 1.8257256746292114,\n", - " 0.0,\n", - " 1.8062987327575684])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 24 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "YhrHSHu71-av" - }, - "source": [ - "# Audio2Vec - Turn Audio to Vectors" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "F6Brb36M2Ci_" - }, - "source": [ - "## TFHub Models\n", - "!pip install vectorhub[encoders-audio-tfhub]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wsAD7g-S2GNm" - }, - "source": [ - "### SpeechEmbedding2Vec\n", - "Speech Embedding" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "OJ9YCCau0NNS", - "outputId": "8dfc6d9a-cd1a-4f73-f534-4c85bb8f880a", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.audio.tfhub import SpeechEmbedding2Vec\n", - "model = SpeechEmbedding2Vec()\n", - "sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/speech_embedding/1'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/speech_embedding/1, Total size: 2.66MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/speech_embedding/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(96,\n", - " [-1.626917839050293,\n", - " 13.96774959564209,\n", - " 5.01875114440918,\n", - " 7.006755828857422,\n", - " 10.417956352233887,\n", - " 25.99224090576172,\n", - " 14.100224494934082,\n", - " 4.132189750671387,\n", - " 6.522117614746094,\n", - " 20.945070266723633])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 25 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "KKKsWl3T2dM7" - }, - "source": [ - "### Trill2Vec\n", - "Triplet Loss Network" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "s77eflZ12dM7", - "outputId": "c04f31b6-b28d-4158-e8b0-d656ed367eed", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.audio.tfhub import Trill2Vec\n", - "model = Trill2Vec()\n", - "sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3, Total size: 94.15MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad31200d08> 
triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad31200d08> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad31200378> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad31200378> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(512,\n", - " [-0.08206441253423691,\n", - " 0.003198703285306692,\n", - " 0.023471159860491753,\n", - " 0.03290205076336861,\n", - " 0.026588117703795433,\n", - " 0.04537850618362427,\n", - " 0.007081276271492243,\n", - " 0.03602985292673111,\n", - " -0.006469187792390585,\n", - " 0.026163343340158463])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 26 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "aE7zzZqy2i_L", - "outputId": "8614a4cf-9e2a-4587-be8b-eeeef9e096c2", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.audio.tfhub import TrillDistilled2Vec\n", - "model = TrillDistilled2Vec()\n", - "sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3, Total size: 198.74MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e2e4a60> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e2e4a60> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad43536268> triggered tf.function retracing. 
Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad43536268> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e2e40d0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e2e40d0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(2048,\n", - " [0.11910650879144669,\n", - " 0.22943904995918274,\n", - " 0.12266576290130615,\n", - " 0.19800463318824768,\n", - " 0.20083436369895935,\n", - " 0.21807503700256348,\n", - " 0.34325283765792847,\n", - " 0.16280651092529297,\n", - " 0.16411736607551575,\n", - " 0.1719944328069687])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 27 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "n3sLZdSF2dmF" - }, - "source": [ - "### Vggish2Vec\n", - "Vggish" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "AJJYj3Ar2dmF", - "outputId": "6da0ab79-82c8-4c58-9d72-1a22a946cb12", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.audio.tfhub import Vggish2Vec\n", - "model = Vggish2Vec()\n", - "sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/vggish/1'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/vggish/1, Total size: 275.33MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/vggish/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e2e4f28> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad2e2e4f28> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad43536400> triggered tf.function retracing. 
Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad43536400> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(128,\n", - " [1.0297094583511353,\n", - " -0.33734169602394104,\n", - " 0.14990638196468353,\n", - " -0.43253546953201294,\n", - " -0.02132938802242279,\n", - " -0.2508460581302643,\n", - " -0.5012649297714233,\n", - " -0.15058597922325134,\n", - " -0.5810864567756653,\n", - " -0.21230646967887878])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 28 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hJaz-wJQ4CY4" - }, - "source": [ - "### Yamnet2Vec\n", - "Yamnet" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "UzR1aUP64CY5", - "outputId": "56f149ff-f6fa-4050-ad0f-8cdd0e28c2c8", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.encoders.audio.tfhub import Yamnet2Vec\n", - "model = Yamnet2Vec()\n", - "sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/yamnet/1'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/yamnet/1, Total size: 17.43MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/yamnet/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad3088bb70> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
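The audio encoders share the same `read` / `encode` interface, so comparing two clips is a one-liner once the embeddings are in hand. A minimal sketch, assuming numpy; Trill2Vec is used as the example and the second clip URL is a placeholder for your own audio file.

```python
# Sketch: compare two audio clips with any of the audio encoders above (Trill2Vec shown).
import numpy as np
from vectorhub.encoders.audio.tfhub import Trill2Vec

def cosine(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

model = Trill2Vec()
reference = model.encode(model.read(
    'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav'))
other = model.encode(model.read(
    'https://example.com/another_clip.wav'))  # placeholder URL - use your own clip
print(cosine(reference, other))
```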
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad3088bb70> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(1024,\n", - " [0.017622822895646095,\n", - " 0.013449881225824356,\n", - " 0.09929609298706055,\n", - " 0.008586863987147808,\n", - " 0.0,\n", - " 0.0,\n", - " 0.010829833336174488,\n", - " 0.05098574236035347,\n", - " 0.031329721212387085,\n", - " 0.03376280516386032])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 29 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "O65zP25r4Yih" - }, - "source": [ - "## Pytorch Fairseq Models\n", - "!pip install vectorhub[encoders-audio-pytorch]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "PHPBhmAg4kPq" - }, - "source": [ - "### Wav2Vec\n", - "Wav2Vec 2.0" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "di_lTXIZ4emd", - "outputId": "452bedb2-f7b2-4ba6-c55e-76d1be976a18", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000, - "referenced_widgets": [ - "6c0756b756814056a98b6fea42067eb7", - "6aad6086cec746ecb35e3ee9686e9ecb", - "00deaf3ae9d14416a0c1c3af2b4ed892", - "b46e2e27e6da427bba35d37fa07c56cb", - "69cda9906f0b44e7bf077de9069198b1", - "09142704d53248938668d64d44920f88", - "a8599cb5c4974d6799d72435337ac40e", - "faf957de7821413c94538361daf1b155" - ] - } - }, - "source": [ - "from vectorhub.encoders.audio.pytorch import Wav2Vec\n", - "model = Wav2Vec()\n", - "sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')\n", - "vector = model.encode(sample)\n", - "len(vector), vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Downloading: \"https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt\" to /root/.cache/torch/hub/checkpoints/wav2vec_small.pt\n" - ], - "name": "stderr" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "6c0756b756814056a98b6fea42067eb7", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=950500491.0), HTML(value='')))" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Wav2VecModel(\n", - " (feature_extractor): ConvFeatureExtractionModel(\n", - " (conv_layers): ModuleList(\n", - " (0): Sequential(\n", - " (0): Conv1d(1, 512, kernel_size=(10,), stride=(5,), bias=False)\n", - " (1): Dropout(p=0.0, inplace=False)\n", - " (2): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (3): ReLU()\n", - " 
)\n", - " (1): Sequential(\n", - " (0): Conv1d(512, 512, kernel_size=(3,), stride=(2,), bias=False)\n", - " (1): Dropout(p=0.0, inplace=False)\n", - " (2): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (3): ReLU()\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv1d(512, 512, kernel_size=(3,), stride=(2,), bias=False)\n", - " (1): Dropout(p=0.0, inplace=False)\n", - " (2): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (3): ReLU()\n", - " )\n", - " (3): Sequential(\n", - " (0): Conv1d(512, 512, kernel_size=(3,), stride=(2,), bias=False)\n", - " (1): Dropout(p=0.0, inplace=False)\n", - " (2): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (3): ReLU()\n", - " )\n", - " (4): Sequential(\n", - " (0): Conv1d(512, 512, kernel_size=(3,), stride=(2,), bias=False)\n", - " (1): Dropout(p=0.0, inplace=False)\n", - " (2): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (3): ReLU()\n", - " )\n", - " (5): Sequential(\n", - " (0): Conv1d(512, 512, kernel_size=(2,), stride=(2,), bias=False)\n", - " (1): Dropout(p=0.0, inplace=False)\n", - " (2): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (3): ReLU()\n", - " )\n", - " (6): Sequential(\n", - " (0): Conv1d(512, 512, kernel_size=(2,), stride=(2,), bias=False)\n", - " (1): Dropout(p=0.0, inplace=False)\n", - " (2): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (3): ReLU()\n", - " )\n", - " )\n", - " )\n", - " (feature_aggregator): ConvAggegator(\n", - " (conv_layers): Sequential(\n", - " (0): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (1): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (2): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (3): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (4): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (5): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (6): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (7): Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " (8): 
Sequential(\n", - " (0): ReplicationPad1d((2, 0))\n", - " (1): Conv1d(512, 512, kernel_size=(3,), stride=(1,))\n", - " (2): Dropout(p=0.1, inplace=False)\n", - " (3): Fp32GroupNorm(1, 512, eps=1e-05, affine=True)\n", - " (4): ReLU()\n", - " )\n", - " )\n", - " (residual_proj): ModuleList(\n", - " (0): None\n", - " (1): None\n", - " (2): None\n", - " (3): None\n", - " (4): None\n", - " (5): None\n", - " (6): None\n", - " (7): None\n", - " (8): None\n", - " )\n", - " )\n", - " (wav2vec_predictions): Wav2VecPredictionsModel(\n", - " (project_to_steps): ConvTranspose2d(512, 512, kernel_size=(1, 12), stride=(1, 1))\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (dropout_feats): Dropout(p=0.1, inplace=False)\n", - " (dropout_agg): Dropout(p=0.0, inplace=False)\n", - ")\n" - ], - "name": "stdout" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(512,\n", - " [0.43996002820388574,\n", - " 0.0009310347277943681,\n", - " 0.340938310394427,\n", - " 0.12880457228293823,\n", - " 0.3568754025916683,\n", - " 0.3878720904723173,\n", - " 0.16845630043720053,\n", - " 0.1152303839076123,\n", - " 0.3012702952198707,\n", - " 0.36476635736436014])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 30 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "vI2baQYw4whr" - }, - "source": [ - "# TextText2Vec - Turn 2 Different Type of Text to vectors. E.g. Question Answering" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "YaVKAIc3489m" - }, - "source": [ - "## QA TFHub Models\n", - "!pip install vectorhub[encoders-text-tfhub]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VVnZDk4B5iYr" - }, - "source": [ - "### USEQA2Vec\n", - "Unviersal Sentence Encoder Question Answering" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "7dTrLZFR5o52", - "outputId": "8fb4494a-83e5-4d2d-9731-06454b7489d9", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.bi_encoders.text_text.tfhub import USEQA2Vec\n", - "model = USEQA2Vec()\n", - "q_vector = model.encode_question('How is the weather today?')\n", - "a_vector = model.encode_answer('The weather is great today.')\n", - "len(q_vector), q_vector[:10], len(a_vector), a_vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder-qa/3'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/universal-sentence-encoder-qa/3, Total size: 588.94MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder-qa/3'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad4344fae8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad4344fae8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(512,\n", - " [0.016045808792114258,\n", - " -0.043040379881858826,\n", - " 0.07887386530637741,\n", - " 0.04960925877094269,\n", - " -0.03173493966460228,\n", - " 0.013906203210353851,\n", - " 0.03847653046250343,\n", - " 0.03160930797457695,\n", - " -0.04524993896484375,\n", - " 0.030441774055361748],\n", - " 512,\n", - " [0.022999223321676254,\n", - " -0.03275342658162117,\n", - " 0.07618879526853561,\n", - " 0.04189026355743408,\n", - " 0.05775227025151253,\n", - " 0.03411644324660301,\n", - " 0.035125598311424255,\n", - " 0.025239063426852226,\n", - " -0.04660375043749809,\n", - " -0.03390398994088173])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 31 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "YVHdtqZG5rZd" - }, - "source": [ - "### USEMultieQA2Vec\n", - "Unviersal Sentence Encoder Mulitlingual Question Answering" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "DEEAsNNx43Pn", - "outputId": "7235204b-3b0a-4437-b493-3cdccd3fcdce", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.bi_encoders.text_text.tfhub import USEMultiQA2Vec\n", - "model = USEMultiQA2Vec()\n", - "q_vector = model.encode_question('How is the weather today?')\n", - "a_vector = model.encode_answer('The weather is great today.')\n", - "len(q_vector), q_vector[:10], len(a_vector), a_vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3, Total size: 348.55MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad45d3c840> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad45d3c840> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad4344f158> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad4344f158> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(512,\n", - " [0.04586700722575188,\n", - " -0.04745269939303398,\n", - " -0.036448970437049866,\n", - " -0.10308169573545456,\n", - " -0.08364672213792801,\n", - " 0.06672786176204681,\n", - " -0.047646693885326385,\n", - " 0.058453988283872604,\n", - " 0.018309807404875755,\n", - " -0.07734357565641403],\n", - " 512,\n", - " [0.02722945623099804,\n", - " -0.01223964523524046,\n", - " -0.05937158688902855,\n", - " 0.02729908563196659,\n", - " -0.05680926516652107,\n", - " 0.02130836248397827,\n", - " -0.06708989292383194,\n", - " 0.043317489326000214,\n", - " 0.0029923515394330025,\n", - " -0.0535140223801136])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 32 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "vvuiYUCh_glf" - }, - "source": [ - "### LAReQA2Vec\n", - "Language-agnostic Question Answering model for retrieving answers from a multilingual candidate pool" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "-ZU4VYPG5L31", - "outputId": "7d1966d8-d0a7-46d0-b2ee-cff5b3e7e9cd", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.bi_encoders.text_text.tfhub import LAReQA2Vec\n", - "model = LAReQA2Vec()\n", - "q_vector = model.encode_question('How is the weather today?')\n", - "a_vector = model.encode_answer('The weather is great today.')\n", - "len(q_vector), q_vector[:10], len(a_vector), a_vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:absl:Downloading TF-Hub Module 'https://tfhub.dev/google/LAReQA/mBERT_En_En/1'.\n", - "INFO:absl:Downloaded https://tfhub.dev/google/LAReQA/mBERT_En_En/1, Total size: 681.40MB\n", - "INFO:absl:Downloaded TF-Hub Module 'https://tfhub.dev/google/LAReQA/mBERT_En_En/1'.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad8b11e9d8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "WARNING:tensorflow:11 out of the last 11 calls to .restored_function_body at 0x7fad8b11e9d8> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ], - "name": "stderr" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [-0.00475115142762661,\n", - " -0.008070685900747776,\n", - " 0.03443632274866104,\n", - " 0.029951388016343117,\n", - " 0.06215948611497879,\n", - " -0.0021374477073550224,\n", - " -0.003773134434595704,\n", - " 0.011240703985095024,\n", - " 0.01533481851220131,\n", - " 0.011144084855914116],\n", - " 768,\n", - " [-0.03146867826581001,\n", - " -0.002026214497163892,\n", - " 0.034710291773080826,\n", - " 0.019098758697509766,\n", - " 0.06205341964960098,\n", - " -0.01791439950466156,\n", - " -0.007370147854089737,\n", - " 0.00870119035243988,\n", - " 0.012205611914396286,\n", - " 0.012876193039119244])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 33 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-zIUEwxg5-n5" - }, - "source": [ - "## Pytorch Transformers" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZYEQfi7K5zz-" - }, - "source": [ - "### DPR2Vec - By Facebook \n", - "Dense Passage Retrieval" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "GFOs1v3Q5zz_", - "outputId": "7efb6ddc-d185-4ed0-cff8-62a85aedc96b", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 734, - "referenced_widgets": [ - "468ffb10cdee4cddb44c4d391e2331d7", - "99a03679273848dc890e7aadde50d358", - "03bb795fb145491ca6585aff703d02bb", - "4f337836c475426ba59049432e3ccfbd", - "0cefb6cc80f846129dae724baf6377ed", - "059caadbf542453cae0cf54b8076fd09", - "504bf105d52f4892bb6fd47934609d55", - "55f3f97c2bd544abb0d51a70de53ead2", - "a3dceaeb4dc24736a71e556ace34e8b7", - "e36128b755d543d7972b5db7a4514252", - "88ae11f8dfa44216bca6b8e99fbf2ff9", - "1089b49caf6d4d7cb39ab4221b510564", - "e059e18a12634bb184cff8d10276ab3b", - "fc48e1fcdd9e40dd8f167c8f6c49c43b", - "686d89d247a6430c885cb8d905abd152", - "78ee3448ea56418ca3a8c923dffa2e69", - "54675ff29cf24d948721d5444fb3991b", - "71023333f39a4ef2a39924d659b3c2dd", - "1913b874f4cc42b59756598ee2b880ee", - "9fdf013a3aea40dd80ba7b63c8c34236", - "cbcb794b99334009b58d11a38eaf30be", - "c1e01112215443908508793186d59a1b", - "90b7271078064895b05e3d131e436478", - "0127827e68c6474e84c8b36c629801ea", - "6bd5b04a04c54b3e9fc243435f45b54f", - "b4d1a3eff5464d55a4ba35fa2b9fa8d2", - "0e2d496b16b14df8980a28ead333beb2", - "40afc45883c5440a8e12b140b86a2acc", - "2ef07b02c6cd41b7addd14e3a59cc469", - "07ff7c7abacc43cbba21b6ca74d35b81", - "8ede83b5635f4983a41d2fdac2f16f47", - "1c109a0a990644f7a9e061e9dbbf99a8", - "1d3e1828b1174221835589f86d2c4435", - "c99999cdf5104c4093bab35a58d74164", - "70358afa3edf4693b243f5e25034a824", - "cbfd90f162e24d5b95c7f2048e732202", - "1950b7a29fe04faa858921f1c1a8d17d", - "ea3f4f587199405e83da54bc225050ef", - "ebcb2b2a0e124960b39e75615efab973", - "7cd24fc74ecf4cb9b1c0093aa8c9e36e", - "3adf17fa083f454b9405ee91b197cf51", - "40729fd817904e1fbf04e66ca3c1a5f3", - "b6561082a18943578cf4f3049a6dc032", - "f8dcfb84ac8e43aa9dcb2456e631c0de", - "5caa05889472498e9de1ab80d5caced3", - "244050e4d61c414a92cd42a2e9e9d866", - "7f4fca8b171044a083c7201c4c20499e", - "78d1423b47b045d1968b30f3d43a6f52", - "da00be1bbbef4e3482e4c89756ecb473", - "ce390e69a3c24766ad1cf1b4edaa5a50", - "1e9f0f242e2649d284fcfe805efef50b", - "b17aa131e1b24942815d1fa3412b6fc4", - "afbb11a6437b4ac885532a274310772e", - "dc8dceabae6b499d8978469269946f4b", - 
"2fcb76bc3e9c41758067530b8523b97f", - "efd7ee1d9d3b42db81d1e6153e614810" - ] - } - }, - "source": [ - "from vectorhub.bi_encoders.text_text.torch_transformers import DPR2Vec\n", - "model = DPR2Vec()\n", - "q_vector = model.encode_question('How is the weather today?')\n", - "a_vector = model.encode_answer('The weather is great today.')\n", - "len(q_vector), q_vector[:10], len(a_vector), a_vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "468ffb10cdee4cddb44c4d391e2331d7", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=466062.0, style=ProgressStyle(descripti…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "a3dceaeb4dc24736a71e556ace34e8b7", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=492.0, style=ProgressStyle(description_…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "54675ff29cf24d948721d5444fb3991b", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=437983985.0, style=ProgressStyle(descri…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "6bd5b04a04c54b3e9fc243435f45b54f", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=493.0, style=ProgressStyle(description_…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "1d3e1828b1174221835589f86d2c4435", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=437986065.0, style=ProgressStyle(descri…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "3adf17fa083f454b9405ee91b197cf51", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=484.0, style=ProgressStyle(description_…" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "da00be1bbbef4e3482e4c89756ecb473", - "version_minor": 0, - "version_major": 2 - }, - "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=437998572.0, style=ProgressStyle(descri…" - ] - }, - "metadata": { - "tags": [] - } - 
}, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [0.38520655035972595,\n", - " 0.037575557827949524,\n", - " 0.33666062355041504,\n", - " 0.1357431709766388,\n", - " 0.26139581203460693,\n", - " -0.20699873566627502,\n", - " 0.33886364102363586,\n", - " 0.2096884399652481,\n", - " -0.11666563898324966,\n", - " -0.3050388693809509],\n", - " 768,\n", - " [0.12573188543319702,\n", - " 0.09063692390918732,\n", - " 0.22302208840847015,\n", - " -0.22612245380878448,\n", - " 0.00587753439322114,\n", - " -0.40788567066192627,\n", - " 0.6642342805862427,\n", - " 1.0489115715026855,\n", - " -0.6422920823097229,\n", - " -0.9332435131072998])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 34 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gQSGm4_P8xv-" - }, - "source": [ - "## Sentence Transformers" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "CIX1FKd68q4j" - }, - "source": [ - "### DistilRobertaQA2Vec" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "n9Oi7wuF8qcu", - "outputId": "df7ce966-325f-44dd-94e7-1387ad6e83c7", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "source": [ - "from vectorhub.bi_encoders.text_text.sentence_transformers import DistilRobertaQA2Vec\n", - "model = DistilRobertaQA2Vec()\n", - "q_vector = model.encode_question('How is the weather today?')\n", - "a_vector = model.encode_answer('The weather is great today.')\n", - "len(q_vector), q_vector[:10], len(a_vector), a_vector[:10]" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "(768,\n", - " [1.129922866821289,\n", - " 0.5829585194587708,\n", - " 0.17778196930885315,\n", - " -0.09017980098724365,\n", - " 0.3229660987854004,\n", - " -0.8049975633621216,\n", - " 0.3316192030906677,\n", - " -0.3474065065383911,\n", - " 0.5975155234336853,\n", - " 0.2112181931734085],\n", - " 768,\n", - " [0.7734353542327881,\n", - " 0.7911091446876526,\n", - " 0.18971982598304749,\n", - " -0.7204448580741882,\n", - " 0.29347801208496094,\n", - " -0.45650196075439453,\n", - " 0.40947067737579346,\n", - " -0.7629557847976685,\n", - " 1.3929301500320435,\n", - " 0.21254363656044006])" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 35 - } - ] - } - ] -} \ No newline at end of file diff --git a/extra_requirements.json b/extra_requirements.json deleted file mode 100644 index 40961b78..00000000 --- a/extra_requirements.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "numpy": ["core"], - "requests": ["core"], - "PyYAML": ["core"], - "pytest": ["test"], - "document-utils": ["core"], - "sphinx-rtd-theme>=0.5.0": ["test"], - "imageio": ["encoders-image", "encoders-image-tfhub", "clip"], - "scikit-image": ["encoders-image", "encoders-image-tfhub", "clip"], - "soundfile": ["encoders-audio-tfhub"], - "librosa": ["audio-encoder", "encoders-audio-tfhub"], - "tensorflow~=2.4.3": ["encoders-text-tfhub", "encoders-audio-tfhub", "encoders-image-tfhub", "encoders-text-tf-transformers", - "encoders-text-tfhub-windows", "encoders-image-tf-face-detection"], - "tensorflow-hub~=0.12.0": ["encoders-text-tfhub", "encoders-audio-tfhub", "encoders-image-tfhub", "encoders-text-tfhub-windows"], - "tensorflow-text~=2.4.3": ["encoders-text-tfhub"], - "tf-models-official==2.4.0": ["encoders-text-tfhub", "encoders-text-tfhub-windows"], - "bert-for-tf2==0.14.9": ["encoders-text-tfhub", 
"encoders-text-tfhub-windows"], - "sentence-transformers": ["encoders-text-sentence-transformers"], - "torch>=1.6.0": ["encoders-audio-pytorch", "encoders-text-torch-transformers", "encoders-text-sentence-transformers", - "encoders-image-fastai", "encoders-code-transformers"], - "fairseq": ["encoders-audio-pytorch"], - "transformers": ["encoders-text-torch-transformers", "encoders-text-tf-transformers", "encoders-code-transformers"], - "moviepy": ["encoders-video"], - "opencv-python": ["encoders-video", "encoders-image-tf-face-detection", "encoders-image-cv2", "clip"], - "appdirs": ["encoders-image-tf-face-detection"], - "fastai==2.1.8": ["encoders-image-fastai"], - "mtcnn": ["encoders-image-tf-face-detection"], - "Pillow": ["encoders-image-tf-face-detection", "clip"], - "clip-by-openai": ["clip"], - "pytorch==1.7.1": ["clip"] -} diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index bfbacec9..00000000 --- a/pytest.ini +++ /dev/null @@ -1,5 +0,0 @@ -[pytest] -markers = - audio: mark a test as an audio test - image: mark test as an image test - text: mark test as a text test diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 1a107497..00000000 --- a/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -numpy -requests -pytest diff --git a/setup.py b/setup.py deleted file mode 100644 index b43f7bf1..00000000 --- a/setup.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import os -import codecs -import sys -import json -import re -from setuptools import setup,find_packages -from collections import defaultdict -from typing import List, Dict -from pathlib import Path - - -def get_extra_requires(path, add_all=True): - if '.json' in path: - try: - requirements_dict = json.load(open(path, 'r')) - return dependency_to_requirement(requirements_dict) - except FileNotFoundError: - print(f"{path} not found") - return {} - -def dependency_to_requirement(requirements_dict: Dict, add_all=True, add_single_package=True): - """ - Invert the index from dependency to requirement. 
- """ - all_requirements = defaultdict(set) - for library, dependency in requirements_dict.items(): - for d in dependency: - all_requirements[d].add(library) - if add_single_package: - for k in requirements_dict.keys(): - all_requirements[k] = {k} - if add_all: - all_requirements['all'] = set(v for v in requirements_dict.keys()) - return all_requirements - -all_deps = get_extra_requires('extra_requirements.json') -extras_require = {k: list(v) for k, v in all_deps.items()} -print(extras_require) -# Additional files to include - adding model cards -# package_data = [str(x) for x in Path('vectorhub').rglob('*.md')] -package_data = [str(x) for x in list(Path('vectorhub').rglob("*.md"))] - -# Also add the extra_requirements.json file -package_data.append('extra_requirements.json') - -def read(rel_path): - """Read lines from given file""" - here = os.path.abspath(os.path.dirname(__file__)) - with open(os.path.join(here, rel_path), "r") as fp: - return fp.read() - -def get_version(rel_path): - """Read __version__ from given file""" - for line in read(rel_path).splitlines(): - if line.startswith("__version__"): - delim = '"' if '"' in line else "'" - return line.split(delim)[1] - raise RuntimeError(f"Unable to find a valid __version__ string in {rel_path}.") - -version=get_version("vectorhub/__init__.py") - -if 'IS_VECTORHUB_NIGHTLY' in os.environ.keys(): - from datetime import datetime - name = 'vectorhub-nightly' - version = version + '.' + datetime.today().__str__().replace('-', '.').replace(":", '.').replace(' ', '.') -else: - name = 'vectorhub' - -setup( - name=name, - version=version, - author="OnSearch Pty Ltd", - author_email="dev@vctr.ai", - package_data={'vectorhub': package_data, '': ['extra_requirements.json']}, - include_package_data=True, - # data_files=[('vectorhub', package_data)], # puts the markdown files in a new directory - not what we want - description="One liner to encode data into vectors with state-of-the-art models using tensorflow, pytorch and other open source libraries. 
Word2Vec, Image2Vec, BERT, etc", - long_description=open("README.md", "r", encoding="utf-8").read(), - long_description_content_type="text/markdown", - keywords="vector, embeddings, machinelearning, ai, artificialintelligence, nlp, tensorflow, pytorch, nearestneighbors, search, analytics, clustering, dimensionalityreduction", - url="https://github.com/vector-ai/vectorhub", - license="Apache", - packages=find_packages(exclude=["tests*"]) + ['.'], - python_requires=">=3", - install_requires=list(all_deps['core'].union(all_deps['perf'])), - extras_require=extras_require, - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "Intended Audience :: Education", - "Intended Audience :: Science/Research", - "Intended Audience :: Information Technology", - "Intended Audience :: Financial and Insurance Industry", - "Intended Audience :: Healthcare Industry", - "Intended Audience :: Manufacturing", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Multimedia :: Sound/Audio :: Conversion", - "Topic :: Multimedia :: Video :: Conversion", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Scientific/Engineering :: Image Recognition", - "Topic :: Scientific/Engineering :: Information Analysis", - "Topic :: Scientific/Engineering :: Visualization", - "Topic :: Software Development :: Libraries :: Application Frameworks", - ], -) diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index 2e82b7f7..00000000 --- a/tests/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Testing suite for the library. -""" \ No newline at end of file diff --git a/tests/base/__init__.py b/tests/base/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/base/test_base.py b/tests/base/test_base.py deleted file mode 100644 index 67b9a47c..00000000 --- a/tests/base/test_base.py +++ /dev/null @@ -1,40 +0,0 @@ -""" - Tests for various base functions occur here. -""" -import pytest -import numpy as np -import os -import vectorhub -from vectorhub.base import catch_vector_errors, Base2Vec -from vectorhub.encoders.audio.tfhub import SpeechEmbedding2Vec -from ..test_utils import is_dummy_vector - -def test_catch_vector_errors(): - """Test the catch vector errors. 
- """ - encoder = SpeechEmbedding2Vec() - vectors = encoder.encode(np.nan) - assert is_dummy_vector(vectors) - -def test_catch_vector_errors_false(): - """Test catch the vector errors - """ - with pytest.raises(Exception): - vectorhub.options.set_option('catch_vector_errors', False) - encoder = SpeechEmbedding2Vec() - vectors = encoder.encode(np.nan) - -def test_validate_urls_raises_warning(): - enc = Base2Vec() - with pytest.warns(UserWarning): - # Assert this is false - assert not enc.validate_model_url('testing_url', ['testing_url_2', 'fake_url']) - -def test_validate_urls_works_for_tfhub_exception(): - enc = Base2Vec() - assert enc.validate_model_url('https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3', - ['https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1', 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2']) - -def test_validate_urls_works_simple(): - enc = Base2Vec() - assert enc.validate_model_url('test', ['test', 'test_2']) diff --git a/tests/base/test_index.py b/tests/base/test_index.py deleted file mode 100644 index b1e60f50..00000000 --- a/tests/base/test_index.py +++ /dev/null @@ -1,40 +0,0 @@ -""" - Tests for various base functions occur here. -""" -import pytest -import os -from vectorhub.encoders.audio.tfhub import SpeechEmbedding2Vec - -class TestIndex: - """ - Testing the ability to use and add to the Vector AI index. - """ - def test_vi_index(audio_url): - num_of_documents = 30 - enc = SpeechEmbedding2Vec() - items = [audio_url] * num_of_documents - response = enc.add_documents( - os.environ['VH_USERNAME'], - os.environ['VH_API_KEY'], - items=items, - collection_name='test_index') - assert response['successfully_inserted'] == num_of_documents - enc.client.delete_collection(enc.collection_name) - - def test_vi_index_with_metadata(audio_url): - """ - Test the Vector AI index with Metadata. 
- """ - num_of_documents = 30 - enc = SpeechEmbedding2Vec() - items= [audio_url] * num_of_documents - metadata = list(range(num_of_documents)) - response = enc.add_documents( - os.environ['VH_USERNAME'], - os.environ['VH_API_KEY'], - items=items, - metadata=metadata, - collection_name='test_index') - assert response['successfully_inserted'] == num_of_documents - enc.client.delete_collection(enc.collection_name) - diff --git a/tests/bi_encoders/__init__.py b/tests/bi_encoders/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/bi_encoders/qa/__init__.py b/tests/bi_encoders/qa/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/bi_encoders/qa/tfhub/__init__.py b/tests/bi_encoders/qa/tfhub/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/bi_encoders/qa/tfhub/test_lareqa_qa.py b/tests/bi_encoders/qa/tfhub/test_lareqa_qa.py deleted file mode 100644 index bffc444a..00000000 --- a/tests/bi_encoders/qa/tfhub/test_lareqa_qa.py +++ /dev/null @@ -1,9 +0,0 @@ -from vectorhub.bi_encoders.qa.tfhub import LAReQA2Vec -from ....test_utils import assert_encoder_works - -def test_lare_qa_works(): - """ - Testing for LAReQA works - """ - encoder = LAReQA2Vec() - assert_encoder_works(encoder, data_type='text', model_type='bi_encoder') diff --git a/tests/bi_encoders/qa/tfhub/test_use_qa.py b/tests/bi_encoders/qa/tfhub/test_use_qa.py deleted file mode 100644 index 953e26de..00000000 --- a/tests/bi_encoders/qa/tfhub/test_use_qa.py +++ /dev/null @@ -1,36 +0,0 @@ -import numpy as np -from vectorhub.bi_encoders.qa.tfhub import USEMultiQA2Vec, USEQA2Vec -from ....test_utils import assert_encoder_works - -def test_use_multi_qa_initialize(): - """ - Testing for USE-Multi-QA initialize - """ - encoder = USEMultiQA2Vec() - assert_encoder_works(encoder, data_type='text', model_type='bi_encoder') - -def test_use_multi_qa_single_encode(): - """ - Testing for USE-Multi-QA single encode - """ - encoder = USEMultiQA2Vec() - assert_encoder_works(encoder, data_type='text', model_type='bi_encoder') - -def test_use_multi_qa_bulk_encode(): - """ - Testing for USE-Multi-QA bulk encode - """ - client = USEMultiQA2Vec() - question_emb = client.bulk_encode_questions(['What is your age?']) - answer_emb = client.bulk_encode_answers(["I am 20 years old.", "good morning"], [ - "I will be 21 next year.", "great day."]) - assert len(question_emb) == 1 - assert len(answer_emb) == 2 - - -def test_use_qa_initialize(): - """ - Testing for USE-QA. 
- """ - encoder = USEQA2Vec() - assert_encoder_works(encoder, vector_length=512, data_type='text', model_type='bi_encoder') diff --git a/tests/bi_encoders/text_image/__init__.py b/tests/bi_encoders/text_image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/bi_encoders/text_image/torch/__init__.py b/tests/bi_encoders/text_image/torch/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/bi_encoders/text_image/torch/test_clip.py b/tests/bi_encoders/text_image/torch/test_clip.py deleted file mode 100644 index b8ed967c..00000000 --- a/tests/bi_encoders/text_image/torch/test_clip.py +++ /dev/null @@ -1,9 +0,0 @@ -from vectorhub.bi_encoders.text_image.torch import Clip2Vec -from ....test_utils import assert_encoder_works - -def test_clip2vec_works(): - """ - Testing for Clip2Vec works - """ - encoder = Clip2Vec() - assert_encoder_works(encoder, data_type='text_image', model_type='bi_encoder') diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index 8cf6028b..00000000 --- a/tests/conftest.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -An audio URL. -""" -def audio_url(): - return 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav' - diff --git a/tests/encoders/__init__.py b/tests/encoders/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/audio/__init__.py b/tests/encoders/audio/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/audio/pytorch/__init__.py b/tests/encoders/audio/pytorch/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/audio/pytorch/test_fairseq.py b/tests/encoders/audio/pytorch/test_fairseq.py deleted file mode 100644 index 982ed9f7..00000000 --- a/tests/encoders/audio/pytorch/test_fairseq.py +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np -from vectorhub.encoders.audio.pytorch.wav2vec import Wav2Vec -from ....test_utils import assert_encoder_works - -def test_fairseq_works(): - """ - Simple testing for Fairseq working. 
- """ - enc = Wav2Vec() - assert_encoder_works(enc, vector_length=512, data_type='audio') diff --git a/tests/encoders/audio/tfhub/__init__.py b/tests/encoders/audio/tfhub/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/audio/tfhub/test_speech_embedding.py b/tests/encoders/audio/tfhub/test_speech_embedding.py deleted file mode 100644 index aa410ae7..00000000 --- a/tests/encoders/audio/tfhub/test_speech_embedding.py +++ /dev/null @@ -1,9 +0,0 @@ -from vectorhub.encoders.audio.tfhub.speech_embedding import SpeechEmbedding2Vec -from ....test_utils import assert_encoder_works - -def test_speech_embedding_works(): - """ - Testing for speech embedding initialization - """ - encoder = SpeechEmbedding2Vec() - assert_encoder_works(encoder, vector_length=96, data_type='audio') diff --git a/tests/encoders/audio/tfhub/test_trill.py b/tests/encoders/audio/tfhub/test_trill.py deleted file mode 100644 index b8c4ec5a..00000000 --- a/tests/encoders/audio/tfhub/test_trill.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np -from vectorhub.encoders.audio.tfhub import Trill2Vec, TrillDistilled2Vec -from ....test_utils import assert_encoder_works - -def test_trill_works(): - """ - Testing for speech embedding initialization - """ - enc = Trill2Vec() - assert_encoder_works(enc, vector_length=512, data_type='audio') - -def test_trill_distilled_works(): - enc = TrillDistilled2Vec() - assert_encoder_works(enc, vector_length=2048, data_type='audio') diff --git a/tests/encoders/audio/tfhub/test_vggish.py b/tests/encoders/audio/tfhub/test_vggish.py deleted file mode 100644 index d4a57def..00000000 --- a/tests/encoders/audio/tfhub/test_vggish.py +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np -from vectorhub.encoders.audio.tfhub.vggish import Vggish2Vec -from ....test_utils import assert_encoder_works - -def test_vggish_initialize(): - """ - Testing for the vggish initialize - """ - model = Vggish2Vec() - assert_encoder_works(model, vector_length=128, data_type='audio') diff --git a/tests/encoders/audio/tfhub/test_yamnet.py b/tests/encoders/audio/tfhub/test_yamnet.py deleted file mode 100644 index 492a6e7b..00000000 --- a/tests/encoders/audio/tfhub/test_yamnet.py +++ /dev/null @@ -1,11 +0,0 @@ -import numpy as np -from vectorhub.encoders.audio.tfhub.yamnet import Yamnet2Vec -from ....test_utils import assert_encoder_works - - -def test_yamnet_initialize(): - """ - Testing for the yamnet initialize - """ - model = Yamnet2Vec() - assert_encoder_works(model, vector_length=1024, data_type='audio') diff --git a/tests/encoders/audio/vectorai/test_vi_audio2vec.py b/tests/encoders/audio/vectorai/test_vi_audio2vec.py deleted file mode 100644 index cfc9f930..00000000 --- a/tests/encoders/audio/vectorai/test_vi_audio2vec.py +++ /dev/null @@ -1,11 +0,0 @@ -""" - Test audio encoding -""" -from vectorhub.encoders.audio.vectorai import ViAudio2Vec -import os - -def test_encode(): - enc = ViAudio2Vec(os.environ['VI_USERNAME'], os.environ['VI_API_KEY']) - vector = enc.encode('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') - assert len(vector) > 10 - \ No newline at end of file diff --git a/tests/encoders/code/__init__.py b/tests/encoders/code/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/code/test_code2vec.py b/tests/encoders/code/test_code2vec.py deleted file mode 100644 index 44e93974..00000000 --- a/tests/encoders/code/test_code2vec.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Face 2 Vec -""" -import numpy as np 
-from vectorhub.encoders.code.transformers import Code2Vec -from ...test_utils import assert_encoder_works - -def test_code_2_vec_works(): - """ - Testing FaceNet works - """ - model = Code2Vec() - assert_encoder_works(model, 768, data_type='text') diff --git a/tests/encoders/face/__init__.py b/tests/encoders/face/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/face/test_face2vec.py b/tests/encoders/face/test_face2vec.py deleted file mode 100644 index 6a6e9689..00000000 --- a/tests/encoders/face/test_face2vec.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Face 2 Vec -""" -import numpy as np -from vectorhub.encoders.face.tf import Face2Vec -from ...test_utils import assert_encoder_works - -def test_face_2_vec_works(): - """ - Testing FaceNet works - """ - model = Face2Vec() - assert_encoder_works(model, 128, data_type='image', - image_url='https://www.thestatesman.com/wp-content/uploads/2017/08/1493458748-beauty-face-517.jpg') diff --git a/tests/encoders/image/__init__.py b/tests/encoders/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/image/fastai/__init__.py b/tests/encoders/image/fastai/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/image/fastai/test_resnet.py b/tests/encoders/image/fastai/test_resnet.py deleted file mode 100644 index a3d37fbd..00000000 --- a/tests/encoders/image/fastai/test_resnet.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - Test code for encoding with FastAI. -""" -from vectorhub.encoders.image.fastai import FastAIResnet2Vec -from ....test_utils import assert_encoder_works - -def test_fastai_encoder(): - enc = FastAIResnet2Vec() - assert_encoder_works(enc, 1024, data_type='image') diff --git a/tests/encoders/image/tfhub/__init__.py b/tests/encoders/image/tfhub/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/image/tfhub/test_bit.py b/tests/encoders/image/tfhub/test_bit.py deleted file mode 100644 index 453eaf79..00000000 --- a/tests/encoders/image/tfhub/test_bit.py +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np -from vectorhub.encoders.image.tfhub import BitMedium2Vec, BitSmall2Vec -from ....test_utils import assert_encoder_works - -def test_bit_medium_works(): - """ - Testing BIT medium works - """ - model = BitMedium2Vec() - assert_encoder_works(model, 2048, data_type='image') - -def test_bit_small_works(): - """ - Testing BIT small works - """ - model = BitSmall2Vec() - assert_encoder_works(model, 2048, data_type='image') diff --git a/tests/encoders/image/tfhub/test_inception.py b/tests/encoders/image/tfhub/test_inception.py deleted file mode 100644 index e4571bda..00000000 --- a/tests/encoders/image/tfhub/test_inception.py +++ /dev/null @@ -1,24 +0,0 @@ -import numpy as np -from vectorhub.encoders.image.tfhub import InceptionV12Vec, InceptionV22Vec, InceptionV32Vec -from ....test_utils import assert_encoder_works - -def test_inception_v1_works(): - """ - Test that mobilenet v1 works. - """ - model = InceptionV12Vec() - assert_encoder_works(model, 1024, data_type='image') - -def test_inception_v2_works(): - """ - Test that mobilenet v1 works. 
- """ - model = InceptionV22Vec() - assert_encoder_works(model, 1024, data_type='image') - -def test_inception_v3_works(): - """ - Testing for inception v3 initialize - """ - model = InceptionV32Vec() - assert_encoder_works(model, 2048, data_type='image') diff --git a/tests/encoders/image/tfhub/test_inception_resnet.py b/tests/encoders/image/tfhub/test_inception_resnet.py deleted file mode 100644 index 6f3d37e3..00000000 --- a/tests/encoders/image/tfhub/test_inception_resnet.py +++ /dev/null @@ -1,7 +0,0 @@ -import numpy as np -from vectorhub.encoders.image.tfhub import InceptionResnet2Vec -from ....test_utils import assert_encoder_works - -def test_test_inception_resnet_works(): - model = InceptionResnet2Vec() - assert_encoder_works(model, 1536, data_type='image') diff --git a/tests/encoders/image/tfhub/test_mobilenet.py b/tests/encoders/image/tfhub/test_mobilenet.py deleted file mode 100644 index 14a8358a..00000000 --- a/tests/encoders/image/tfhub/test_mobilenet.py +++ /dev/null @@ -1,18 +0,0 @@ -import numpy as np -from vectorhub.encoders.image.tfhub import MobileNetV12Vec, MobileNetV22Vec -from ....test_utils import assert_encoder_works - - -def test_mobilenet_model_works(): - """ - Test that mobilenet v1 works. - """ - model = MobileNetV12Vec() - assert_encoder_works(model, 1024, data_type='image') - -def test_mobilenet_v2_model_works(): - """ - Test that mobilenet v2 works. - """ - model = MobileNetV22Vec() - assert_encoder_works(model, 1792, data_type='image') diff --git a/tests/encoders/image/tfhub/test_resnet.py b/tests/encoders/image/tfhub/test_resnet.py deleted file mode 100644 index c0310f1f..00000000 --- a/tests/encoders/image/tfhub/test_resnet.py +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np -from vectorhub.encoders.image.tfhub import ResnetV12Vec, ResnetV22Vec -from ....test_utils import assert_encoder_works - -def test_resnet_v1_works(): - """ - Test that mobilenet v2 works. 
- """ - model = ResnetV12Vec() - assert_encoder_works(model, 2048, 'image') - -def test_resnet_v2_initialize(): - """ - Testing for resnet v2 initialize - """ - model = ResnetV22Vec() - assert_encoder_works(model, 2048, 'image') diff --git a/tests/encoders/image/vectorai/__init__.py b/tests/encoders/image/vectorai/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/image/vectorai/test_vi_image2vec.py b/tests/encoders/image/vectorai/test_vi_image2vec.py deleted file mode 100644 index 4f064a64..00000000 --- a/tests/encoders/image/vectorai/test_vi_image2vec.py +++ /dev/null @@ -1,9 +0,0 @@ -import os -import pytest -from vectorhub.encoders.image.vectorai import ViImage2Vec -from ....test_utils import assert_encoder_works - -@pytest.mark.skip(reason="Bulk encode not implemented for ViImage2Vec") -def test_encode(): - enc = ViImage2Vec(os.environ['VI_USERNAME'], os.environ['VI_API_KEY']) - assert_encoder_works(enc) diff --git a/tests/encoders/text/__init__.py b/tests/encoders/text/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/text/sentence_transformers/__init__.py b/tests/encoders/text/sentence_transformers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/text/sentence_transformers/test_sentence_transformers.py b/tests/encoders/text/sentence_transformers/test_sentence_transformers.py deleted file mode 100644 index 31209953..00000000 --- a/tests/encoders/text/sentence_transformers/test_sentence_transformers.py +++ /dev/null @@ -1,14 +0,0 @@ -import gc -import pytest -from vectorhub.encoders.text.sentence_transformers.sentence_auto_transformers import SentenceTransformer2Vec, LIST_OF_URLS -from ....test_utils import assert_encoder_works - -@pytest.mark.skip("URL errors with WSL linux containers.") -@pytest.mark.parametrize("model_name", list(LIST_OF_URLS.keys())) -def test_sentence_transformers(model_name): - """ - Sentence Transformer - """ - enc = SentenceTransformer2Vec(model_name=model_name) - assert_encoder_works(enc, data_type='text') - gc.collect(); diff --git a/tests/encoders/text/tf_transformers/__init__.py b/tests/encoders/text/tf_transformers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/text/tf_transformers/test_tf_transformers.py b/tests/encoders/text/tf_transformers/test_tf_transformers.py deleted file mode 100644 index 6cf56fcd..00000000 --- a/tests/encoders/text/tf_transformers/test_tf_transformers.py +++ /dev/null @@ -1,14 +0,0 @@ -""" - Testing for Transformers with TF requirement -""" -import pytest -from vectorhub.encoders.text.tf_transformers import TFTransformer2Vec -from ....test_utils import assert_encoder_works - -@pytest.mark.parametrize("model_name",["distilbert-base-uncased"]) -def test_tf_transformer_encode(model_name): - """ - Test for encoding transformer models - """ - model = TFTransformer2Vec(model_name) - assert_encoder_works(model, data_type='text') diff --git a/tests/encoders/text/tfhub/__init__.py b/tests/encoders/text/tfhub/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/text/tfhub/test_albert.py b/tests/encoders/text/tfhub/test_albert.py deleted file mode 100644 index fc149ec1..00000000 --- a/tests/encoders/text/tfhub/test_albert.py +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np -from vectorhub.encoders.text.tfhub import Albert2Vec -from ....test_utils import assert_encoder_works - -def test_albert_encode(): - """ - Testing for albert initialize - 
""" - enc = Albert2Vec() - assert_encoder_works(enc, 768, 'text') diff --git a/tests/encoders/text/tfhub/test_bert.py b/tests/encoders/text/tfhub/test_bert.py deleted file mode 100644 index 7c3e994b..00000000 --- a/tests/encoders/text/tfhub/test_bert.py +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np -from vectorhub.encoders.text.tfhub import Bert2Vec -from ....test_utils import assert_encoder_works - -def test_bert_encode(): - """ - Testing for bert encoding - """ - encoder = Bert2Vec() - assert_encoder_works(encoder, vector_length=1024, data_type='text') diff --git a/tests/encoders/text/tfhub/test_elmo.py b/tests/encoders/text/tfhub/test_elmo.py deleted file mode 100644 index bf0d968f..00000000 --- a/tests/encoders/text/tfhub/test_elmo.py +++ /dev/null @@ -1,18 +0,0 @@ -import pytest -from vectorhub.encoders.text.tfhub import Elmo2Vec -from ....test_utils import assert_encoder_works - -def test_elmo_encode(): - """ - Testing for Elmo encoding - """ - enc = Elmo2Vec() - assert_encoder_works(enc, 1024, data_type='text') - -@pytest.mark.parametrize('output_layer', ['lstm_outputs1','lstm_outputs2', 'default']) -def test_all_elmo_encoding_methods(output_layer): - """ - Check that the elmo signatures work. - """ - enc = Elmo2Vec() - assert_encoder_works(enc, vector_length=1024, data_type='text') diff --git a/tests/encoders/text/tfhub/test_labse.py b/tests/encoders/text/tfhub/test_labse.py deleted file mode 100644 index 70eb828f..00000000 --- a/tests/encoders/text/tfhub/test_labse.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np -from vectorhub.encoders.text.tfhub import LaBSE2Vec -from ....test_utils import assert_encoder_works - -def test_labse_encode(): - """ - Testing for labse encode - """ - import tensorflow as tf - if hasattr(tf, 'executing_eagerly'): - if not tf.executing_eagerly(): - tf.compat.v1.enable_eager_execution() - encoder = LaBSE2Vec() - assert_encoder_works(encoder, vector_length=768, data_type='text') diff --git a/tests/encoders/text/tfhub/test_use.py b/tests/encoders/text/tfhub/test_use.py deleted file mode 100644 index bb205349..00000000 --- a/tests/encoders/text/tfhub/test_use.py +++ /dev/null @@ -1,25 +0,0 @@ -import pytest -from vectorhub.encoders.text.tfhub import USE2Vec, USEMulti2Vec, USELite2Vec -from ....test_utils import assert_encoder_works - -def test_use_encode(): - """ - Testing for labse encode - """ - encoder = USE2Vec() - assert_encoder_works(encoder, vector_length=512, data_type='text') - -def test_use_multi_encode(): - """ - Testing for labse encode - """ - encoder = USEMulti2Vec() - assert_encoder_works(encoder, vector_length=512, data_type='text') - -@pytest.mark.skip("Skip pytest due to tensorflow compatibility.") -def test_use_lite_works(): - """ - Testing for USE encoder - """ - encoder = USELite2Vec() - assert_encoder_works(encoder, vector_length=512, data_type='text') diff --git a/tests/encoders/text/tfhub/test_use_multi_transformer.py b/tests/encoders/text/tfhub/test_use_multi_transformer.py deleted file mode 100644 index c5eca82e..00000000 --- a/tests/encoders/text/tfhub/test_use_multi_transformer.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from vectorhub.encoders.text.tfhub import USEMultiTransformer2Vec -from ....test_utils import assert_encoder_works - -def test_use_multi_transformer(): - """ - Testing for USE encode - """ - import tensorflow as tf - encoder = USEMultiTransformer2Vec() - assert_encoder_works(encoder, vector_length=1024, data_type='text') - diff --git a/tests/encoders/text/tfhub/test_use_transformer.py 
b/tests/encoders/text/tfhub/test_use_transformer.py deleted file mode 100644 index d6d53a16..00000000 --- a/tests/encoders/text/tfhub/test_use_transformer.py +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np -from vectorhub.encoders.text.tfhub import USETransformer2Vec -from ....test_utils import assert_encoder_works - -def test_labse_encode(): - """ - Testing for USE encode - """ - import tensorflow as tf - encoder = USETransformer2Vec() - assert_encoder_works(encoder, vector_length=1024, data_type='text') - -def test_access_urls(): - """Test Access to the URLS. - """ - urls = USETransformer2Vec.urls - assert isinstance(urls, dict) diff --git a/tests/encoders/text/torch_transformers/__init__.py b/tests/encoders/text/torch_transformers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/text/torch_transformers/test_legal_bert.py b/tests/encoders/text/torch_transformers/test_legal_bert.py deleted file mode 100644 index 3241d703..00000000 --- a/tests/encoders/text/torch_transformers/test_legal_bert.py +++ /dev/null @@ -1,11 +0,0 @@ -""" - Testing for Longformers with Torch requirement -""" -import pytest -import numpy as np -from vectorhub.encoders.text.torch_transformers import LegalBert2Vec -from ....test_utils import assert_encoder_works - -def test_torch_transformer_encode(): - model = LegalBert2Vec() - assert_encoder_works(model, data_type='text') diff --git a/tests/encoders/text/torch_transformers/test_torch_longformers.py b/tests/encoders/text/torch_transformers/test_torch_longformers.py deleted file mode 100644 index 4543c227..00000000 --- a/tests/encoders/text/torch_transformers/test_torch_longformers.py +++ /dev/null @@ -1,23 +0,0 @@ -""" - Testing for Longformers with Torch requirement -""" -import pytest -import numpy as np -from vectorhub.encoders.text.torch_transformers import Longformer2Vec -from ....test_utils import assert_encoder_works - -MODEL_LIST = [ - "allenai/longformer-base-4096", - "allenai/longformer-large-4096", -] - -VECTOR_OUTPUT = { - "allenai/longformer-base-4096": 768, - "allenai/longformer-large-4096": 1024, -} - -@pytest.mark.skip(reason="Model too big.") -@pytest.mark.parametrize("model_name", MODEL_LIST) -def test_torch_transformer_encode(model_name): - model = Longformer2Vec(model_name) - assert_encoder_works(model, vector_Length=VECTOR_OUTPUT[model_name], data_type='text') diff --git a/tests/encoders/text/torch_transformers/test_torch_transformers.py b/tests/encoders/text/torch_transformers/test_torch_transformers.py deleted file mode 100644 index 590b79f6..00000000 --- a/tests/encoders/text/torch_transformers/test_torch_transformers.py +++ /dev/null @@ -1,19 +0,0 @@ -""" - Testing for Transformers with Torch requirement -""" -import pytest -from vectorhub.encoders.text.torch_transformers import Transformer2Vec -from ....test_utils import assert_encoder_works - -MODEL_LIST = [ - # "bert-base-uncased", - "distilbert-base-uncased", - # "facebook/bart-base" -] - -# TODO: Add vector output into the model name and type - -@pytest.mark.parametrize("model_name", MODEL_LIST) -def test_torch_transformer_encode(model_name): - model = Transformer2Vec(model_name) - assert_encoder_works(model, data_type='text') diff --git a/tests/encoders/text/vectorai/__init__.py b/tests/encoders/text/vectorai/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/encoders/text/vectorai/test_vi_text2vec.py b/tests/encoders/text/vectorai/test_vi_text2vec.py deleted file mode 100644 index 522a79b1..00000000 --- 
a/tests/encoders/text/vectorai/test_vi_text2vec.py +++ /dev/null @@ -1,9 +0,0 @@ -import os -from vectorhub.encoders.text.vectorai import ViText2Vec - -def test_encoder(): - enc = ViText2Vec(os.environ['VI_USERNAME'], os.environ['VI_API_KEY']) - vector = enc.encode("HI") - assert len(vector) > 10 - vectors = enc.bulk_encode(["Hey", "Stranger!"]) - assert len(vectors) == 2 diff --git a/tests/test_autoencoder.py b/tests/test_autoencoder.py deleted file mode 100644 index 1a6af29b..00000000 --- a/tests/test_autoencoder.py +++ /dev/null @@ -1,53 +0,0 @@ -import warnings -import pytest -from vectorhub.auto_encoder import AutoEncoder, ENCODER_MAPPINGS, list_all_auto_models, BIENCODER_MAPPINGS, AutoBiEncoder -from .test_utils import * - -@pytest.mark.audio -@pytest.mark.parametrize('name', list(ENCODER_MAPPINGS.keys())[0:3]) -def test_encoders_instantiation_audio(name): - if 'audio' in name: - encoder = AutoEncoder.from_model(name) - assert_encoder_works(encoder, data_type='audio') - else: - # Default to test passing otherwise - assert True - -@pytest.mark.text -@pytest.mark.parametrize('name', list(ENCODER_MAPPINGS.keys())[0:3]) -def test_encoders_instantiation_text(name): - if name not in ['text/use-lite', 'text/elmo']: - if 'text' in name: - encoder = AutoEncoder.from_model(name) - assert_encoder_works(encoder, data_type='text') - else: - # Default to test passing otherwise - assert True - - -@pytest.mark.image -@pytest.mark.parametrize('name', list(ENCODER_MAPPINGS.keys())[0:3]) -def test_encoders_instantiation_image(name): - if 'image' in name: - encoder = AutoEncoder.from_model(name) - assert_encoder_works(encoder, data_type='image') - if 'fastai' not in name: - sample = encoder.to_grayscale(encoder.read('https://getvectorai.com/_nuxt/img/dog-1.3cc5fe1.png')) - result = encoder.encode(sample) - assert not is_dummy_vector(result) - else: - # Default to test passing otherwise - assert True - -@pytest.mark.text -@pytest.mark.parametrize('name', list(BIENCODER_MAPPINGS.keys())[0:3]) -def test_auto_biencoders(name): - if 'qa' in name: - bi_encoder = AutoBiEncoder.from_model(name) - assert_encoder_works(bi_encoder, data_type='text', model_type='bi_encoder') - -def test_listing_all_models(): - """ - Simple test to ensure model listing works. 
- """ - assert len(list_all_auto_models()) > 1 diff --git a/tests/test_encode_chunk_documents.py b/tests/test_encode_chunk_documents.py deleted file mode 100644 index 59e9688a..00000000 --- a/tests/test_encode_chunk_documents.py +++ /dev/null @@ -1,37 +0,0 @@ -import pytest -from vectorhub.encoders.text.tfhub import USE2Vec - -enc = USE2Vec() -enc.__name__ = "sample" - -@pytest.fixture -def chunk_docs(): - return [{ - "value": [ - { - "text": "hey" - }, - - { - "text": "weirdo" - } - ]}, - {"value": [ - { - "text": "hello" - }, - - { - "text": "stranger" - } - ]}, - ] - -def assert_vectors_in_docs(docs): - for d in docs: - assert "text_sample_chunkvector_" in d['value'][0], "misssing vector" - -def test_encode_documents_in_docs(chunk_docs): - chunk_docs = enc.encode_chunk_documents(chunk_field="value", fields=["text"], documents=chunk_docs) - assert_vectors_in_docs(chunk_docs) - diff --git a/tests/test_encode_document.py b/tests/test_encode_document.py deleted file mode 100644 index bf2c4346..00000000 --- a/tests/test_encode_document.py +++ /dev/null @@ -1,89 +0,0 @@ -import pytest -from vectorhub.encoders.text.tfhub import USE2Vec - -enc = USE2Vec() -enc.__name__ = "sample" - -@pytest.fixture -def docs(): - return [ - { - "text": "hey" - }, - - { - "text": "weirdo" - } - ] - - -@pytest.fixture -def chunk_docs(): - return { - "value": [ - { - "text": "hey" - }, - - { - "text": "weirdo" - } - ], - "value": [ - { - "text": "hello" - }, - - { - "text": "stranger" - } - ], - } - -@pytest.fixture -def docs_with_errors(): - return [ - { - "text": "hey" - }, - - { - "text": None - } - ] - -def assert_vectors_in_docs(docs): - for d in docs: - assert "text_sample_vector_" in d, "misssing vector" - -def test_encode_documents_in_docs(docs): - docs = enc.encode_documents(["text"], docs) - assert_vectors_in_docs(docs) - -def test_encode_documents_in_docs_2(docs): - docs = enc.encode_documents_in_bulk(["text"], docs, - vector_error_treatment="zero_vector") - assert_vectors_in_docs(docs) - -def test_encode_documents_in_docs_3(docs): - docs = enc.encode_documents_in_bulk(["text"], docs, - vector_error_treatment="do_not_include") - assert_vectors_in_docs(docs) - -def test_error_tests(docs_with_errors): - docs = enc.encode_documents(["text"], docs_with_errors, - vector_error_treatment="zero_vector") - assert_vectors_in_docs(docs) - -def test_error_tests_2(docs_with_errors): - docs = enc.encode_documents_in_bulk(["text"], docs_with_errors, - vector_error_treatment="zero_vector") - assert_vectors_in_docs(docs) - -def test_error_tests_3(docs_with_errors): - docs = enc.encode_documents_in_bulk(["text"], docs_with_errors, - vector_error_treatment="do_not_include") - assert "text_sample_vector_" in docs[0] - assert "text_sample_vector_" not in docs[-1] - assert isinstance(docs[0]['text_sample_vector_'], list) - diff --git a/tests/test_import_utils.py b/tests/test_import_utils.py deleted file mode 100644 index 805a2e96..00000000 --- a/tests/test_import_utils.py +++ /dev/null @@ -1,12 +0,0 @@ -""" - Test the import utilities. 
-""" -from vectorhub.import_utils import * -import unittest - -def assert_lists_contain_same_elements(list_1, list_2): - case = unittest.TestCase() - assert case.assertCountEqual(list_1, list_2) is None - -def test_get_package_requirements(): - assert_lists_contain_same_elements(get_package_requirements('encoders-text-tf-transformers'), ['tensorflow', 'transformers']) diff --git a/tests/test_model_to_dict.py b/tests/test_model_to_dict.py deleted file mode 100644 index 824ad44f..00000000 --- a/tests/test_model_to_dict.py +++ /dev/null @@ -1,6 +0,0 @@ -from vectorhub.auto_encoder import * - -def test_get_model_definitions(): - assert isinstance(get_model_definitions(json_fn=None), list) - assert isinstance(get_model_definitions(json_fn=None)[0], dict) - assert len(get_model_definitions(json_fn=None)) > 0 diff --git a/tests/test_utils.py b/tests/test_utils.py deleted file mode 100644 index 2dc07ac8..00000000 --- a/tests/test_utils.py +++ /dev/null @@ -1,282 +0,0 @@ -import numpy as np -import os -import random -import time -import string -import gc -from vectorhub.utils import list_models, list_installed_models -from vectorai import ViClient, ViCollectionClient - -class TempClient: - """Client For a temporary collection - """ - def __init__(self, client, collection_name: str=None): - if client is None: - raise ValueError("Client cannot be None.") - self.client = client - if isinstance(client, ViClient): - self.collection_name = collection_name - elif isinstance(client, ViCollectionClient): - self.collection_name = self.client.collection_name - else: - self.collection_name = collection_name - - def teardown_collection(self): - if self.collection_name in self.client.list_collections(): - time.sleep(2) - if isinstance(self.client, ViClient): - self.client.delete_collection(self.collection_name) - elif isinstance(self.client, ViCollectionClient): - self.client.delete_collection() - - def __enter__(self): - self.teardown_collection() - return self.client - - def __exit__(self, *exc): - self.teardown_collection() - - -def test_list_models(): - assert len(list_models()) > 0 - -def test_list_installed_models(): - # Vector AI deployed models should be immediately usable - assert len(list_installed_models()) > 0 - -def is_dummy_vector(vector, vector_length=None): - """ - Return True if the vector is the default vector, False if it is not. - """ - if vector_length is None: - vector_length = len(vector) - return vector == [1e-7] * vector_length - -def assert_vector_works(vector, vector_length=None): - """ - Assert that the vector works as intended. - """ - assert isinstance(vector, list), "Not the right data type - needs to be a list!" - # Assert that the vector works if this is in bulk - if isinstance(vector[0], list): - # If this is a list of vectors as opposed to just one - for v in vector: - assert not is_dummy_vector(vector[0], vector_length), "Is a dummy vector" - if vector_length is not None: - assert len(vector[0]) == vector_length, f"Does not match vector length of {vector_length}" - else: - # Assert vector works if it is just 1 vector. 
- assert not is_dummy_vector(vector, vector_length), "Is a dummy vector" - if vector_length is not None: - assert len(vector) == vector_length, f"Does not match vector length of {vector_length}" - -class AssertModelWorks: - def __init__(self, model, vector_length, data_type='image', model_type='encoder', - image_url: str='https://getvectorai.com/_nuxt/img/dog-1.3cc5fe1.png', - audio_url: str='https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav', - sample_sentence: str= "Cats enjoy purring in the nature.", - sample_question: str= "Where do cats enjoy purring?"): - assert data_type in ['image', 'audio', 'text', 'qa', 'text_image'], "data_type needs to be image, audio, text, qa or text_image" - assert model_type in ['bi_encoder', 'encoder'], "model_type needs to be bi_encoder or encoder" - self.model = model - self.vector_length = vector_length - self.model_type = model_type - self.data_type = data_type - self.image_url = image_url - self.audio_url = audio_url - self.audio_sample_rate = 16000 - self.sentence = sample_sentence - self.question = sample_question - - def assert_encode_works(self): - if self.data_type == 'image': - assert_vector_works(self.model.encode(self.image_url), self.vector_length) - elif self.data_type == 'audio': - assert_vector_works(self.model.encode(self.audio_url), self.vector_length) - elif self.data_type == 'text': - assert_vector_works(self.model.encode(self.sentence), self.vector_length) - elif self.data_type == 'qa': - assert_vector_works(self.model.encode_question(self.question), self.vector_length) - assert_vector_works(self.model.encode_answer(self.sentence), self.vector_length) - elif self.data_type == 'text_image': - assert_vector_works(self.model.encode_text(self.question), self.vector_length) - assert_vector_works(self.model.encode_image(self.image_url), self.vector_length) - - def assert_black_and_white_images_have_different_vectors(self): - # Black and white images - image_1 = "https://cdn.mos.cms.futurecdn.net/5PMe5hr8tjSS9Nq5d6Cebe.jpg" - image_2 = "https://blinq.art/blog/wp-content/uploads/2018/04/blinq-art-black-white-default.jpg" - vec_1 = self.model.encode(image_1) - vec_2 = self.model.encode(image_2) - assert vec_1 != vec_2, "Black and white image encoding does not work." 
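The check above compares two embeddings with exact list equality, which is enough to catch the `[1e-7]` dummy vector but can be brittle for real float vectors. A minimal sketch of a tolerance-based alternative, assuming numpy is available; the `vectors_differ` helper name is illustrative and not part of this repo:

```python
import numpy as np

def vectors_differ(vec_1, vec_2, atol=1e-6):
    """Return True if two embeddings differ beyond a small absolute tolerance."""
    return not np.allclose(np.asarray(vec_1), np.asarray(vec_2), atol=atol)

# Usage with any encoder exposing encode(), e.g. an image model:
# assert vectors_differ(model.encode(image_1), model.encode(image_2)), \
#     "Black and white image encoding does not work."
```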
- - def assert_bulk_encode_works(self): - if self.data_type == 'image': - assert_vector_works(self.model.bulk_encode([self.image_url, self.image_url, self.image_url]), self.vector_length) - elif self.data_type == 'audio': - assert_vector_works(self.model.bulk_encode([self.audio_url, self.audio_url, self.audio_url]), self.vector_length) - elif self.data_type == 'text': - assert_vector_works(self.model.bulk_encode([self.sentence, self.sentence, self.sentence]), self.vector_length) - elif self.data_type == 'qa': - assert_vector_works(self.model.encode_answer(self.sentence), self.vector_length) - elif self.data_type == 'text_image': - assert_vector_works(self.model.encode_image(self.image_url), self.vector_length) - - def assert_encoding_methods_work(self): - if self.model_type == 'encoder': - self.assert_encode_works() - self.assert_bulk_encode_works() - elif self.model_type == 'bi_encoder': - self.assert_biencode_works() - self.assert_bulk_biencode_works() - - def assert_biencode_works(self): - if self.data_type == 'qa': - assert_vector_works(self.model.encode_question(self.question), self.vector_length) - assert_vector_works(self.model.encode_answer(self.sentence), self.vector_length) - elif self.data_type == 'text_image': - assert_vector_works(self.model.encode_text(self.sentence), self.vector_length) - assert_vector_works(self.model.encode_image(self.image_url), self.vector_length) - - def assert_bulk_biencode_works(self): - if self.data_type == 'text': - assert_vector_works(self.model.encode_answer(self.sentence), self.vector_length) - - @property - def sample_document(self): - """Sample documents. - """ - return { - 'image_url': 'https://getvectorai.com/_nuxt/img/dog-1.3cc5fe1.png', - 'audio_url': 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav', - 'text': "Cats love purring on the beach.", - 'question': "Where do cats love purring?" 
- } - - @property - def sample_documents(self): - return [self.sample_document] * 30 - - @property - def field_to_encode_mapping(self): - if self.data_type == 'text': - return 'text' - if self.data_type == 'image': - return 'image_url' - if self.data_type == 'audio': - return 'audio_url' - if self.data_type == 'qa': - return 'question' - if self.data_type == 'text_image': - return 'image_url' - - @property - def field_to_search_mapping(self): - if self.data_type == 'text': - return 'text' - if self.data_type == 'image': - return 'image_url' - if self.data_type == 'audio': - return 'audio_url' - if self.data_type == 'qa': - return 'question' - if self.data_type == 'text_image': - return 'question' - - @property - def random_string(self, length=8): - letters = string.ascii_lowercase - return ''.join(random.choice(letters) for i in range(length)) - - @property - def vi_client(self): - url = "https://vectorai-development-api-vectorai-test-api.azurewebsites.net/" - if 'VH_USERNAME' in os.environ.keys(): - return ViClient(os.environ['VH_USERNAME'], os.environ['VH_API_KEY'], url=url) - elif 'VI_USERNAME' in os.environ.keys(): - return ViClient(os.environ['VI_USERNAME'], os.environ['VI_API_KEY'], url=url) - return ViClient(url=url) - - def assert_insert_vectorai_simple(self): - CN = 'test_vectorhub_' + self.random_string - with TempClient(self.vi_client, CN) as client: - response = client.insert_documents(CN, self.sample_documents, - {self.field_to_encode_mapping: self.model}) - assert len(response['failed_document_ids']) == 0 - - def assert_insert_vectorai_bulk_encode(self): - CN = 'test_vectorhub_' + self.random_string - with TempClient(self.vi_client, CN) as client: - if self.model_type == 'encoder': - response = client.insert_documents(CN, - self.sample_documents, - {self.field_to_encode_mapping: self.model}, - use_bulk_encode=True) - assert len(response['failed_document_ids']) == 0 - elif self.model_type =='bi_encoder': - response = client.insert_documents(CN, - self.sample_documents, - {self.field_to_encode_mapping: self.model}, - use_bulk_encode=True) - - def assert_insert_vectorai_with_multiprocessing(self): - CN = 'test_vectorhub_' + self.random_string - with TempClient(self.vi_client, CN) as client: - response = client.insert_documents(CN, - self.sample_documents, - {self.field_to_encode_mapping: self.model}, - use_bulk_encode=False, workers=4) - assert len(response['failed_document_ids']) == 0 - - def assert_insert_vectorai_with_multiprocessing_with_bulk_encode(self): - CN = 'test_vectorhub_' + self.random_string - with TempClient(self.vi_client, CN) as client: - response = client.insert_documents( - CN, - self.sample_documents, - {self.field_to_encode_mapping: self.model}, - use_bulk_encode=True, workers=4) - assert len(response['failed_document_ids']) == 0 - - def assert_simple_insertion_works(self): - # Ensure that inserting in a collection works normally - cn = 'test_vectorhub_' + self.random_string - items = self.vi_client.get_field_across_documents( - self.field_to_encode_mapping, self.sample_documents - ) - self.model.add_documents(self.vi_client.username, self.vi_client.api_key, items, collection_name=cn) - time.sleep(2) - response = self.model.search(self.sample_document[self.field_to_search_mapping]) - self.vi_client.delete_collection(cn) - assert len(response['results']) > 0 - - - def assert_insertion_into_vectorai_works(self): - self.assert_simple_insertion_works() - self.assert_insert_vectorai_simple() - self.assert_insert_vectorai_bulk_encode() - # Remove tests for now due 
to local object pickling - # self.assert_insert_vectorai_with_multiprocessing() - # self.assert_insert_vectorai_with_multiprocessing_with_bulk_encode() - - -def assert_encoder_works(model, vector_length=None, data_type='image', model_type='encoder', - image_url: str='https://getvectorai.com/_nuxt/img/dog-1.3cc5fe1.png', - audio_url: str='https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav', - sample_sentence: str= "Cats enjoy purring in the nature.", - sample_question: str= "Where do cats enjoy purring?"): - """ - Assert that an encoder works - """ - if vector_length is None: - try: - # Use the embedded URL module for now. - vector_length = model.urls[model.model_url]['vector_length'] - except: - pass - model_check = AssertModelWorks(model=model, vector_length=vector_length, - data_type=data_type, model_type=model_type, image_url=image_url,audio_url=audio_url, - sample_sentence=sample_sentence, sample_question=sample_question) - model_check.assert_encoding_methods_work() - if os.getenv('GITHUB_ACTOR') == 'vector-ai': - model_check.assert_insertion_into_vectorai_works() diff --git a/utils/download_badges.py b/utils/download_badges.py deleted file mode 100644 index aca42edc..00000000 --- a/utils/download_badges.py +++ /dev/null @@ -1,18 +0,0 @@ -import requests -def download_image(url, output_image_file): - r = requests.get(url) - with open(output_image_file, 'w') as f: - if isinstance(r.content, bytes): - content = r.content.decode() - else: - content = r.content - f.write(content) - -if __name__=="__main__": - - download_image("https://static.pepy.tech/personalized-badge/vectorhub-nightly?period=total&units=none&left_color=black&right_color=purple&left_text=Total%20Downloads", - "assets/total_downloads.svg") - download_image("https://static.pepy.tech/personalized-badge/vectorhub-nightly?period=week&units=none&left_color=black&right_color=purple&left_text=Weekly%20Downloads", - "assets/weekly_downloads.svg") - download_image("https://static.pepy.tech/personalized-badge/vectorhub-nightly?period=month&units=none&left_color=black&right_color=purple&left_text=Monthly%20Downloads", - "assets/monthly_downloads.svg") diff --git a/utils/template.md b/utils/template.md deleted file mode 100644 index b7df8c1e..00000000 --- a/utils/template.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -model_id: 'Must be unique - check AutoEncoder' -model_name: "Fill in" -vector_length: 'Fill in here.' -paper: Preferably link to arxiv paper goes here. -repo: Repository link. -release_date: date format is 4 digit year - 2 digit month - 2 digit day -installation: pip install vectorhub[vectorhub_version] ---- - -The base template for the markdown file goes here. - -## Description - -We have a splitter that splits on the hashtags for each subsection. This creates a field called model_description. - -## Limitations - -This creates a field called limitations. The value in this field goes in the contents below it. - -## Other Notes - -This creates a field called other_notes. - - -## Example - -Note: Keep this at the bottom to ensure UI consistency on vectorhub. This creates a field called example. diff --git a/utils/upload_cards.py b/utils/upload_cards.py deleted file mode 100755 index b631b350..00000000 --- a/utils/upload_cards.py +++ /dev/null @@ -1,93 +0,0 @@ -""" - Script to create model cards. Uploads to VectorHub collection. 
-""" -import os -import argparse -import time -import re -import logging -from typing import List -# Wildcard import to get all classes - -FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s' -LOGGER = logging.getLogger(__name__) -logging.basicConfig(format=FORMAT, level=logging.WARNING) -c_handler = logging.StreamHandler() -c_handler.setLevel(logging.DEBUG) -c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s') -c_handler.setFormatter(c_format) -LOGGER.addHandler(c_handler) - -def remove_example_from_description(text): - # Remove the Example if it is in the middle of the document - text = re.sub(r'## Example(.*?)##', '##', text, flags=re.DOTALL) - if '## Example' in text: - # text = re.sub(r'## Example(.*)', '', text) - text = re.sub(r"## Example(.*)\`\`\`.*?\`\`\`", '', text, flags=re.DOTALL, count=1) - return text - -if __name__=="__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--collection_name', default='vh_markdown') - parser.add_argument('--quick_run', action='store_true') - parser.add_argument('--reset_collection', action='store_true') - parser.add_argument('--evaluate_results', action='store_true') - args = parser.parse_args() - - from vectorhub.auto_encoder import * - from vectorai import ViClient - from vectorai.models.deployed.text import ViText2Vec - - docs = get_model_definitions(None) - LOGGER.debug("Number of documents are: ") - LOGGER.debug(len(docs)) - # Get _id across all documents - - LOGGER.debug("Marksdowns without example:") - - for i, doc in enumerate(docs): - markdown_without_example = remove_example_from_description(doc['markdown_description']) - docs[i]['markdown_without_example'] = markdown_without_example - # LOGGER.debug(markdown_without_example) - - # Generate 1 sentence summaries for the models - if not args.quick_run: - from transformers import PegasusTokenizer, PegasusForConditionalGeneration - mname = "google/pegasus-large" - model = PegasusForConditionalGeneration.from_pretrained(mname) - tok = PegasusTokenizer.from_pretrained(mname) - - def summarise(text): - batch = tok.prepare_seq2seq_batch(src_texts=[text]) # don't need tgt_text for inference - gen = model.generate(**batch) - return tok.batch_decode(gen, skip_special_tokens=True)[0] - - for i, doc in enumerate(docs): - if 'short_description' not in docs[i].keys(): - short_description = summarise(doc['description']) - docs[i]['short_description'] = short_description - # LOGGER.debug(short_description) - - vi_client = ViClient(os.environ['VH_USERNAME'], os.environ['VH_API_KEY']) - ids = vi_client.get_field_across_documents('_id', docs) - if args.reset_collection: - if args.collection_name in vi_client.list_collections(): - vi_client.delete_collection(args.collection_name) - time.sleep(5) - text_encoder = ViText2Vec(os.environ['VH_USERNAME'], os.environ['VH_API_KEY']) - - response = vi_client.insert_documents(args.collection_name, docs, models={'description': text_encoder}, overwrite=True) - - LOGGER.debug(response) - print(response) - if response['failed'] != 0: - raise ValueError("Failed IDs") - - if args.evaluate_results: - LOGGER.debug("Checking Documents:") - LOGGER.debug(vi_client.head(args.collection_name)) - LOGGER.debug(vi_client.head(args.collection_name)['vector_length']) - LOGGER.debug(vi_client.collection_schema(args.collection_name)) - import pandas as pd - pd.set_option('display.max_colwidth', None) - LOGGER.debug(vi_client.show_json(vi_client.random_documents(args.collection_name), selected_fields=['markdown_without_example'])) diff --git 
a/vectorhub/__init__.py b/vectorhub/__init__.py deleted file mode 100644 index 14ea6cdf..00000000 --- a/vectorhub/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .base import * -from .errors import * -from .utils import * - -__version__ = "1.7.7" diff --git a/vectorhub/auto_encoder.py b/vectorhub/auto_encoder.py deleted file mode 100644 index 888a44c5..00000000 --- a/vectorhub/auto_encoder.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Class for AutoEncoders. -""" -import warnings -from collections import defaultdict -from .models_dict import MODEL_REQUIREMENTS -import json - -with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - from .encoders.text.tfhub import * - from .encoders.text.vectorai import * - from .encoders.text.tf_transformers import * - from .encoders.text.torch_transformers import * - from .encoders.text.sentence_transformers import * - from .encoders.audio.vectorai import * - from .encoders.audio.tfhub import * - from .encoders.audio.pytorch import * - from .encoders.image.tfhub import * - from .encoders.image.vectorai import * - from .encoders.image.fastai import * - from .encoders.face.tf import * - from .encoders.code.transformers import * - from .bi_encoders.qa.tfhub import * - from .bi_encoders.qa.torch_transformers import * - from .bi_encoders.qa.sentence_transformers import * - from .bi_encoders.text_image.torch import * - -# Include the class and then the requirements key from models_dict.py -ENCODER_MAPPINGS = defaultdict(tuple, { - # Text models - Albert2Vec.definition.model_id : (Albert2Vec, "encoders-text-tfhub-albert"), - ExpertsBert2Vec.definition.model_id : (ExpertsBert2Vec, "encoders-text-tfhub-experts-bert"), - Bert2Vec.definition.model_id : (Bert2Vec, "encoders-text-tfhub-bert"), - Elmo2Vec.definition.model_id: (Elmo2Vec, "encoders-text-tfhub-elmo"), - LaBSE2Vec.definition.model_id : (LaBSE2Vec, "encoders-text-tfhub-labse"), - LegalBert2Vec.definition.model_id: (LegalBert2Vec, "encoders-text-torch-transformers-legalbert"), - Longformer2Vec.definition.model_id: (Longformer2Vec, "encoders-text-torch-transformers-longformer"), - USE2Vec.definition.model_id : (USE2Vec, "encoders-text-tfhub-use"), - USEMulti2Vec.definition.model_id : (USEMulti2Vec, "encoders-text-tfhub-use"), - USELite2Vec.definition.model_id : (USELite2Vec, "encoders-text-tfhub-use"), - - - # "text/tf-transformers" : (TFTransformer2Vec, "encoders-text-tf-transformers"), - # "text/torch-transformers" : (Transformer2Vec, "encoders-text-torch-transformers"), - - # Audio models - Wav2Vec.definition.model_id : (Wav2Vec, "encoders-audio-pytorch-fairseq"), - SpeechEmbedding2Vec.definition.model_id : (SpeechEmbedding2Vec, "encoders-audio-tfhub-speech_embedding"), - Trill2Vec.definition.model_id : (Trill2Vec, 'encoders-audio-tfhub-trill'), - TrillDistilled2Vec.definition.model_id : (TrillDistilled2Vec, 'encoders-audio-tfhub-trill'), - Vggish2Vec.definition.model_id : (Vggish2Vec, 'encoders-audio-tfhub-vggish'), - Yamnet2Vec.definition.model_id : (Yamnet2Vec, "encoders-audio-tfhub-vggish"), - - # Image models - BitSmall2Vec.definition.model_id : (BitSmall2Vec, "encoders-image-tfhub-bit"), - BitMedium2Vec.definition.model_id : (BitMedium2Vec, "encoders-image-tfhub-bit"), - InceptionV12Vec.definition.model_id : (InceptionV12Vec, "encoders-image-tfhub-inception"), - InceptionV22Vec.definition.model_id : (InceptionV22Vec, "encoders-image-tfhub-inception"), - InceptionV32Vec.definition.model_id : (InceptionV32Vec, "encoders-image-tfhub-inception"), - InceptionResnet2Vec.definition.model_id : 
(InceptionResnet2Vec, "encoders-image-tfhub-inception-resnet"), - MobileNetV12Vec.definition.model_id : (MobileNetV12Vec, "encoders-image-tfhub-mobilenet"), - MobileNetV22Vec.definition.model_id : (MobileNetV22Vec, "encoders-image-tfhub-mobilenet"), - ResnetV12Vec.definition.model_id : (ResnetV12Vec, "encoders-image-tfhub-resnet"), - ResnetV22Vec.definition.model_id : (ResnetV22Vec, "encoders-image-tfhub-resnet"), - FastAIResnet2Vec.definition.model_id: (FastAIResnet2Vec, "encoders-image-fastai-resnet"), - - # Face Model - Face2Vec.definition.model_id: (Face2Vec, "encoders-image-tf-face-detection"), - - # Code model - Code2Vec.definition.model_id: (Code2Vec, "encoders-text-tranfsormers"), -}) - -class AutoEncoder: - """ - The AutoEncoder class. To view the AutoEncoder's model, this means - - Example: - >>> from vectorhub.auto_encoder import * - >>> model = AutoEncoder.from_model('text/bert') - >>> # To view a list of models use the follow - >>> list_all_auto_models() - """ - def __init__(self): - pass - - @staticmethod - def from_model(model_id, *args, **kwargs): - """ - The AutoEncoder class. To view the AutoEncoder's model, this means - Args: - model_id: The ID of the model - these can be found in list_all_auto_models() - args: The arguments for the model instantiation - kwargs: The keyword arguments for the model instantiation - Example: - >>> from vectorhub.auto_encoder import * - >>> model = AutoEncoder.from_model('text/bert') - >>> # To view a list of models use the follow - >>> list_all_auto_models() - """ - model_callable, requirements = ENCODER_MAPPINGS[model_id] - assert is_all_dependency_installed(MODEL_REQUIREMENTS[requirements]), "Missing requirements! Please install." - model = model_callable(*args, **kwargs) - return model - -BIENCODER_MAPPINGS = { - USEMultiQA2Vec.definition.model_id : (USEMultiQA2Vec, "text-bi-encoder-tfhub-use-qa"), - USEQA2Vec.definition.model_id : (USEQA2Vec, "text-bi-encoder-tfhub-use-qa"), - LAReQA2Vec.definition.model_id: (LAReQA2Vec, "text-bi-encoder-tfhub-lareqa-qa"), - DPR2Vec.definition.model_id : (DPR2Vec, "text-bi-encoder-torch-dpr"), - Clip2Vec.definition.model_id: (Clip2Vec, "text-image-clip") -} - -class AutoBiEncoder: - def __init__(self): - pass - - - @staticmethod - def from_model(model_id, *args, **kwargs): - model_callable, requirements = BIENCODER_MAPPINGS[model_id] - assert is_all_dependency_installed(MODEL_REQUIREMENTS[requirements]), "Missing requirements! Please install." - model = model_callable(*args, **kwargs) - return model - - -def list_all_auto_models(): - """ - List all available models available for auto models. - """ - return list(ENCODER_MAPPINGS.keys()) + list(BIENCODER_MAPPINGS.keys()) - -def get_model_definitions(json_fn='models.json'): - """ - Get a list of dictionaries with the definitions of the models. 
- """ - all_models = [] - global_vars = globals().items() - for k, v in global_vars: - if hasattr(v, 'definition'): - values = v.definition.to_dict() - values.update({"_id": values['model_id']}) - all_models.append(values) - if json_fn is not None: - with open(json_fn, 'w') as f: - json.dump(all_models, f) - else: - return all_models diff --git a/vectorhub/base.py b/vectorhub/base.py deleted file mode 100644 index 52c17116..00000000 --- a/vectorhub/base.py +++ /dev/null @@ -1,309 +0,0 @@ -import functools -import warnings -import traceback -import numpy as np -import requests -from .options import get_option, set_option, IfErrorReturns -from .indexer import ViIndexer -from .errors import ModelError -from typing import Any, List -from doc_utils import DocUtils -from abc import ABC, abstractmethod - -BASE_2VEC_DEFINITON = { - "vector_length": None, - "description": None, - "paper": None, - "repo": None, - "model_name": None, - "architecture": None, - "tasks": None, - "limitations": None, - "download_required": None, - "training_required": None, - "finetunable": None, -} - -def catch_vector_errors(func): - """ - Decorate function and avoid vector errors. - Example: - class A: - @catch_vector_errors - def encode(self): - return [1, 2, 3] - """ - @functools.wraps(func) - def catch_vector(*args, **kwargs): - if get_option('if_error') == IfErrorReturns.RAISE_ERROR: - return func(*args, **kwargs) - else: - try: - return func(*args, **kwargs) - except: - # Bulk encode the functions as opposed to encode to accelerate the - # actual function call - if hasattr(func, "__name__"): - if "bulk_encode" in func.__name__: - # Rerun with manual encoding - try: - encode_fn = getattr(args[0], func.__name__.replace("bulk_encode", "encode")) - if len(args) > 1 and isinstance(args[1], list): - return [encode_fn(x, **kwargs) for x in args[1]] - if kwargs: - # Take the first input! - for v in kwargs.values(): - if isinstance(v, list): - return [encode_fn(x, **kwargs) for x in v] - except: - traceback.print_exc() - pass - if IfErrorReturns.RETURN_EMPTY_VECTOR: - warnings.warn("Unable to encode. Filling in with dummy vector.") - traceback.print_exc() - # get the vector length from the self body - vector_length = args[0].vector_length - if isinstance(args[1], str): - return [1e-7] * vector_length - elif isinstance(args[1], list): - # Return the list of vectors - return [[1e-7] * vector_length] * len(args[1]) - else: - return [1e-7] * vector_length - elif IfErrorReturns.RETURN_NONE: - return None - return - return catch_vector - -class Base2Vec(ViIndexer, DocUtils): - """ - Base class for vector - """ - def __init__(self): - self.__dict__.update(BASE_2VEC_DEFINITON) - - @classmethod - def validate_model_url(cls, model_url: str, list_of_urls: List[str]): - """ - Validate the model url belongs in the list of urls. This is to help - users to avoid mis-spelling the name of the model. - - # TODO: - Improve model URL validation to not include final number in URl string. - - Args: - model_url: The URl of the the model in question - list_of_urls: The list of URLS for the model in question - - """ - if model_url in list_of_urls: - return True - - if 'tfhub' in model_url: - # If the url has a number in it then we can take that into account - for url in list_of_urls: - if model_url[:-1] in url: - return True - # TODO: Write documentation link to debugging the Model URL. - warnings.warn("We have not tested this url. Please use URL at your own risk." 
+ \ - "Please use the is_url_working method to test if this is a working url if " + \ - "this is not a local directory.", UserWarning) - - @staticmethod - def is_url_working(url): - response = requests.head(url) - if response.status_code == 200: - return True - return False - - @classmethod - def chunk(self, lst: List, chunksize: int): - """ - Chunk an iterable object in Python but not a pandas DataFrame. - Args: - lst: - Python List - chunksize: - The chunk size of an object. - Example: - >>> documents = [{...}] - >>> ViClient.chunk(documents) - """ - for i in range(0, len(lst), chunksize): - yield lst[i: i + chunksize] - - def _vector_operation(self, vectors, vector_operation: str = "mean", axis=0): - """ - Args: - Vectors: the list of vectors to include - vector_operation: One of ['mean', 'minus', 'sum', 'min', 'max'] - axis: The axis to which to perform the operation - """ - if vector_operation == "mean": - return np.mean(vectors, axis=axis).tolist() - elif vector_operation == 'minus': - return np.subtract(vectors, axis=axis).tolist() - elif vector_operation == "sum": - return np.sum(vectors, axis=axis).tolist() - elif vector_operation == "min": - return np.min(vectors, axis=axis).tolist() - elif vector_operation == "max": - return np.max(vectors, axis=axis).tolist() - else: - return np.mean(vectors, axis=axis).tolist() - - @property - def __name__(self): - """ - Return the name of the model. If name is not set, returns the - model_id. - """ - if hasattr(self, '_name'): - return self._name.replace('-', '_') - elif hasattr(self, 'definition'): - if '/' in self.definition.model_id: - return self.definition.model_id.split('/')[1].replace('-', '_') - return self.definition.model_id - return '' - - @__name__.setter - def __name__(self, value): - """ - Set the name. 
- """ - setattr(self, '_name', value) - - @property - def zero_vector(self): - if hasattr(self, "vector_length"): - return self.vector_length * [1e-7] - else: - raise ValueError("Please set attribute vector_length") - - def is_empty_vector(self, vector): - return all([x == 1e-7 for x in vector]) - - def get_default_vector_field_name(self, field, field_type = "vector"): - if field_type == "vector": - return field + "_" + self.__name__ + "_vector_" - elif field_type == "chunkvector": - return field + "_" + self.__name__ + "_chunkvector_" - - def _encode_document(self, field, doc, vector_error_treatment='zero_vector', - field_type: str="vector"): - """Encode document""" - vector = self.encode(self.get_field(field, doc)) - if vector_error_treatment == "zero_vector": - self.set_field(self.get_default_vector_field_name(field, field_type=field_type), doc, vector) - return - elif vector_error_treatment == "do_not_include": - return - else: - if vector is None or self.is_empty_vector(vector): - vector = vector_error_treatment - self.set_field( - self.get_default_vector_field_name(field), - doc, vector) - - def _encode_chunk_document(self, chunk_field, field, doc, - vector_error_treatment='zero_vector', field_type: str="chunkvector"): - """Encode a chunk document""" - chunk_docs = self.get_field(chunk_field, doc) - if hasattr(self, "bulk_encode"): - return self.encode_documents_in_bulk([field], chunk_docs, field_type=field_type, - vector_error_treatment=vector_error_treatment) - elif hasattr(self, "encode"): - return self.encode_documents([field], chunk_docs, field_type=field_type, - vector_error_treatment=vector_error_treatment) - - def _bulk_encode_document(self, field, docs, vector_error_treatment: str='zero_vector', - field_type="vector"): - """bulk encode documents""" - vectors = self.bulk_encode(self.get_field_across_documents(field, docs)) - if vector_error_treatment == "zero_vector": - self.set_field_across_documents( - self.get_default_vector_field_name(field, field_type=field_type), - vectors, docs) - return - elif vector_error_treatment == "do_not_include": - [self.set_field( - self.get_default_vector_field_name(field, field_type=field_type), - value=vectors[i], doc=d) \ - for i, d in enumerate(docs) if \ - not self.is_empty_vector(vectors[i])] - else: - [self.set_field( - self.get_default_vector_field_name(field, field_type=field_type), d) - if not self.is_empty_vector(vectors[i]) - else vector_error_treatment - for i, d in enumerate(docs)] - return - - - def encode_documents(self, fields: list, documents: list, - vector_error_treatment='zero_vector', field_type="vector"): - """ - Encode documents and their specific fields. Note that this runs off the - default `encode` method. If there is a specific function that you want run, ensure - that it is set to the encode function. - - Parameters: - missing_treatment: - Missing treatment can be one of ["do_not_include", "zero_vector", value]. - documents: - The documents that are being used - fields: - The list of fields to be used - field_type: - Accepts "vector" or "chunkvector" - """ - for f in fields: - # Replace with case-switch in future - [self._encode_document(f, d, vector_error_treatment=vector_error_treatment, field_type=field_type) \ - for d in documents if self.is_field(f, d)] - return documents - - def encode_chunk_documents(self, chunk_field, fields: list, documents: list, - vector_error_treatment: str="zero_vector"): - """Encode chunk documents. Loops through every field and then every document. 
- - Parameters: - chunk_field: The field for chunking - fields: A list of fields for chunk documents - documents: a list of documents - vector_error_treatment: Vector Error Treatment - - Example: - >>> chunk_docs = enc.encode_chunk_documents(chunk_field="value", fields=["text"], documents=chunk_docs) - - """ - # Replace with case-switch in future - for f in fields: - [self._encode_chunk_document(chunk_field=chunk_field, field=f, doc=d, - vector_error_treatment=vector_error_treatment, field_type="chunkvector") \ - for d in documents if self.is_field(chunk_field, d)] - return documents - - def encode_documents_in_bulk(self, fields: list, - documents: list, vector_error_treatment='zero_vector', field_type="vector"): - """ - Encode documents and their specific fields. Note that this runs off the - default `encode` method. If there is a specific function that you want run, ensure - that it is set to the encode function. - - Parameters: - missing_treatment: - Missing treatment can be one of ["do_not_include", "zero_vector", value]. - documents: - The documents that are being used - fields: - The list of fields to be used - """ - for f in fields: - # Replace with case-switch in future - contained_docs = [d for d in documents if self.is_field(f, d)] - self._bulk_encode_document(f, contained_docs, - vector_error_treatment=vector_error_treatment, - field_type=field_type) - return documents - diff --git a/vectorhub/bi_encoders/__init__.py b/vectorhub/bi_encoders/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vectorhub/bi_encoders/qa/__init__.py b/vectorhub/bi_encoders/qa/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vectorhub/bi_encoders/qa/base.py b/vectorhub/bi_encoders/qa/base.py deleted file mode 100644 index 1c616ae6..00000000 --- a/vectorhub/bi_encoders/qa/base.py +++ /dev/null @@ -1,14 +0,0 @@ -from ...encoders.text.base import BaseText2Vec -from abc import ABC, abstractmethod - -class BaseQA2Vec(BaseText2Vec, ABC): - def encode(self): - pass - - @abstractmethod - def encode_question(self): - pass - - @abstractmethod - def encode_answer(self): - pass diff --git a/vectorhub/bi_encoders/qa/sentence_transformers/__init__.py b/vectorhub/bi_encoders/qa/sentence_transformers/__init__.py deleted file mode 100644 index 1189834a..00000000 --- a/vectorhub/bi_encoders/qa/sentence_transformers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .distilroberta_qa import * \ No newline at end of file diff --git a/vectorhub/bi_encoders/qa/sentence_transformers/distilroberta_qa.md b/vectorhub/bi_encoders/qa/sentence_transformers/distilroberta_qa.md deleted file mode 100644 index 7dba6b52..00000000 --- a/vectorhub/bi_encoders/qa/sentence_transformers/distilroberta_qa.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -model_id: 'qa/distilled-roberta-qa' -model_name: "Distilled Roberta QA" -vector_length: "768 (default)" -paper: "https://arxiv.org/abs/1908.10084" -repo: "https://github.com/UKPLab/sentence-transformers" -release_date: "2019-08-27" -installation: "pip install vectorhub[encoders-text-sentence-transformers]" -category: question-answer - ---- - -## Description - -These are Distilled Roberta QA trained on MSMACRO dataset from sbert.net by UKPLab. 
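For reference, the class is exported from `vectorhub.bi_encoders.qa.sentence_transformers` and defaults to the `distilroberta-base-msmarco-v1` checkpoint (see the Python module below), so a usage sketch consistent with those defaults looks like this:

```python
#pip install vectorhub[encoders-text-sentence-transformers]
from vectorhub.bi_encoders.qa.sentence_transformers import DistilRobertaQA2Vec

model = DistilRobertaQA2Vec()  # defaults to 'distilroberta-base-msmarco-v1'
question_vector = model.encode_question('How is the weather today?')
answer_vector = model.encode_answer('The weather is great today.')
```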
- -## Example - - -``` -#pip install vectorhub[encoders-text-sentence-transformers] -from vectorhub.encoders.qa.sentence_transformers import DistilRobertaQA2Vec -model = DistilRobertaQA2Vec('bert-base-uncased') -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/bi_encoders/qa/sentence_transformers/distilroberta_qa.py b/vectorhub/bi_encoders/qa/sentence_transformers/distilroberta_qa.py deleted file mode 100644 index 45e1b790..00000000 --- a/vectorhub/bi_encoders/qa/sentence_transformers/distilroberta_qa.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import List -from ..base import BaseQA2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from datetime import date -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-sentence-transformers']): - from sentence_transformers import SentenceTransformer - -DistilRobertaQAModelDefinition = ModelDefinition(markdown_filepath='bi_encoders/qa/sentence_transformers/distilroberta_qa.md') - -__doc__ = DistilRobertaQAModelDefinition.create_docs() - - -class DistilRobertaQA2Vec(BaseQA2Vec): - definition = DistilRobertaQAModelDefinition - urls = { - 'distilroberta-base-msmarco-v1': {'vector_length': 768} - } - def __init__(self, model_url='distilroberta-base-msmarco-v1'): - self.model_url = model_url - self.model = SentenceTransformer(model_url) - self.vector_length = self.urls[model_url] - - @property - def __name__(self): - return "distilroberta_qa" - - @catch_vector_errors - def encode_question(self, question: str): - return self.model.encode(["[QRY] "+ question])[0].tolist() - - @catch_vector_errors - def bulk_encode_question(self, questions: list): - return [self.encode(q) for q in questions] - - @catch_vector_errors - def encode_answer(self, answer: str, context: str=None): - return self.model.encode(["[DOC] "+ answer])[0].tolist() - - @catch_vector_errors - def bulk_encode_answers(self, answers: List[str]): - return [self.encode(a) for a in answers] - - @catch_vector_errors - def encode(self, string: str, context_string: str=None, string_type: str='answer'): - """ - Encode question/answer using LAReQA model. - Args: - String: Any string - Context_string: The context of the string. - string_type: question/answer. - - Example: - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.encode_answer("Why?") - """ - if string_type.lower() == 'answer': - return self.encode_answer(string, context=context_string) - elif string_type.lower() == 'question': - return self.encode_question(string, context=context_string) - - @catch_vector_errors - def bulk_encode(self, strings: List[str], context_strings: List[str]=None, string_type: str='answer'): - """ - Bulk encode question/answer using LAReQA model. - Args: - String: List of strings. - Context_string: List of context of the strings. - string_type: question/answer. 
- - Example: - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.bulk_encode("Why?", string_type='answer') - """ - if context_strings is not None: - return [self.encode(x, context_strings[i], string_type=string_type) for i, x in enumerate(strings)] - return [self.encode(x, string_type=string_type) for x in enumerate(strings)] diff --git a/vectorhub/bi_encoders/qa/tfhub/__init__.py b/vectorhub/bi_encoders/qa/tfhub/__init__.py deleted file mode 100644 index a4e1f0f0..00000000 --- a/vectorhub/bi_encoders/qa/tfhub/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .use_qa import * -from .lareqa_qa import * -from .use_multi_qa import * \ No newline at end of file diff --git a/vectorhub/bi_encoders/qa/tfhub/lareqa_qa.md b/vectorhub/bi_encoders/qa/tfhub/lareqa_qa.md deleted file mode 100644 index 0d736b56..00000000 --- a/vectorhub/bi_encoders/qa/tfhub/lareqa_qa.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -model_id: 'qa/lareqa-qa' -model_name: "LAReQA: Language-agnostic answer retrieval from a multilingual pool" -vector_length: '512 (default)' -paper: "https://arxiv.org/abs/2004.05484" -repo: "https://tfhub.dev/google/LAReQA/mBERT_En_En/1" -release_date: "2020-04-11" -installation: "pip install vectorhub[encoders-text-tfhub]" -category: question-answer ---- - -## Description - -We present LAReQA, a challenging new benchmark for language-agnostic answer retrieval from a multilingual candidate pool. Unlike previous cross-lingual tasks, LAReQA tests for "strong" cross-lingual alignment, requiring semantically related cross-language pairs to be closer in representation space than unrelated same-language pairs. Building on multilingual BERT (mBERT), we study different strategies for achieving strong alignment. We find that augmenting training data via machine translation is effective, and improves significantly over using mBERT out-of-the-box. Interestingly, the embedding baseline that performs the best on LAReQA falls short of competing baselines on zero-shot variants of our task that only target "weak" alignment. This finding underscores our claim that languageagnostic retrieval is a substantively new kind of cross-lingual evaluation. 
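Because LAReQA embeds questions and answers into a shared space, retrieval amounts to scoring candidate answers against the question vector. A minimal ranking sketch, illustrative only, assuming dot-product scoring over the `encode_question`/`encode_answer` outputs defined in the module below:

```python
#pip install vectorhub[encoders-text-tfhub]
import numpy as np
from vectorhub.bi_encoders.qa.tfhub import LAReQA2Vec

model = LAReQA2Vec()
question_vec = np.asarray(model.encode_question('How is the weather today?'))
candidates = ['The weather is great today.', 'Cats enjoy purring in the nature.']
answer_vecs = np.asarray([model.encode_answer(a) for a in candidates])
scores = answer_vecs @ question_vec  # higher score = closer in the shared space
best_answer = candidates[int(np.argmax(scores))]
```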
- -## Example - -``` -#pip install vectorhub[encoders-text-tfhub] -from vectorhub.bi_encoders.qa.tfhub import LAReQA2Vec -model = LAReQA2Vec() -model.encode_question('How is the weather today?') -model.encode_answer('The weather is great today.') -``` diff --git a/vectorhub/bi_encoders/qa/tfhub/lareqa_qa.py b/vectorhub/bi_encoders/qa/tfhub/lareqa_qa.py deleted file mode 100644 index 7aeef591..00000000 --- a/vectorhub/bi_encoders/qa/tfhub/lareqa_qa.py +++ /dev/null @@ -1,129 +0,0 @@ -from datetime import date -from typing import List -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseQA2Vec -if is_all_dependency_installed(MODEL_REQUIREMENTS['text-bi-encoder-tfhub-lareqa-qa']): - import bert - import numpy as np - import tensorflow.compat.v2 as tf - import tensorflow_hub as hub - from tensorflow.python.framework.errors_impl import NotFoundError - try: - import tensorflow_text - except NotFoundError: - print('The installed Tensorflow Text version is not aligned with tensorflow, make sure that tensorflow-text version is same version as tensorflow') - -LAReQAModelDefinition = ModelDefinition(markdown_filepath='bi_encoders/qa/tfhub/lareqa_qa') -__doc__ = LAReQAModelDefinition.create_docs() - -class LAReQA2Vec(BaseQA2Vec): - definition = LAReQAModelDefinition - urls = { - "https://tfhub.dev/google/LAReQA/mBERT_En_En/1": {}, - "https://tfhub.dev/google/LAReQA/mBERT_X_X/1": {}, - "https://tfhub.dev/google/LAReQA/mBERT_X_Y/1": {}, - "https://tfhub.dev/google/LAReQA/mBERT_X_X_mono/1": {}, - } - def __init__(self, model_url='https://tfhub.dev/google/LAReQA/mBERT_En_En/1', - vector_length=512): - self.validate_model_url(model_url, self.urls) - self.model_url = model_url - self.model = hub.load(self.model_url) - self.model_name = model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.vector_length = vector_length - self.question_encoder = self.model.signatures["query_encoder"] - self.answer_encoder = self.model.signatures['response_encoder'] - - @property - def __name__(self): - return "lareqa_qa" - - @catch_vector_errors - def encode_question(self, question: str): - """ - Encode the question using LAReQA model. - Example: - - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.encode_question("Why?") - """ - return self.question_encoder(input=tf.constant(np.asarray([question])))["outputs"][0].numpy().tolist() - - @catch_vector_errors - def bulk_encode_question(self, questions: list): - """ - Encode questions using LAReQA model. - Example: - - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.encode_question(["Why?", "Who?"]) - """ - return self.question_encoder(input=tf.constant(np.asarray(questions)))["outputs"].numpy().tolist() - - @catch_vector_errors - def encode_answer(self, answer: str, context: str=None): - """ - Encode answer using LAReQA model. 
- Example: - - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.encode_answer("Why?") - """ - if context is None: - context = answer - return self.answer_encoder( - input=tf.constant(np.asarray([answer])), - context=tf.constant(np.asarray([context])))["outputs"][0].numpy().tolist() - - @catch_vector_errors - def bulk_encode_answers(self, answers: List[str], contexts: List[str]=None): - if contexts is None: - contexts = answers - return self.answer_encoder( - input=tf.constant(np.asarray(answers)), - context=tf.constant(np.asarray(contexts)))["outputs"].numpy().tolist() - - @catch_vector_errors - def encode(self, string: str, context_string: str=None, string_type: str='answer'): - """ - Encode question/answer using LAReQA model. - Args: - String: Any string - Context_string: The context of the string. - string_type: question/answer. - - Example: - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.encode_answer("Why?") - """ - if string_type.lower() == 'answer': - return self.encode_answer(string, context=context_string) - elif string_type.lower() == 'question': - return self.encode_question(string, context=context_string) - - @catch_vector_errors - def bulk_encode(self, strings: List[str], context_strings: List[str]=None, string_type: str='answer'): - """ - Bulk encode question/answer using LAReQA model. - Args: - String: List of strings. - Context_string: List of context of the strings. - string_type: question/answer. - - Example: - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.bulk_encode("Why?", string_type='answer') - """ - if context_strings is not None: - return [self.encode(x, context_strings[i], string_type=string_type) for i, x in enumerate(strings)] - return [self.encode(x, string_type=string_type) for i, x in enumerate(strings)] - diff --git a/vectorhub/bi_encoders/qa/tfhub/use_multi_qa.md b/vectorhub/bi_encoders/qa/tfhub/use_multi_qa.md deleted file mode 100644 index 6b1ded29..00000000 --- a/vectorhub/bi_encoders/qa/tfhub/use_multi_qa.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -model_id: 'qa/use-multi-qa' -model_name: "Universal Sentence Encoder Multilingual Question Answering" -vector_length: "512 (default)" -repo: "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3" -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2019-07-01" -category: question-answer -short_description: Greater-than-word length multi-lingual text encoder for question answer retrieval. ---- - -## Description - -- Developed by researchers at Google, 2019, v2 [1]. -- Covers 16 languages, strong performance on cross-lingual question answer retrieval. -- It is trained on a variety of data sources and tasks, with the goal of learning text representations that are useful out-of-the-box to retrieve an answer given a question, as well as question and answers across different languages. -- It can also be used in other applications, including any type of text classification, clustering, etc. - - -## Supported Languages - -Arabic, Chinese-simplified, Chinese-traditional, English, French, German, Italian, Japanese, Korean, Dutch, Polish, Portuguese, Spanish, Thai, Turkish, Russian - -## Training Corpora - -Reddit, Wikipedia, Stanford Natural Language Inference and web mined translation pairs. 
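To illustrate the cross-lingual retrieval use case described above, a hedged sketch: an English question is scored against candidate answers in several of the supported languages via the shared multilingual embedding space (dot-product scoring is an assumption here, not an official recipe):

```python
#pip install vectorhub[encoders-text-tfhub]
import numpy as np
from vectorhub.bi_encoders.qa.tfhub import USEMultiQA2Vec

model = USEMultiQA2Vec()
question_vec = np.asarray(model.encode_question('How is the weather today?'))
candidates = [
    'The weather is great today.',   # English
    'El clima es excelente hoy.',    # Spanish
    'Les chats aiment ronronner.',   # French, unrelated
]
scores = np.asarray([model.encode_answer(a) for a in candidates]) @ question_vec
ranked = [candidates[i] for i in np.argsort(scores)[::-1]]  # best match first
```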
- -## Training Setup - -Question-Answering was trained on 4 unique task types: -i) conversational response prediction -ii) quick thought -iii) natural language inference -iv) tranlsation ranking (bridge task) - -Note: to learn cross-lingual representations, they used translation ranking tasks using parallel corpora for the source-target pairs. - -Multi-task training is performed through different tasks and performed an optimization step for a single task at a time. -All models are trained with a batch size of 100 using SGD with a learning rate of 0.008 and 30million steps. - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -from vectorhub.bi_encoders.qa.tfhub import USEMultiQA2Vec -model = USEMultiQA2Vec() -model.encode_question('How is the weather today?') -model.encode_answer('The weather is great today.') -``` diff --git a/vectorhub/bi_encoders/qa/tfhub/use_multi_qa.py b/vectorhub/bi_encoders/qa/tfhub/use_multi_qa.py deleted file mode 100644 index c156158f..00000000 --- a/vectorhub/bi_encoders/qa/tfhub/use_multi_qa.py +++ /dev/null @@ -1,36 +0,0 @@ -from typing import List -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseQA2Vec -from .use_qa import USEQA2Vec -if is_all_dependency_installed(MODEL_REQUIREMENTS['text-bi-encoder-tfhub-use-qa']): - import bert - import numpy as np - import tensorflow as tf - import tensorflow_hub as hub - from tensorflow.python.framework.errors_impl import NotFoundError - try: - import tensorflow_text - except NotFoundError: - print('The installed Tensorflow Text version is not aligned with tensorflow, make sure that tensorflow-text version is same version as tensorflow') - -USEMultiQAModelDefinition = ModelDefinition(markdown_filepath='bi_encoders/qa/tfhub/use_multi_qa') - -class USEMultiQA2Vec(USEQA2Vec): - definition = USEMultiQAModelDefinition - urls = { - "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3": {"vector_length": 512} - } - def __init__(self, model_url="https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3"): - self.model_url = model_url - self.model = hub.load(self.model_url) - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.vector_length = self.urls[model_url] - - @property - def __name__(self): - return "usemulti_qa" diff --git a/vectorhub/bi_encoders/qa/tfhub/use_qa.md b/vectorhub/bi_encoders/qa/tfhub/use_qa.md deleted file mode 100644 index 4cfd1235..00000000 --- a/vectorhub/bi_encoders/qa/tfhub/use_qa.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -model_id: 'qa/use-qa' -model_name: "Universal Sentence Encoder Question Answering" -vector_length: "512 (default)" -release_date: "2020-03-11" -repo: 'https://tfhub.dev/google/universal-sentence-encoder-qa/3' -installation: "pip install vectorhub[encoders-text-tfhub-tftext]" -category: question-answer -short_description: Greater-than-word length text encoder for question answer retrieval. ---- - -## Description - -- Developed by researchers at Google, 2019, v2 [1]. -- It is trained on a variety of data sources and tasks, with the goal of learning text representations that -are useful out-of-the-box to retrieve an answer given a question, as well as question and answers across different languages. -- It can also be used in other applications, including any type of text classification, clustering, etc. 
-
-- Multi-task training setup is based on the paper [Learning Cross-lingual Sentence Representations via a Multi-task Dual Encoder](https://arxiv.org/pdf/1810.12836.pdf).
-- Achieved 56.1 on the SQuAD retrieval dev set and 46.2 on the train set.
-
-## Training Corpora
-
-Reddit, Wikipedia, Stanford Natural Language Inference and web-mined translation pairs.
-
-## Training Setup
-
-The question-answering model was trained on 4 distinct task types:
-i) conversational response prediction
-ii) quick thought
-iii) natural language inference
-iv) translation ranking (bridge task)
-
-Note: to learn cross-lingual representations, they used translation ranking tasks with parallel corpora for the source-target pairs.
-
-Multi-task training cycles through the different tasks, performing an optimization step for one task at a time.
-All models are trained with a batch size of 100 using SGD with a learning rate of 0.008 for 30 million steps.
-
-## Example
-
-```
-#pip install vectorhub[encoders-text-tfhub]
-from vectorhub.bi_encoders.qa.tfhub import USEQA2Vec
-model = USEQA2Vec()
-model.encode_question('How is the weather today?')
-model.encode_answer('The weather is great today.')
-```
diff --git a/vectorhub/bi_encoders/qa/tfhub/use_qa.py b/vectorhub/bi_encoders/qa/tfhub/use_qa.py
deleted file mode 100644
index cd24d52b..00000000
--- a/vectorhub/bi_encoders/qa/tfhub/use_qa.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from typing import List
-from ..base import BaseQA2Vec
-from ....base import catch_vector_errors
-from ....doc_utils import ModelDefinition
-from ....import_utils import *
-from ....models_dict import MODEL_REQUIREMENTS
-from datetime import date
-
-if is_all_dependency_installed(MODEL_REQUIREMENTS['text-bi-encoder-tfhub-use-qa']):
-    import bert
-    import numpy as np
-    import tensorflow as tf
-    import tensorflow_hub as hub
-    from tensorflow.python.framework.errors_impl import NotFoundError
-    try:
-        import tensorflow_text
-    except NotFoundError:
-        print('The installed Tensorflow Text version is not aligned with Tensorflow. Make sure that the tensorflow-text version matches the tensorflow version.')
-
-USEQAModelDefinition = ModelDefinition(markdown_filepath='bi_encoders/qa/tfhub/use_qa')
-__doc__ = USEQAModelDefinition.create_docs()
-
-class USEQA2Vec(BaseQA2Vec):
-    definition = USEQAModelDefinition
-    urls = {
-        "https://tfhub.dev/google/universal-sentence-encoder-qa/3": {"vector_length": 512}
-    }
-    def __init__(self, model_url="https://tfhub.dev/google/universal-sentence-encoder-qa/3"):
-        self.model_url = model_url
-        self.model = hub.load(self.model_url)
-        self.model_name = self.model_url.replace(
-            'https://tfhub.dev/google/', '').replace('/', '_')
-        self.vector_length = 512
-
-    @catch_vector_errors
-    def encode_question(self, question: str):
-        return self.model.signatures['question_encoder'](tf.constant([question]))['outputs'].numpy().tolist()[0]
-
-    @catch_vector_errors
-    def bulk_encode_questions(self, questions: List[str]):
-        return self.model.signatures['question_encoder'](tf.constant(questions))['outputs'].numpy().tolist()
-
-    @catch_vector_errors
-    def encode_answer(self, answer: str, context: str=None):
-        if context is None:
-            context = answer
-        return self.model.signatures['response_encoder'](
-            input=tf.constant([answer]),
-            context=tf.constant([context]))['outputs'].numpy().tolist()[0]
-
-    @catch_vector_errors
-    def bulk_encode_answers(self, answers: List[str], contexts: List[str]=None):
-        if contexts is None:
-            contexts = answers
-        return self.model.signatures['response_encoder'](
-            input=tf.constant(answers),
-            context=tf.constant(contexts))['outputs'].numpy().tolist()
-
-    @catch_vector_errors
-    def encode(self, string: str, context_string: str=None, string_type: str='answer'):
-        """
-            Encode a question or an answer with the USE QA model.
-            Args:
-                string: Any string.
-                context_string: The context of the string.
-                string_type: question/answer.
-
-            Example:
-            >>> from vectorhub.bi_encoders.qa.tfhub import USEQA2Vec
-            >>> model = USEQA2Vec()
-            >>> model.encode_answer("Why?")
-        """
-        if string_type.lower() == 'answer':
-            return self.encode_answer(string, context=context_string)
-        elif string_type.lower() == 'question':
-            return self.encode_question(string)
-
-    @catch_vector_errors
-    def bulk_encode(self, strings: List[str], context_strings: List[str]=None, string_type: str='answer'):
-        """
-            Bulk encode questions or answers with the USE QA model.
-            Args:
-                strings: List of strings.
-                context_strings: List of contexts for the strings.
-                string_type: question/answer.
-
-            Example:
-            >>> from vectorhub.bi_encoders.qa.tfhub import USEQA2Vec
-            >>> model = USEQA2Vec()
-            >>> model.bulk_encode(["Why?"], string_type='answer')
-        """
-        if context_strings is not None:
-            return [self.encode(x, context_strings[i], string_type=string_type) for i, x in enumerate(strings)]
-        return [self.encode(x, string_type=string_type) for x in strings]
-
-    @property
-    def __name__(self):
-        return "use_qa"
diff --git a/vectorhub/bi_encoders/qa/torch_transformers/__init__.py b/vectorhub/bi_encoders/qa/torch_transformers/__init__.py
deleted file mode 100644
index 74044849..00000000
--- a/vectorhub/bi_encoders/qa/torch_transformers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .dpr import *
diff --git a/vectorhub/bi_encoders/qa/torch_transformers/dpr.md b/vectorhub/bi_encoders/qa/torch_transformers/dpr.md
deleted file mode 100644
index 8175ff1d..00000000
--- a/vectorhub/bi_encoders/qa/torch_transformers/dpr.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-model_id: "qa/dpr"
-model_name: "Dense Passage Retrieval"
-vector_length: "768 (default)"
-release_date: "2020-10-04"
-paper: "https://arxiv.org/abs/2004.04906"
-installation: "pip install vectorhub[encoders-text-torch-transformers]"
-category: question-answer
----
-
-## Description
-
-Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets, our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA benchmarks.
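
To make the dual-encoder retrieval described above concrete, the sketch below ranks a few candidate passages against a question by the inner product of their DPR embeddings (the similarity the paper optimises). It is a minimal illustration that assumes the `DPR2Vec` wrapper from this repository; the question and passages are made-up placeholders, and NumPy is used only for scoring.

```python
# pip install vectorhub[encoders-text-torch-transformers]
import numpy as np
from vectorhub.bi_encoders.qa.torch_transformers import DPR2Vec

model = DPR2Vec()

passages = [
    "The Eiffel Tower is located in Paris and was completed in 1889.",
    "The Great Wall of China stretches for thousands of kilometres.",
    "Paris is the capital and most populous city of France.",
]

# Encode the question and the candidate passages into 768-d vectors.
question_vec = np.array(model.encode_question("Where is the Eiffel Tower?"))
passage_vecs = np.array(model.bulk_encode_answers(passages))

# Rank passages by inner product between the question and passage embeddings.
scores = passage_vecs @ question_vec
for score, passage in sorted(zip(scores, passages), reverse=True):
    print(f"{score:.2f}  {passage}")
```

Because the retriever is trained against a dot-product similarity, no normalisation is applied before scoring.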
- -## Example - -``` -#pip install vectorhub[encoders-text-torch-transformers] -from vectorhub.bi_encoders.qa.torch_transformers import DPR2Vec -model = DPR2Vec() -model.encode_question('How is the weather today?') -model.encode_answer('The weather is great today.') -``` diff --git a/vectorhub/bi_encoders/qa/torch_transformers/dpr.py b/vectorhub/bi_encoders/qa/torch_transformers/dpr.py deleted file mode 100644 index 8ffcdae0..00000000 --- a/vectorhub/bi_encoders/qa/torch_transformers/dpr.py +++ /dev/null @@ -1,98 +0,0 @@ -from datetime import date -from typing import List -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....base import catch_vector_errors -from ..base import BaseQA2Vec -if is_all_dependency_installed('encoders-text-torch-transformers'): - from transformers import DPRContextEncoder, DPRContextEncoderTokenizer, DPRQuestionEncoder, DPRQuestionEncoderTokenizer, DPRReader, DPRReaderTokenizer - import torch - import numpy as np - -DPRModelDefinition = ModelDefinition(markdown_filepath='bi_encoders/qa/torch_transformers/dpr') -__doc__ = DPRModelDefinition.create_docs() - -class DPR2Vec(BaseQA2Vec): - definition = DPRModelDefinition - def __init__(self): - self.context_tokenizer = DPRContextEncoderTokenizer.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base') - self.context_model = DPRContextEncoder.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', return_dict=True) - - self.query_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base') - self.query_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base") - - self.reader_tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base') - self.reader_model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base', return_dict=True) - self.vector_length = 768 - - def encode_question(self, question): - """ - Encode a question with DPR. - """ - input_ids = self.query_tokenizer(question, return_tensors='pt')["input_ids"] - return self.query_encoder(input_ids)[0].tolist()[0] - - def bulk_encode_questions(self, questions: str): - """ - Bulk encode the question - """ - input_ids = self.query_tokenizer(questions, truncation=True, max_length=True, return_tensors='pt')["input_ids"] - return self.query_encoder(input_ids)[0].tolist() - - def encode_answer(self, answer: str): - """ - Encode an answer with DPR. - """ - if isinstance(answer, str): - input_ids = self.context_tokenizer(answer, return_tensors='pt', truncation=True, - max_length=512)["input_ids"] - return self.context_model(input_ids).pooler_output.tolist()[0] - elif isinstance(answer, list): - return self.bulk_encode_answers(answer) - - def bulk_encode_answers(self, answers: str): - """ - Bulk encode the answers with DPR. - """ - input_ids = self.context_tokenizer(answers, return_tensors='pt', truncation=True, padding=True, - max_length=512)["input_ids"] - return self.context_model(input_ids).pooler_output.tolist() - - @catch_vector_errors - def encode(self, string: str, string_type: str='answer'): - """ - Encode question/answer using LAReQA model. - Args: - String: Any string - Context_string: The context of the string. - string_type: question/answer. 
- - Example: - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.encode_answer("Why?") - """ - if string_type.lower() == 'answer': - return self.encode_answer(string) - elif string_type.lower() == 'question': - return self.encode_question(string) - - @catch_vector_errors - def bulk_encode(self, strings: List[str], string_type: str='answer'): - """ - Bulk encode question/answer using LAReQA model. - Args: - String: List of strings. - Context_string: List of context of the strings. - string_type: question/answer. - - Example: - >>> from vectorhub.bi_encoders.qa.tfhub.lareqa_qa import * - >>> model = LAReQA2Vec() - >>> model.bulk_encode("Why?", string_type='answer') - """ - return [self.encode(x, string_type=string_type) for i, x in enumerate(strings)] - - @property - def __name__(self): - return "dpr" diff --git a/vectorhub/bi_encoders/text_image/__init__.py b/vectorhub/bi_encoders/text_image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vectorhub/bi_encoders/text_image/torch/__init__.py b/vectorhub/bi_encoders/text_image/torch/__init__.py deleted file mode 100644 index 42df6350..00000000 --- a/vectorhub/bi_encoders/text_image/torch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .clip import * \ No newline at end of file diff --git a/vectorhub/bi_encoders/text_image/torch/clip.md b/vectorhub/bi_encoders/text_image/torch/clip.md deleted file mode 100644 index 2a3a358e..00000000 --- a/vectorhub/bi_encoders/text_image/torch/clip.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -model_id: "text_image/clip" -model_name: "CLIP" -vector_length: "512 (default)" -release_date: "2021-01-01" -paper: "https://cdn.openai.com/papers/Learning_Transferable_Visual_Models_From_Natural_Language_Supervision.pdf" -repo: https://github.com/openai/CLIP -installation: "pip install vectorhub[clip]" -category: text-image -short_description: CLIP aims to test the ability of models to generalize arbitrary image classification tasks in a zero-shot manner. ---- - -## Example - -``` -#pip install vectorhub[clip] -from vectorhub.bi_encoders.text_image.torch import Clip2Vec -model = Clip2Vec() -model.encode_image('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode_text("A purple V") -``` - -## Description - -The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. - -### Model Date - -January 2021 - -### Model Type - -The base model uses a ResNet50 with several modifications as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. There is also a variant of the model where the ResNet image encoder is replaced with a Vision Transformer. - -### Model Version - -Initially we’ve released one CLIP model based on the Vision Transformer architecture equivalent to ViT-B/32 -Please see the paper linked below for further details about their specification. 
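
To show how the contrastive (image, text) objective gets used downstream, the sketch below scores a single image against a handful of free-text prompts by cosine similarity of their CLIP embeddings, i.e. a tiny zero-shot classification. It is a minimal example that assumes the `Clip2Vec` wrapper from this repository; the prompt strings are placeholders and the image URL is the one from the example above.

```python
# pip install vectorhub[clip]
import numpy as np
from vectorhub.bi_encoders.text_image.torch import Clip2Vec

model = Clip2Vec()

labels = ["a photo of a dog", "a photo of a rabbit", "a photo of a logo"]
image_vec = np.array(model.encode_image('https://getvectorai.com/assets/hub-logo-with-text.png'))
text_vecs = np.array(model.bulk_encode_text(labels))

# Cosine similarity between the image embedding and each text-prompt embedding.
image_vec = image_vec / np.linalg.norm(image_vec)
text_vecs = text_vecs / np.linalg.norm(text_vecs, axis=1, keepdims=True)
scores = text_vecs @ image_vec

for label, score in sorted(zip(labels, scores), key=lambda pair: -pair[1]):
    print(f"{score:.3f}  {label}")
```

The prompts mimic the "a photo of a {label}" templates typically used with CLIP; with a fixed label set, this mirrors the zero-shot classification setup evaluated in the paper.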
- -### Documents - -- [Blog Post](https://openai.com/blog/clip/) -- [CLIP Paper](https://cdn.openai.com/papers/Learning_Transferable_Visual_Models_From_Natural_Language_Supervision.pdf) - - -## Model Use - -### Intended Use - -The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. - -#### Primary intended uses - -The primary intended users of these models are AI researchers. - -We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. - -### Out-of-Scope Use Cases - -**Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. - -Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. - -Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. - - - -## Data - -The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. - -### Data Mission Statement - -Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. - - - -## Performance and Limitations - -### Performance - -We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. 
The paper describes model performance on the following datasets: - -- Food101 -- CIFAR10 -- CIFAR100 -- Birdsnap -- SUN397 -- Stanford Cars -- FGVC Aircraft -- VOC2007 -- DTD -- Oxford-IIIT Pet dataset -- Caltech101 -- Flowers102 -- MNIST -- SVHN -- IIIT5K -- Hateful Memes -- SST-2 -- UCF101 -- Kinetics700 -- Country211 -- CLEVR Counting -- KITTI Distance -- STL-10 -- RareAct -- Flickr30 -- MSCOCO -- ImageNet -- ImageNet-A -- ImageNet-R -- ImageNet Sketch -- ObjectNet (ImageNet Overlap) -- Youtube-BB -- ImageNet-Vid - -## Limitations - -CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. - -### Bias and Fairness - -We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). - -We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. 
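
For readers who want to run this style of audit on their own classifier outputs, the snippet below shows one way to compute accuracy separately for each demographic group. It is purely illustrative, it is not the evaluation code used in the CLIP paper, and the label and group arrays are dummy data.

```python
import numpy as np

def accuracy_by_group(y_true, y_pred, groups):
    """Classification accuracy computed separately for each group label."""
    y_true, y_pred, groups = map(np.asarray, (y_true, y_pred, groups))
    return {
        group: float((y_pred[groups == group] == y_true[groups == group]).mean())
        for group in np.unique(groups)
    }

# Dummy data: disparities show up as differences between the per-group accuracies.
y_true = ["male", "female", "female", "male", "female", "male"]
y_pred = ["male", "female", "male", "male", "female", "male"]
groups = ["A", "A", "B", "B", "C", "C"]
print(accuracy_by_group(y_true, y_pred, groups))
```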
- - - -## Feedback - -### Where to send questions or comments about the model - -Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9) diff --git a/vectorhub/bi_encoders/text_image/torch/clip.py b/vectorhub/bi_encoders/text_image/torch/clip.py deleted file mode 100644 index 7edc1c9d..00000000 --- a/vectorhub/bi_encoders/text_image/torch/clip.py +++ /dev/null @@ -1,237 +0,0 @@ -"""Clip2Vec by OpenAI -""" -import traceback -from concurrent.futures import ThreadPoolExecutor -from datetime import date -from typing import List -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....base import catch_vector_errors -from ....encoders.image import BaseImage2Vec -from ....encoders.text import BaseText2Vec - -is_all_dependency_installed('clip') - -try: - import clip - import torch - import numpy as np - import requests - import cv2 - from PIL import Image - from requests.exceptions import MissingSchema -except: - import traceback - traceback.print_exc() - -CLIPModelDefinition = ModelDefinition(markdown_filepath='bi_encoders/text_image/torch/clip') -__doc__ = CLIPModelDefinition.create_docs() - -class Clip2Vec(BaseImage2Vec, BaseText2Vec): - definition = CLIPModelDefinition - urls = { - "ViT-B/32": {'vector_length': 512}, - "RN50": {'vector_length': 512} - } - def __init__(self, url='ViT-B/32', context_length:int=77): - self.context_length = context_length - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' - # Note that the preprocess is a callable - self.model, self.preprocess = clip.load(url, device=self.device) - self.vector_length = self.urls[url]["vector_length"] - self.url = url - - def read(self, image_url): - try: - return Image.open(requests.get(image_url, stream=True).raw) - except MissingSchema: - return Image.open(image_url) - - def preprocess_black_and_white_image(self, x): - """Pass in after the read function - """ - x = self.preprocess.transforms[0](x) - x = self.preprocess.transforms[1](x) - x = self.preprocess.transforms[3](x) - x = torch.stack((x, x, x), dim=1) - x = self.preprocess.transforms[4](x) - return x - - @catch_vector_errors - def encode_text(self, text: str): - if self.device == 'cuda': - text = clip.tokenize(text, context_length=self.context_length).to(self.device) - return self.model.encode_text(text).cpu().detach().numpy().tolist()[0] - elif self.device == 'cpu': - text = clip.tokenize(text, context_length=self.context_length).to(self.device) - return self.model.encode_text(text).detach().numpy().tolist()[0] - - def encode_video(self, video_url: str): - """Encode a video by the first still frame - """ - cap = cv2.VideoCapture(video_url) - while True: - ret, frame = cap.read() - if not ret: - break - pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - image = self.preprocess(pil_img).unsqueeze(0).to(self.device) - return self.model.encode_image(image).cpu().detach().numpy().tolist()[0] - - def bulk_encode_text(self, texts: List[str]): - if self.device == 'cuda': - tokenized_text = clip.tokenize(texts, context_length=self.context_length).to(self.device) - return self.model.encode_text(tokenized_text).cpu().detach().numpy().tolist() - elif self.device == 'cpu': - tokenized_text = clip.tokenize(texts, context_length=self.context_length).to(self.device) - return self.model.encode_text(tokenized_text).detach().numpy().tolist() - - def preprocess_image(self, img: str): - try: - if self.is_greyscale(img): - return 
self.preprocess_black_and_white_image(self.read(img)).unsqueeze(0).to(self.device) - return self.preprocess(self.read(img)).unsqueeze(0).to(self.device) - except: - traceback.print_exc() - return torch.empty((1, 3, 224, 224), dtype=torch.int32, device=self.device) - - def parallel_preprocess_image(self, images: str): - with ThreadPoolExecutor(max_workers=5) as executor: - future = executor.map(self.preprocess_image, images) - return list(future) - - @catch_vector_errors - def encode_image(self, image_url: str): - """Encodes an image - """ - if self.device == 'cpu': - image = self.preprocess_image(image_url) - if image.dim() == 3: - image = image.unsqueeze(0).to(self.device) - return self.model.encode_image(image).detach().numpy().tolist()[0] - elif self.device == 'cuda': - image = self.preprocess_image(image_url) - if image.ndim == 3: - image = image.unsqueeze(0).to(self.device) - elif image.ndim == 4: - image = image.to(self.device) - return self.model.encode_image(image).cpu().detach().numpy().tolist()[0] - - def bulk_encode_image(self, images: str): - """Batch Processing for CLIP image encoding - """ - # Parallel process the encoding - future = self.parallel_preprocess_image(images) - results = self.model.encode_image(torch.cat(list(future))).tolist() - # Replace NANs with default vector value - results[results != results] = 1e-7 - return results - - def encode(self, data: str, data_type='image'): - if data_type == 'image': - return self.encode_image(data) - elif data_type == 'text': - return self.encode_text(data) - raise ValueError("data_type must be either `image` or `text`") - - def bulk_encode(self, data: str, data_type='image'): - if data_type == 'image': - return self.bulk_encode_image(data) - elif data_type == 'text': - return self.bulk_encode_text(data) - raise ValueError("data_type must be either `image` or `text`") - - - -class ClipText2Vec(BaseText2Vec): - definition = CLIPModelDefinition - urls = { - "ViT-B/32": {'vector_length': 512}, - "RN50": {'vector_length': 512} - } - def __init__(self, url='ViT-B/32', context_length:int=77): - self.context_length = context_length - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' - # Note that the preprocess is a callable - self.model, self.preprocess = clip.load(url, device=self.device) - self.vector_length = self.urls[url]["vector_length"] - - @catch_vector_errors - def encode(self, text: str): - if self.device == 'cuda': - text = clip.tokenize(text, context_length=self.context_length).to(self.device) - return self.model.encode_text(text).cpu().detach().numpy().tolist()[0] - elif self.device == 'cpu': - text = clip.tokenize(text, context_length=self.context_length).to(self.device) - return self.model.encode_text(text).detach().numpy().tolist()[0] - - def bulk_encode(self, texts: List[str]): - if self.device == 'cuda': - tokenized_text = clip.tokenize(texts, context_length=self.context_length).to(self.device) - return self.model.encode_text(tokenized_text).cpu().detach().numpy().tolist() - elif self.device == 'cpu': - tokenized_text = clip.tokenize(texts, context_length=self.context_length).to(self.device) - return self.model.encode_text(tokenized_text).detach().numpy().tolist() - - -class ClipImage2Vec(BaseImage2Vec): - definition = CLIPModelDefinition - urls = { - "ViT-B/32": {'vector_length': 512}, - "RN50": {'vector_length': 512} - } - def __init__(self, url='ViT-B/32', context_length:int=77): - self.context_length = context_length - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' - # Note that the 
preprocess is a callable - self.model, self.preprocess = clip.load(url, device=self.device) - self.vector_length = self.urls[url]["vector_length"] - - def read(self, image_url): - try: - return Image.open(requests.get(image_url, stream=True).raw) - except MissingSchema: - return Image.open(image_url) - - def encode_video(self, video_url: str): - """Encode a video by the first still frame - """ - cap = cv2.VideoCapture(video_url) - while True: - ret, frame = cap.read() - if not ret: - break - pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - image = self.preprocess(pil_img).unsqueeze(0).to(self.device) - return self.model.encode_image(image).cpu().detach().numpy().tolist()[0] - - @catch_vector_errors - def encode(self, image_url: str): - if self.device == 'cpu': - image = self.preprocess(self.read(image_url)).unsqueeze(0).to(self.device) - return self.model.encode_image(image).detach().numpy().tolist()[0] - elif self.device == 'cuda': - image = self.preprocess(self.read(image_url)).unsqueeze(0).to(self.device) - return self.model.encode_image(image).cpu().detach().numpy().tolist()[0] - - def bulk_encode(self, images: str): - return [self.encode(x) for x in images] - -class ClipVideo2Vec(ClipImage2Vec): - """Encode a video using an image with CLIP - """ - def encode(self, video_url: str): - """Encode a video by the first still frame - """ - cap = cv2.VideoCapture(video_url) - while True: - ret, frame = cap.read() - if not ret: - break - pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - image = self.preprocess(pil_img).unsqueeze(0).to(self.device) - return self.model.encode_image(image).cpu().detach().numpy().tolist()[0] - - def bulk_encode(self, video_urls: List[str]): - return [self.encode(v) for v in video_urls] - diff --git a/vectorhub/doc_utils.py b/vectorhub/doc_utils.py deleted file mode 100644 index 1f291fc7..00000000 --- a/vectorhub/doc_utils.py +++ /dev/null @@ -1,334 +0,0 @@ -import yaml -import sys -import re -import os -from datetime import date -from pkg_resources import resource_exists, resource_filename - -class ModelDefinition: - def __init__(self, model_id: str='', model_name: str='', vector_length: int='', - description: str='', paper: str='', repo: str='', architecture: str='Not stated.', - tasks: str='Not stated.', release_date: date='', limitations: str='Not stated.', installation: str='Not stated.', - example: str='Not stated.', markdown_filepath: str='', **kwargs): - """ - Model definition. - Args: - model_id: the identity of the model. Required for AutoEncoder. - model_name: The name of the model - vector_length: The length of the vector - description: The description of the encoder - paper: The paper which dictates the encoder - repo: The repository fo the model - architecture: The architecture of the model. - task: The downstream task that the model was trained on - limitations: The limitations of the encoder - installation: How to isntall the encoder. 
- example: The example of the encoder - """ - self.model_id = model_id - self.model_name = model_name - self.vector_length = vector_length - self.description = description - self.paper = paper - self.repo = repo - self.architecture = architecture - self.tasks = tasks - self.release_date = release_date.__str__() if release_date is not None else None - self.limitations = limitations - self.installation = installation - self.example = example - self.markdown_filepath = markdown_filepath - for k, v in kwargs.items(): - # assert( k in self.__class__.__allowed ) - setattr(self, k, v) - if markdown_filepath != '': - self.from_markdown(markdown_filepath) - - @property - def data_type(self): - """ - Returns text/audio/image/qa - """ - return self.model_id.split('/')[0] - - def create_docs(self): - """ - Return a string with the RST documentation of the model. - """ - return f""" -**Model Name**: {self.model_name} - -**Vector Length**: {self.vector_length} - -**Description**: {self.description} - -**Paper**: {self.paper} - -**Repository**: {self.repo} - -**Architecture**: {self.architecture} - -**Tasks**: {self.tasks} - -**Release Date**: {self.release_date} - -**Limitations**: {self.limitations} - -**Installation**: ``{self.installation}`` - -**Example**: - -.. code-block:: python - - {self.example} - """ - - def to_dict(self, return_base_dictionary=False): - """ - Create a dictionary with all the attributes of the model. - """ - if return_base_dictionary: - return { - "model_id": self.model_id, - "model_name": self.model_name, - "vector_length": self.vector_length, - "description": self.description, - "paper": self.paper, - "repo": self.repo, - "architecture": self.architecture, - "tasks": self.tasks, - "limitations": self.limitations, - "installation" : self.installation, - "example" : self.example, - "release_date": self.release_date, - "vectorai_integration": self.vectorai_integration - } - else: - model_dict = {} - for attr in dir(self): - if '__' in attr: - continue - if isinstance(getattr(self, attr), (float, str, int)): - model_dict[attr] = getattr(self, attr) - # Enforce string typecast on vector length - model_dict['vector_length'] = str(model_dict['vector_length']) - return model_dict - - def _get_yaml(self, f): - """ - Returns YAML file from Python - Args: - f: Get YAML file. - """ - pointer = f.tell() - if f.readline() != '---\n': - f.seek(pointer) - return '' - readline = iter(f.readline, '') - readline = iter(readline.__next__, '---\n') #underscores needed for Python3? - return ''.join(readline) - - def from_markdown(self, markdown_filepath: str, encoding='UTF-8', splitter=r"(\#\#+\ +)|(\n)", - verbose=False): - """ - Reads definitions from the markdown. - Args: - markdown_filepath: The path of the markdown file. 
- encoding: The encoding used to open the Markdown file - """ - if '.md' not in markdown_filepath: - markdown_filepath += '.md' - # Check filepath exists with - if not os.path.exists(markdown_filepath): - if resource_exists('vectorhub', markdown_filepath): - markdown_filepath = resource_filename('vectorhub', markdown_filepath) - else: - raise FileNotFoundError(f"Unable to find {markdown_filepath}.") - if verbose: - print(markdown_filepath) - # Remove sys.argv, not sure what it was doing - with open(markdown_filepath, encoding=encoding) as f: - config = list(yaml.load_all(self._get_yaml(f), Loader=yaml.SafeLoader)) - text = f.read() - self.config = config[0] - for k,v in self.config.items(): - setattr(self, k, v) - self.markdown_description = text - self._split_markdown_description(text, splitter=splitter) - - @property - def audio_items_examples(self): - return [ - 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_69.wav', - 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_99.wav', - 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_10.wav', - 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_5.wav' - ] - - @property - def audio_metadata_examples(self): - ['male', 'male', 'female', 'male'] - - @property - def text_items_examples(self): - return [ - "chicken", - "toilet", - "paper", - "enjoy walking" - ] - - @property - def qa_items_examples(self): - return [ - "A blue whale in North Atlantic can grow up to 90 feet.", - "A blue whale in Antarctica can grow up to 110 feet.", - "A gorilla can lift 4000pounds (1810kg) on a bench press.", - "A well-trained man can lift up to 401.5kg." - ] - @property - def qa_search_example(self): - return "How long can a blue whale grow in Antarctica?" 
- - @property - def text_image_search_example(self): - return "Dog wearing a hat" - - @property - def qa_metadata_examples(self): - return ["whale", "whale", "gorilla", "human"] - - @property - def text_metadata_examples(self): - return [ - {'num_of_letters': 7, - 'type': 'animal'}, - {'num_of_letters': 6, - 'type': 'household_items'}, - {'num_of_letters': 5, - 'type': 'household_items'}, - {'num_of_letters': 12, - 'type': 'emotion'} - ] - - @property - def image_items_examples(self): - return [ - 'https://getvectorai.com/_nuxt/img/rabbit.4a65d99.png', - 'https://getvectorai.com/_nuxt/img/dog-2.b8b4cef.png', - 'https://getvectorai.com/_nuxt/img/dog-1.3cc5fe1.png', - ] - - @property - def image_metadata_examples(self): - return [ - {'animal': 'rabbit', 'hat': 'no'}, - {'animal': 'dog', 'hat': 'yes'}, - {'animal': 'dog', 'hat': 'yes'} - ] - - @property - def search_example(self): - return self.DATA_TYPE_TO_EXAMPLE[self.data_type][2] - - @property - def text_search_example(self): - return 'basin' - - @property - def image_search_example(self): - return self.image_items_examples[2] - - @property - def audio_search_example(self): - return self.audio_items_examples[0] - - @property - def item_examples(self): - return self.DATA_TYPE_TO_EXAMPLE[self.data_type][0] - - @property - def DATA_TYPE_TO_EXAMPLE(self): - # Example items, example metadata, example search - return { - 'text': (self.text_items_examples, self.text_metadata_examples, self.text_search_example), - 'image': (self.image_items_examples, self.image_metadata_examples, self.image_search_example), - 'audio': (self.audio_items_examples, self.audio_metadata_examples, self.audio_search_example), - 'qa': (self.qa_items_examples, self.qa_metadata_examples, self.qa_search_example), - 'text_image': (self.image_items_examples, self.image_metadata_examples, self.text_image_search_example), - } - - @property - def metadata_examples(self): - return self.DATA_TYPE_TO_EXAMPLE[self.data_type][1] - - @property - def vectorai_integration(self): - return f"""Index and search your vectors easily on the cloud using 1 line of code! - -``` -username = '' -email = '' -# You can request an api_key using - type in your username and email. -api_key = model.request_api_key(username, email) - -# Index in 1 line of code -items = {self.item_examples} -model.add_documents(user, api_key, items) - -# Search in 1 line of code and get the most similar results. -model.search('{self.search_example}') - -# Add metadata to your search -metadata = {self.metadata_examples} -model.add_documents(user, api_key, items, metadata=metadata) -``` - """ - - def _split_markdown_description(self, description: str, splitter: str=r"(\#\#+\ +)|(\n)"): - """ - Breaks markdown into heading and values. - Args: - description: Description of the markdown - splitter: Regex to split the sentence. Currently it splits on headings and new lines. - The purpose of this is to allow us to get keys from markdown files. 
- """ - # Loops through split markdown - # If ## is detected inside string, marks the next - # string as heading - # and whatever follows as the value - IS_HEADING = False - value = '' - heading = None - SKIP_NEW_LINE = False - markdown_values = {} - for x in re.split(splitter, description): - if x is None: - continue - if SKIP_NEW_LINE: - if x == '\n': - continue - - if IS_HEADING: - heading = x.lower().rstrip().replace(' ', '_') - IS_HEADING = False - # Skip new line after the heading is declared - SKIP_NEW_LINE = True - value = "" - elif '##' in x: - # Insert setting new layer - if heading is not None: - setattr(self, heading, value) - markdown_values[heading] = value - IS_HEADING = True - else: - SKIP_NEW_LINE = False - value += x - - # Set the final value - if hasattr(self, heading): - if getattr(self, heading) != value: - setattr(self, heading, value) - else: - setattr(self, heading, value) - diff --git a/vectorhub/encoders/__init__.py b/vectorhub/encoders/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vectorhub/encoders/audio/__init__.py b/vectorhub/encoders/audio/__init__.py deleted file mode 100644 index 87d1a4fe..00000000 --- a/vectorhub/encoders/audio/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# from .base import * \ No newline at end of file diff --git a/vectorhub/encoders/audio/base.py b/vectorhub/encoders/audio/base.py deleted file mode 100644 index d4a8b332..00000000 --- a/vectorhub/encoders/audio/base.py +++ /dev/null @@ -1,41 +0,0 @@ -from ...import_utils import * - -if is_all_dependency_installed('audio-encoder'): - import librosa - import soundfile as sf - -import tempfile -import shutil -import os -from urllib.request import urlopen, Request -from urllib.parse import quote -import io -import numpy as np - -from ...base import Base2Vec, catch_vector_errors - -class BaseAudio2Vec(Base2Vec): - def read(self, audio: str, new_sampling_rate: int = 16000): - """An method to specify the read method to read the data. 
- """ - if type(audio) is str: - if 'http' in audio: - fd, fp = tempfile.mkstemp() - os.write(fd, urlopen(Request(quote(audio, safe=':/?*=\''), - headers={'User-Agent': "Magic Browser"})).read()) - if '.mp3' in audio: - data, sampling_rate = librosa.load(fp, dtype='float32') - else: - data, sampling_rate = sf.read(fp, dtype='float32') - os.close(fd) - else: - data, sampling_rate = sf.read(audio, dtype='float32') - elif type(audio) is bytes: - data, sampling_rate = sf.read(io.BytesIO(audio), dtype='float32') - elif type(audio) is io.BytesIO: - data, sampling_rate = sf.read(audio, dtype='float32') - return np.array(librosa.resample(data.T, sampling_rate, new_sampling_rate)) - - @catch_vector_errors - def bulk_encode(self, audios, vector_operation='mean'): - return [self.encode(c, vector_operation) for c in audios] diff --git a/vectorhub/encoders/audio/pytorch/__init__.py b/vectorhub/encoders/audio/pytorch/__init__.py deleted file mode 100644 index b3efe865..00000000 --- a/vectorhub/encoders/audio/pytorch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .wav2vec import * diff --git a/vectorhub/encoders/audio/pytorch/wav2vec.md b/vectorhub/encoders/audio/pytorch/wav2vec.md deleted file mode 100644 index 208aa801..00000000 --- a/vectorhub/encoders/audio/pytorch/wav2vec.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -model_id: "audio/wav2vec" -model_name: "Wav2Vec" -vector_length: "512 (default)" -paper: "https://arxiv.org/abs/2006.11477" -repo: "https://github.com/pytorch/fairseq" -installation: "pip install vectorhub[encoders-audio-pytorch]" -release_date: "2020-06-20" -category: audio -short_description: We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. ---- - -## Description - -We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/noisy test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 5.2/8.6 WER on the noisy/clean test sets of Librispeech. This demonstrates the feasibility of speech recognition with limited amounts of labeled data. 
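
The fairseq checkpoint produces a matrix of frame-level features rather than a single vector; Vector Hub's wrapper collapses it to a fixed-length embedding with a pooling operation (mean by default). The snippet below is a small sketch of that pooling step on a dummy feature matrix with made-up dimensions, not the library's internal implementation.

```python
import numpy as np

# Pretend feature-extractor output: 512 channels by T frames for one short clip.
frame_features = np.random.randn(512, 98)

# Mean-pooling over the time axis gives one fixed-length 512-d vector per clip,
# which is what Wav2Vec.encode(..., vector_operation='mean') ultimately returns.
clip_vector = frame_features.mean(axis=1)
print(clip_vector.shape)  # (512,)
```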
- -## Example - -``` -#pip install vectorhub[encoders-audio-pytorch] -from vectorhub.encoders.audio.pytorch import Wav2Vec -model = Wav2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -``` diff --git a/vectorhub/encoders/audio/pytorch/wav2vec.py b/vectorhub/encoders/audio/pytorch/wav2vec.py deleted file mode 100644 index 1490bf22..00000000 --- a/vectorhub/encoders/audio/pytorch/wav2vec.py +++ /dev/null @@ -1,54 +0,0 @@ -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseAudio2Vec - -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-pytorch-fairseq']): - import torch - import numpy as np - from fairseq.models.wav2vec import Wav2VecModel - -WavModelDefinition = ModelDefinition(markdown_filepath='encoders/audio/pytorch/wav2vec') - -class Wav2Vec(BaseAudio2Vec): - definition = WavModelDefinition - urls = { - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt': {}, - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_10m.pt': {}, - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_100h.pt': {}, - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_960h.pt': {}, - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox.pt': {}, - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m.pt': {}, - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h.pt': {}, - 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec2_vox_960h.pt': {}, - } - def __init__(self, model_url: str = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt'): - self.validate_model_url(model_url, self.urls) - self.init(model_url) - self.vector_length = 512 - - def init(self, model_url: str): - self.model_url = model_url - self.model_name = self.model_url.replace( - 'https://dl.fbaipublicfiles.com/fairseq/', '').replace('/', '_') - torch_model = torch.hub.load_state_dict_from_url(self.model_url) - self.model = Wav2VecModel.build_model(torch_model['args'], task=None) - - @catch_vector_errors - def encode(self, audio, vector_operation='mean'): - """ - Example: - - >>> from vectorhub.encoders.audio import Wav2Vec - >>> encoder = Wav2Vec() - >>> encoder.encode("...") - """ - if isinstance(audio, str): - audio = self.read(audio) - return self._vector_operation(self.model.feature_extractor(torch.from_numpy(np.array([audio]))).detach().numpy().tolist()[0], vector_operation=vector_operation, axis=1) - - @catch_vector_errors - def bulk_encode(self, audios, vector_operation='mean'): - return [self.encode(audio, vector_operation=vector_operation) for audio in audios] diff --git a/vectorhub/encoders/audio/tfhub/__init__.py b/vectorhub/encoders/audio/tfhub/__init__.py deleted file mode 100644 index c96d15c0..00000000 --- a/vectorhub/encoders/audio/tfhub/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .speech_embedding import * -from .trill import * -from .vggish import * -from .yamnet import * -from .trill_distilled import * diff --git a/vectorhub/encoders/audio/tfhub/speech_embedding.md b/vectorhub/encoders/audio/tfhub/speech_embedding.md deleted file mode 100644 index 34d23e0f..00000000 --- a/vectorhub/encoders/audio/tfhub/speech_embedding.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -model_id: "audio/speech-embedding" -model_name: "Speech Embedding" -vector_length: "96 (default)" -release_date: 
"2020-01-31" -paper: "https://arxiv.org/abs/2002.01322" -repo: "https://tfhub.dev/google/speech_embedding/1" -installation: "pip install vectorhub[encoders-audio-tfhub]" -category: audio -short_description: We show that using synthesized speech data in training small spoken term detection models can be more effective than using real data. ---- - -## Description - -With the rise of low power speech-enabled devices, there is a growing demand to quickly produce models for recognizing arbitrary sets of keywords. As with many machine learning tasks, one of the most challenging parts in the model creation process is obtaining a sufficient amount of training data. In this paper, we explore the effectiveness of synthesized speech data in training small spoken term detection models of around 400k parameters. Instead of training such models directly on the audio or low level features such as MFCCs, we use a pre-trained speech embedding model trained to extract useful features for keyword spotting models. Using this speech embedding, we show that a model which detects 10 keywords when trained on only synthetic speech is equivalent to a model trained on over 500 real examples. We also show that a model without our speech embeddings would need to be trained on over 4000 real examples to reach the same accuracy. - - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -``` -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import SpeechEmbedding2Vec -model = SpeechEmbedding2Vec() -vector = model.encode('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -``` diff --git a/vectorhub/encoders/audio/tfhub/speech_embedding.py b/vectorhub/encoders/audio/tfhub/speech_embedding.py deleted file mode 100644 index bc0d6fa2..00000000 --- a/vectorhub/encoders/audio/tfhub/speech_embedding.py +++ /dev/null @@ -1,50 +0,0 @@ -from datetime import date -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-tfhub-speech_embedding']): - import tensorflow as tf - import tensorflow_hub as hub - import traceback - -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ..base import BaseAudio2Vec - -SpeechEmbeddingModelDefinition = ModelDefinition(markdown_filepath="encoders/audio/tfhub/speech_embedding.md") - -__doc__ = SpeechEmbeddingModelDefinition.create_docs() - -class SpeechEmbedding2Vec(BaseAudio2Vec): - definition = SpeechEmbeddingModelDefinition - urls = { - 'https://tfhub.dev/google/speech_embedding/1': {'vector_length': 96} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/speech_embedding/1', signature: str = 'default'): - self.model_url = model_url - self.signature = signature - self.model = hub.load(self.model_url).signatures[self.signature] - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.vector_length = 96 - - @catch_vector_errors - def encode(self, audio, vector_operation='mean'): - """ - Encode the vector. - Example: - - >>> from vectorhub.encoders.audio import SpeechEmbedding2Vec - >>> encoder = SpeechEmbedding2Vec() - >>> encoder.encode(...) 
- """ - if isinstance(audio, str): - audio = self.read(audio) - return self._vector_operation(self.model(tf.constant([audio]))[self.signature][0], vector_operation=vector_operation)[0] - - @catch_vector_errors - def bulk_encode(self, audios, vector_operation='mean'): - # TODO: Change list comprehension to tensor. - # audios = [self.read(audio) if isinstance(audio, str) else audio for audio in audios] - # return self._vector_operation(self.model(tf.constant(audios))[self.signature][0], vector_operation=vector_operation) - # TODO: Change list comprehension to tensor. - return [self.encode(x, vector_operation=vector_operation) for x in audios] diff --git a/vectorhub/encoders/audio/tfhub/trill.md b/vectorhub/encoders/audio/tfhub/trill.md deleted file mode 100644 index d572031b..00000000 --- a/vectorhub/encoders/audio/tfhub/trill.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -model_name: "Trill - Triplet Loss Network" -model_id: "audio/trill" -vector_length: "512 (default)" -paper: "https://arxiv.org/abs/2002.12764" -release_date: "2020-02-25" -repo: "https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3" -installation: pip install vectorhub['encoders-audio-tfhub'] -category: audio -short_description: Introduces a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. ---- - -## Description - -The ultimate goal of transfer learning is to reduce labeled data requirements by exploiting a pre-existing embedding model trained for different datasets or tasks. The visual and language communities have established benchmarks to compare embeddings, but the speech community has yet to do so. This paper proposes a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. The proposed representation outperforms other representations on the benchmark, and even exceeds state-of-the-art performance on a number of transfer learning tasks. The embedding is trained on a publicly available dataset, and it is tested on a variety of low-resource downstream tasks, including personalization tasks and medical domain. The benchmark, models, and evaluation code are publicly released. 
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - - -## Example - -```python -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import Trill2Vec -model = Trill2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -``` \ No newline at end of file diff --git a/vectorhub/encoders/audio/tfhub/trill.py b/vectorhub/encoders/audio/tfhub/trill.py deleted file mode 100644 index 4adcf331..00000000 --- a/vectorhub/encoders/audio/tfhub/trill.py +++ /dev/null @@ -1,45 +0,0 @@ -from datetime import date -from ....import_utils import * -from ....models_dict import * -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-tfhub-trill']): - import tensorflow as tf - import tensorflow_hub as hub - import traceback -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ..base import BaseAudio2Vec - -TrillModelDefinition = ModelDefinition(markdown_filepath='encoders/audio/tfhub/trill') -__doc__ = TrillModelDefinition.create_docs() - -class Trill2Vec(BaseAudio2Vec): - definition = TrillModelDefinition - urls = { - 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3': {'vector_length': 512} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3', - layer: str = 'embedding'): - self.model_url = model_url - self.layer = layer - self.model = hub.load(self.model_url) - self.model_name = model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.vector_length = 512 - - @catch_vector_errors - def encode(self, audio, vector_operation='mean'): - """ - - Example: - >>> from encoders.audio.trill import Trill2Vec - >>> encoder = Trill2Vec() - >>> encoder.encode(...) - - """ - if isinstance(audio, str): - audio = self.read(audio) - return self._vector_operation(self.model(samples=audio, sample_rate=16000)[self.layer], vector_operation) - - @catch_vector_errors - def bulk_encode(self, audios, vector_operation='mean'): - return [self.encode(audio) for audio in audios] diff --git a/vectorhub/encoders/audio/tfhub/trill_distilled.md b/vectorhub/encoders/audio/tfhub/trill_distilled.md deleted file mode 100644 index b7aea840..00000000 --- a/vectorhub/encoders/audio/tfhub/trill_distilled.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -model_name: "Trill Distilled - Triplet Loss Network" -model_id: "audio/trill-distilled" -vector_length: "2048 (default)" -paper: "https://arxiv.org/abs/2002.12764" -repo: "https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3" -installation: "pip install vectorhub[encoders-audio-tfhub]" -release_date: "2020-02-25" -category: audio -short_description: Introduces a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. ---- - -## Description - -The ultimate goal of transfer learning is to reduce labeled data requirements by exploiting a pre-existing embedding model trained for different datasets or tasks. The visual and language communities have established benchmarks to compare embeddings, but the speech community has yet to do so. 
This paper proposes a benchmark for comparing speech representations on non-semantic tasks, and proposes a representation based on an unsupervised triplet-loss objective. The proposed representation outperforms other representations on the benchmark, and even exceeds state-of-the-art performance on a number of transfer learning tasks. The embedding is trained on a publicly available dataset, and it is tested on a variety of low-resource downstream tasks, including personalization tasks and medical domain. The benchmark, models, and evaluation code are publicly released. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - - -## Example - -```python -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import TrillDistilled2Vec -model = TrillDistilled2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -``` diff --git a/vectorhub/encoders/audio/tfhub/trill_distilled.py b/vectorhub/encoders/audio/tfhub/trill_distilled.py deleted file mode 100644 index 08ab14f8..00000000 --- a/vectorhub/encoders/audio/tfhub/trill_distilled.py +++ /dev/null @@ -1,36 +0,0 @@ -from datetime import date -from ....import_utils import * -from ....models_dict import * -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-tfhub-trill']): - import tensorflow as tf - import tensorflow_hub as hub - import traceback -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ..base import BaseAudio2Vec - -TrillDistilledModelDefinition = ModelDefinition(markdown_filepath="encoders/audio/tfhub/trill_distilled") -__doc__ = TrillDistilledModelDefinition.create_docs() - -class TrillDistilled2Vec(BaseAudio2Vec): - definition = TrillDistilledModelDefinition - urls = { - 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3': {'vector_length': 2048} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3', layer: str = 'embedding'): - self.model_url = model_url - self.layer = layer - self.model = hub.load(self.model_url) - self.model_name = model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.vector_length = 2048 - - @catch_vector_errors - def encode(self, audio, vector_operation='mean', sample_rate=16000): - if isinstance(audio, str): - audio = self.read(audio) - return self._vector_operation(self.model(samples=audio, sample_rate=sample_rate)[self.layer], vector_operation) - - @catch_vector_errors - def bulk_encode(self, audios, vector_operation='mean'): - return [self.encode(audio, vector_operation=vector_operation) for audio in audios] diff --git a/vectorhub/encoders/audio/tfhub/vggish.md b/vectorhub/encoders/audio/tfhub/vggish.md deleted file mode 100644 index 88f037c5..00000000 --- a/vectorhub/encoders/audio/tfhub/vggish.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -model_id: "audio/vggish" -model_name: "VGGish" -vector_length: "128 (default)" -release_date: "2020-03-11" -repo: "https://tfhub.dev/google/vggish/1" -installation: "pip install vectorhub[encoders-audio-tfhub]" -category: audio -short_description: VGGish is a model for audio event embedding which uses the VGG-16 network and is trained on the YouTube-8M dataset. 
---- - -## Description - -An audio event embedding model trained on the YouTube-8M dataset. -VGGish should be used: -- as a high-level feature extractor: the 128-D embedding output of VGGish can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows quickly creating specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end. -- as a warm start: the VGGish model parameters can be used to initialize part of a larger model which allows faster fine-tuning and model exploration. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import Vggish2Vec -model = Vggish2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -``` - -## Limitations - -VGGish has been trained on millions of YouTube videos and although these are very diverse, there can still be a domain -mismatch between the average YouTube video and the audio inputs expected for any given task. You should expect to do some -amount of fine-tuning and calibration to make VGGish usable in any system that you build. diff --git a/vectorhub/encoders/audio/tfhub/vggish.py b/vectorhub/encoders/audio/tfhub/vggish.py deleted file mode 100644 index fa632507..00000000 --- a/vectorhub/encoders/audio/tfhub/vggish.py +++ /dev/null @@ -1,37 +0,0 @@ -from datetime import date -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ..base import BaseAudio2Vec - -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-tfhub-vggish']): - import tensorflow as tf - import tensorflow_hub as hub - - -VggishModelDefinition = ModelDefinition(markdown_filepath='encoders/audio/tfhub/vggish') - -__doc__ = VggishModelDefinition.create_docs() - -class Vggish2Vec(BaseAudio2Vec): - definition = VggishModelDefinition - urls = { - 'https://tfhub.dev/google/vggish/1': {'vector_length': 128} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/vggish/1'): - self.model_url = model_url - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '') - self.model = hub.load(self.model_url) - self.vector_length = 128 - - @catch_vector_errors - def encode(self, audio, vector_operation='mean'): - if isinstance(audio, str): - audio = self.read(audio) - return self._vector_operation(self.model(audio), vector_operation) - - @catch_vector_errors - def bulk_encode(self, audios, vector_operation='mean'): - return [self.encode(audio, vector_operation=vector_operation) for audio in audios] diff --git a/vectorhub/encoders/audio/tfhub/yamnet.md b/vectorhub/encoders/audio/tfhub/yamnet.md deleted file mode 100644 index a72cc6b4..00000000 --- a/vectorhub/encoders/audio/tfhub/yamnet.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -model_id: audio/yamnet -model_name: Yamnet -vector_length: "1024 (default)" -release_date: "2020-03-11" -repo: "https://tfhub.dev/google/yamnet/1" -installation: "pip install vectorhub[encoders-audio-tfhub]" -category: audio -short_description: YAMNet is a fast and 
accurate audio event classifier that can be used for a variety of audio tasks. ---- - -## Description - -YAMNet is an audio event classifier that takes audio waveform as input and makes independent predictions for each -of 521 audio events from the AudioSet ontology. The model uses the MobileNet v1 architecture and was trained using -the AudioSet corpus. This model was originally released in the TensorFlow Model Garden, where we have the model -source code, the original model checkpoint, and more detailed documentation. -This model can be used: - -- as a stand-alone audio event classifier that provides a reasonable baseline across a wide variety of audio events. -- as a high-level feature extractor: the 1024-D embedding output of YAMNet can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows quickly creating specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end. -- as a warm start: the YAMNet model parameters can be used to initialize part of a larger model which allows faster fine-tuning and model exploration. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -``` -#pip install vectorhub[encoders-audio-tfhub] -from vectorhub.encoders.audio.tfhub import Yamnet2Vec -model = Yamnet2Vec() -sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav') -model.encode(sample) -``` - -## Limitations - -YAMNet's classifier outputs have not been calibrated across classes, so you cannot directly treat -the outputs as probabilities. For any given task, you will very likely need to perform a calibration with task-specific data -which lets you assign proper per-class score thresholds and scaling. -YAMNet has been trained on millions of YouTube videos and although these are very diverse, there can still be a domain mismatch -between the average YouTube video and the audio inputs expected for any given task. You should expect to do some amount of -fine-tuning and calibration to make YAMNet usable in any system that you build. 
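The description above suggests feeding the 1024-D YAMNet embeddings into a shallow model trained on a small labelled dataset. The following is a minimal sketch of that workflow, not taken from the original documentation: it assumes scikit-learn is installed, and the clip paths and labels are placeholders for your own data.

```python
# Minimal sketch: train a shallow classifier on top of frozen YAMNet embeddings.
# Assumes scikit-learn is installed; `clips` and `labels` are hypothetical.
from sklearn.linear_model import LogisticRegression
from vectorhub.encoders.audio.tfhub import Yamnet2Vec

model = Yamnet2Vec()

clips = ["dog_bark.wav", "doorbell.wav", "dog_bark_2.wav"]  # placeholder audio files
labels = ["dog", "doorbell", "dog"]

# 1024-D embeddings, averaged over time frames (vector_operation='mean').
embeddings = model.bulk_encode(clips)

classifier = LogisticRegression(max_iter=1000)
classifier.fit(embeddings, labels)

print(classifier.predict([model.encode("unknown_clip.wav")]))
```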
diff --git a/vectorhub/encoders/audio/tfhub/yamnet.py b/vectorhub/encoders/audio/tfhub/yamnet.py deleted file mode 100644 index eda395c4..00000000 --- a/vectorhub/encoders/audio/tfhub/yamnet.py +++ /dev/null @@ -1,41 +0,0 @@ -from datetime import date -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-tfhub-yamnet']): - import tensorflow as tf - import tensorflow_hub as hub - -from ..base import BaseAudio2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition - -YamnetModelDefinition = ModelDefinition(markdown_filepath='encoders/audio/tfhub/yamnet') -__doc__ = YamnetModelDefinition.create_docs() - -class Yamnet2Vec(BaseAudio2Vec): - definition = YamnetModelDefinition - urls = { - 'https://tfhub.dev/google/yamnet/1': {'vector_length': 1024} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/yamnet/1'): - self.model_url = model_url - self.model = hub.load(self.model_url) - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.vector_length = 1024 - - @catch_vector_errors - def encode(self, audio, vector_operation='mean', layer='embeddings'): - if isinstance(audio, str): - audio = self.read(audio) - outputs = self.model(audio) - if layer == 'scores': - return self._vector_operation(outputs[0], vector_operation) - elif layer == 'log_mel_spectrogram': - return self._vector_operation(outputs[2], vector_operation) - else: - return self._vector_operation(outputs[1], vector_operation) - - @catch_vector_errors - def bulk_encode(self, audios, vector_operation='mean', layer='embeddings'): - return [self.encode(audio, vector_operation=vector_operation, layer=layer) for audio in audios] diff --git a/vectorhub/encoders/audio/vectorai/__init__.py b/vectorhub/encoders/audio/vectorai/__init__.py deleted file mode 100644 index 9c5475af..00000000 --- a/vectorhub/encoders/audio/vectorai/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" - The Vector AI deployed model -""" -from .vi_encoder import * diff --git a/vectorhub/encoders/audio/vectorai/vi_encoder.py b/vectorhub/encoders/audio/vectorai/vi_encoder.py deleted file mode 100644 index b04fcfba..00000000 --- a/vectorhub/encoders/audio/vectorai/vi_encoder.py +++ /dev/null @@ -1,63 +0,0 @@ -""" - Vector AI's deployed model. The purpose of this model is to - allow developers to easily build encodings and see for themselves - how the embedding works. These models are selected to work out-of-the-box - after testing for their success on our end. - - To get access to Vector AI, request a username and API key with the request_api_key method in the Vector AI Github package. - - Example: - - >>> from vectorhub.encoders.audio.vectorai import ViAudio2Vec - >>> model = ViAudio2Vec(username, api_key) - >>> model.encode("audio_file.wav") - -""" -import io -import base64 -import requests -from ..base import BaseAudio2Vec -from ....base import catch_vector_errors - -class ViAudio2Vec: - def __init__(self, username, api_key, url: str="https://api.vctr.ai", collection_name="base"): - """ - Request a username and API key from gh.vctr.ai - Args: - username, api_key: You can request a username and API key from the Vector AI Github package - using the request_api_key method. - url: URL of the Vector AI API. - collection_name: Not necessary for users. 
- - """ - self.username = username - self.api_key = api_key - self.url = url - self.collection_name = collection_name - self._name = "default" - - @property - def vector_length(self): - return 512 - - @catch_vector_errors - def encode(self, audio): - return requests.get( - url="{}/collection/encode_audio".format(self.url), - params={ - "username": self.username, - "api_key": self.api_key, - "collection_name": self.collection_name, - "audio_url": audio, - }, - ).json() - - @property - def __name__(self): - if self._name is None: - return "vectorai_audio" - return self._name - - @__name__.setter - def __name__(self, value): - self._name = value diff --git a/vectorhub/encoders/code/__init__.py b/vectorhub/encoders/code/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vectorhub/encoders/code/transformers/__init__.py b/vectorhub/encoders/code/transformers/__init__.py deleted file mode 100644 index d2c74987..00000000 --- a/vectorhub/encoders/code/transformers/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -Module for all transformers-based Code2Vec models. -""" -from .codebert import * \ No newline at end of file diff --git a/vectorhub/encoders/code/transformers/codebert.md b/vectorhub/encoders/code/transformers/codebert.md deleted file mode 100644 index 8aa74fbd..00000000 --- a/vectorhub/encoders/code/transformers/codebert.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -model_id: "text/codebert" -model_name: "CodeBert" -vector_length: "768 (default)" -paper: "https://arxiv.org/abs/2002.08155" -installation: "pip install vectorhub[encoders-code-transformers]" -release_date: "2020-02-19" -category: text -repo: https://github.com/microsoft/CodeBERT -short_description: CodeBERT learns general-purpose representations that support downstream NL-PL applications such as natural language codesearch, code documentation generation, etc. ---- - -## Description - -We present CodeBERT, a bimodal pre-trained model for programming language (PL) and nat-ural language (NL). CodeBERT learns general-purpose representations that support downstream NL-PL applications such as natural language codesearch, code documentation generation, etc. We develop CodeBERT with Transformer-based neural architecture, and train it with a hybrid objective function that incorporates the pre-training task of replaced token detection, which is to detect plausible alternatives sampled from generators. This enables us to utilize both bimodal data of NL-PL pairs and unimodal data, where the former provides input tokens for model training while the latter helps to learn better generators. We evaluate CodeBERT on two NL-PL applications by fine-tuning model parameters. Results show that CodeBERT achieves state-of-the-art performance on both natural language code search and code documentation generation tasks. Furthermore, to investigate what type of knowledge is learned in CodeBERT, we construct a dataset for NL-PL probing, and evaluate in a zero-shot setting where parameters of pre-trained models are fixed. Results show that CodeBERT performs better than previous pre-trained models on NL-PL probing. 
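As a rough illustration of the natural language code search use case mentioned in the abstract above (a sketch, not an official CodeBERT example), the snippet below embeds a few code snippets with Code2Vec and ranks them against a query description by cosine similarity; numpy is assumed to be installed and the snippets are placeholders.

```python
# Sketch: rank code snippets against a natural language query by cosine similarity.
# The snippets and the query are placeholders; numpy is assumed to be available.
import numpy as np
from vectorhub.encoders.code.transformers import Code2Vec

model = Code2Vec()

snippets = [
    "def add(a, b):\n    return a + b",
    "import pandas as pd\ndf = pd.read_csv('data.csv')",
]
snippet_vectors = np.array([model.encode(snippet) for snippet in snippets])
query_vector = np.array(model.encode("load a csv file into a dataframe"))

# Cosine similarity between each snippet vector and the query vector.
scores = snippet_vectors @ query_vector / (
    np.linalg.norm(snippet_vectors, axis=1) * np.linalg.norm(query_vector)
)
print(snippets[int(np.argmax(scores))])
```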
- -## Example - -```python -#pip install vectorhub[encoders-code-transformers] -from vectorhub.encoders.code.transformers import Code2Vec -model = Code2Vec() -vector = model.encode('import pandas as pd') -``` - diff --git a/vectorhub/encoders/code/transformers/codebert.py b/vectorhub/encoders/code/transformers/codebert.py deleted file mode 100644 index 24ebade6..00000000 --- a/vectorhub/encoders/code/transformers/codebert.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -CodeBert model -""" -from typing import List -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ...text.base import BaseText2Vec - -if is_all_dependency_installed('encoders-code-transformers'): - import torch - from transformers import RobertaTokenizer, RobertaConfig, RobertaModel - -CodeBertModelDefinition = ModelDefinition(markdown_filepath="encoders/code/transformers/codebert") -__doc__ = CodeBertModelDefinition.create_docs() - -class Code2Vec(BaseText2Vec): - definition = CodeBertModelDefinition - urls = { - 'microsoft/codebert-base': {'vector_length': 768} - } - def __init__(self, model_name='microsoft/codebert-base'): - # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.tokenizer = RobertaTokenizer.from_pretrained(model_name) - self.model = RobertaModel.from_pretrained(model_name) - self.vector_length = self.urls[model_name]['vector_length'] - - @catch_vector_errors - def encode(self, description: str, code: str=None, pooling_method='mean', truncation=True): - """ - Pooling method is either 'pooler_output' or 'mean'. - Notes: if it is 'mean', we take the mean of the last hidden state - over the tokens. - Args: - Description: The description of what the code is doing. - Code: The code snippet itself. - Pooling_method: Pooling method, either 'mean' or 'pooler_output'. - Truncation: Whether the sentence should be truncated. - """ - if pooling_method == 'pooler_output': - return self.model.forward(**self.tokenizer.encode_plus( - description, code, return_tensors='pt', truncation=truncation - ))[pooling_method].detach().numpy().tolist()[0] - elif pooling_method == 'mean': - return self._vector_operation(self.model.forward(**self.tokenizer.encode_plus( - description, code, return_tensors='pt', truncation=truncation - ))['last_hidden_state'].detach().numpy().tolist(), 'mean', axis=1)[0] - - @catch_vector_errors - def bulk_encode(self, descriptions: List[str], codes: List[str]=None, pooling_method: str='mean', truncation=True): - """ - Pooling method is either 'pooler_output' or 'mean'. - Notes: if it is 'mean', we take the mean of the last hidden state - over the tokens. - Args: - Pooling_method: Pooling method, either 'mean' or 'pooler_output'. - Truncation: Whether the sentence should be truncated. 
- """ - if pooling_method == 'pooler_output': - return self.model.forward(**self.tokenizer.encode_plus( - descriptions, codes, return_tensors='pt', truncation=truncation - ))[pooling_method].detach().numpy().tolist() - elif pooling_method == 'mean': - return self._vector_operation(self.model.forward(**self.tokenizer.encode_plus( - descriptions, codes, return_tensors='pt', truncation=truncation - ))['last_hidden_state'].detach().numpy().tolist(), 'mean', axis=1) - - @property - def __name__(self): - return "codebert" diff --git a/vectorhub/encoders/face/__init__.py b/vectorhub/encoders/face/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vectorhub/encoders/face/tf/__init__.py b/vectorhub/encoders/face/tf/__init__.py deleted file mode 100644 index c2589a8c..00000000 --- a/vectorhub/encoders/face/tf/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .face2vec import * diff --git a/vectorhub/encoders/face/tf/face2vec.md b/vectorhub/encoders/face/tf/face2vec.md deleted file mode 100644 index 7f6da181..00000000 --- a/vectorhub/encoders/face/tf/face2vec.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -model_id: "image/face2vec" -model_name: "Face2Vec (FaceNet)" -vector_length: "512 (default)" -paper: "https://arxiv.org/pdf/1503.03832.pdf" -repo: "N/A" -installation: "pip install vectorhub[encoders-image-tf-face-detection]" -release_date: "2015-03-12" -category: image -short_description: Deep convolutional neural networks are used to learn a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. This space can then be used for a wide range of face processing tasks such as recognition, verification and clustering. ---- - -## Description - -Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. -Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. -On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. -We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other. 
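Since the embedding space is built so that Euclidean distances track face similarity, a simple verification check can be sketched as below. This is illustrative only: the image paths and the distance threshold are placeholder assumptions, not values from the original documentation.

```python
# Sketch: verify whether two photos show the same person by comparing
# the Euclidean distance of their Face2Vec embeddings against a threshold.
import numpy as np
from vectorhub.encoders.face.tf import Face2Vec

model = Face2Vec()

vec_a = np.array(model.encode("person_a_photo_1.jpg"))  # placeholder images
vec_b = np.array(model.encode("person_a_photo_2.jpg"))

distance = np.linalg.norm(vec_a - vec_b)
THRESHOLD = 1.0  # assumed cut-off; tune it on your own labelled pairs
print("same person" if distance < THRESHOLD else "different people")
```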
- -## Example - -```python -#pip install vectorhub[encoders-image-tf-face-detection] -from vectorhub.encoders.face.tf import Face2Vec -model = Face2Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/face/tf/face2vec.py b/vectorhub/encoders/face/tf/face2vec.py deleted file mode 100644 index aa78ba9b..00000000 --- a/vectorhub/encoders/face/tf/face2vec.py +++ /dev/null @@ -1,131 +0,0 @@ -""" - Face2Vec Embedding. -""" -import os -from typing import Union -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ...image.base import BaseImage2Vec - -if is_all_dependency_installed("encoders-image-tf-face-detection"): - import tensorflow as tf - if hasattr(tf, 'executing_eagerly'): - if not tf.executing_eagerly(): - tf.compat.v1.enable_eager_execution() - import appdirs - import cv2 - import numpy as np - import requests - from mtcnn.mtcnn import MTCNN - from numpy import asarray - from PIL import Image - from keras.models import load_model - -FaceNetModelDefinition = ModelDefinition(markdown_filepath="encoders/face/tf/face2vec") -__doc__ = FaceNetModelDefinition.create_docs() - -class Face2Vec(BaseImage2Vec): - definition = FaceNetModelDefinition - def __init__(self, model_url: str = 'https://drive.google.com/u/0/uc?id=1PZ_6Zsy1Vb0s0JmjEmVd8FS99zoMCiN1&export=download', redownload=True): - if not os.path.exists(self.model_path) or redownload: - self._download_model(model_url) - self.vector_length = self.urls[model_url]["vector_length"] - self.model = load_model(self.model_path) - - def _download_model(self, model_url): - response = requests.get(model_url) - if response.status_code != 200: - raise Exception(response.content) - with open(self.model_path, 'wb') as f: - f.write(response.content) - - @property - def model_path(self): - return os.path.join(self.cache_dir, 'facenet.h5') - - @property - def cache_dir(self): - return appdirs.user_cache_dir() - - @property - def urls(self): - """ - A simple dictionary with urls and their vector lengths - """ - return {'https://drive.google.com/u/0/uc?id=1PZ_6Zsy1Vb0s0JmjEmVd8FS99zoMCiN1&export=download': {'vector_length': 128}} - - def extract_face(self, image_input, reshape_size=None): - if isinstance(image_input, str): - pixels = self.read(image_input) - elif isinstance(image_input, np.ndarray): - pixels = image_input - - # create the detector, using default weights - detector = MTCNN() - # detect faces in the image - results = detector.detect_faces(pixels) - - # extract the bounding box from the first face - x1, y1, width, height = results[0]['box'] - # bug fix - x1, y1 = abs(x1), abs(y1) - x2, y2 = x1 + width, y1 + height - - # extract the face - face = pixels[y1:y2, x1:x2] - - # resize pixels to the model size - image = Image.fromarray(face) - if reshape_size is not None: - image = image.resize(reshape_size) - face_array = asarray(image) - return face_array - - def show_face_landmarks(self, image_filename: str): - """ - Show face landmarks - """ - detector = MTCNN() - - # image = cv2.cvtColor(cv2.imread("rose.jpeg"), cv2.COLOR_BGR2RGB) - image = self.extract_face(image_filename) - result = detector.detect_faces(image) - bounding_box = result[0]['box'] - keypoints = result[0]['keypoints'] - - cv2.rectangle(image, - (bounding_box[0], bounding_box[1]), - (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]), - (0,155,255), 2) - 
cv2.circle(image,(keypoints['left_eye']), 2, (0,155,255), 2) - cv2.circle(image,(keypoints['right_eye']), 2, (0,155,255), 2) - cv2.circle(image,(keypoints['nose']), 2, (0,155,255), 2) - cv2.circle(image,(keypoints['mouth_left']), 2, (0,155,255), 2) - cv2.circle(image,(keypoints['mouth_right']), 2, (0,155,255), 2) - return plt.imshow(image) - - def standardise_image(self, face_pixels): - """ - Standardise the image for face pixels. - """ - face_pixels = face_pixels.astype('float32') - # standardize pixel values across channels (global) - mean, std = face_pixels.mean(), face_pixels.std() - face_pixels = (face_pixels - mean) / std - return tf.expand_dims(face_pixels, axis=0) - - @catch_vector_errors - def encode(self, image): - if isinstance(image, (np.ndarray, str)): - image = self.standardise_image(self.extract_face(image, reshape_size=(160, 160))) - return self.model.predict([image]).tolist()[0] - - @catch_vector_errors - def bulk_encode(self, images): - """ - Bulk encode. Chunk size should be specified outside of the images. - """ - # TODO: Change from list comprehension to properly read - return [self.encode(x) for x in images] diff --git a/vectorhub/encoders/image/__init__.py b/vectorhub/encoders/image/__init__.py deleted file mode 100644 index 9b5ed21c..00000000 --- a/vectorhub/encoders/image/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .base import * diff --git a/vectorhub/encoders/image/base.py b/vectorhub/encoders/image/base.py deleted file mode 100644 index 93d26d5d..00000000 --- a/vectorhub/encoders/image/base.py +++ /dev/null @@ -1,92 +0,0 @@ -import requests -from requests.exceptions import MissingSchema -from typing import Union -from ...base import Base2Vec -from ...import_utils import is_all_dependency_installed -# TODO: Change encoders-image-tfhub into general encoders-image -if is_all_dependency_installed('encoders-image'): - import io - import imageio - import numpy as np - import matplotlib.pyplot as plt - from urllib.request import urlopen, Request - from skimage import transform - from PIL import Image - -class BaseImage2Vec(Base2Vec): - def read(self, image: str): - """ - An method to read images (converting them into NumPy arrays) - Args: - image: An image link/bytes/io Bytesio data format. - as_gray: read in the image as black and white - """ - if type(image) is str: - if 'http' in image: - try: - b = io.BytesIO(urlopen(Request( - image, headers={'User-Agent': "Mozilla/5.0"})).read()) - except: - import tensorflow as tf - return tf.image.decode_jpeg(requests.get(image).content, channels=3, name="jpeg_reader").numpy() - else: - b = image - elif type(image) is bytes: - b = io.BytesIO(image) - elif type(image) is io.BytesIO: - b = image - else: - raise ValueError("Cannot process data type. 
Ensure it is is string/bytes or BytesIO.") - try: - return np.array(imageio.imread(b, pilmode="RGB")) - except: - return np.array(imageio.imread(b)[:, :, :3]) - - def is_greyscale(self, img_path: str): - """Determine if an image is grayscale or not - """ - try: - img = Image.open(requests.get(img_path, stream=True).raw) - except MissingSchema: - img = Image.open(img_path) - img = img.convert('RGB') - w, h = img.size - for i in range(w): - for j in range(h): - r, g, b = img.getpixel((i,j)) - if r != g != b: - return False - return True - - def to_grayscale(self, sample, rgb_weights: list=None): - """ - Converting an image from RGB to Grayscale - """ - if rgb_weights is None: - return np.repeat(np.dot(sample[...,:3], self.rgb_weights)[..., np.newaxis], 3, -1) - else: - return np.repeat(np.dot(sample[...,:3], rgb_weights)[..., np.newaxis], 3, -1) - - @property - def rgb_weights(self): - """ - Get RGB weights for grayscaling. - """ - return [0.2989, 0.5870, 0.1140] - - def show_image(self, sample, cmap=None, is_grayscale=True): - """ - Show an image once it is read. - Arg: - sample: Image that is read (numpy array) - """ - if is_grayscale: - return plt.imshow(sample, cmap=plt.get_cmap("gray")) - return plt.imshow(sample, cmap=cmap) - - def image_resize(self, image_array, width=0, height=0, rescale=0, resize_mode='symmetric'): - if width and height: - image_array = transform.resize(image_array, (width, height), mode=resize_mode, preserve_range=True) - if rescale: - image_array = transform.rescale(image_array, rescale, preserve_range=True, anti_aliasing=True) - return np.array(image_array) diff --git a/vectorhub/encoders/image/cv2/color.md b/vectorhub/encoders/image/cv2/color.md deleted file mode 100644 index aa3934b4..00000000 --- a/vectorhub/encoders/image/cv2/color.md +++ /dev/null @@ -1,35 +0,0 @@ - ---- -model_id: "image/color" -model_name: "Color" -vector_length: "768" -paper: "" -repo: "" -installation: "pip install vectorhub" -release_date: "" -category: image -short_description: Color Encoder ---- - -## Description - -Extracts the color from an image into vectors. It breaks down the color distribution in images. 
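As a small, hypothetical illustration (not from the original docs) of how the colour-distribution vectors can be used, the sketch below compares the 768-D histograms of two images with cosine similarity; numpy is assumed to be installed and the second URL is a placeholder.

```python
# Sketch: compare the colour distributions of two images via cosine similarity.
# numpy is assumed; replace the second URL with a different image of your own.
import numpy as np
from vectorhub.encoders.image.cv2 import ColorEncoder

model = ColorEncoder()

vec_a = np.array(model.encode("https://getvectorai.com/assets/hub-logo-with-text.png"))
vec_b = np.array(model.encode("https://getvectorai.com/assets/hub-logo-with-text.png"))  # placeholder second image

similarity = vec_a @ vec_b / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))
print(similarity)  # 1.0 means identical colour histograms
```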
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.cv2 import ColorEncoder -model = ColorEncoder() -vector = model.encode('https://getvectorai.com/assets/hub-logo-with-text.png') -``` diff --git a/vectorhub/encoders/image/cv2/color.py b/vectorhub/encoders/image/cv2/color.py deleted file mode 100644 index 8763d165..00000000 --- a/vectorhub/encoders/image/cv2/color.py +++ /dev/null @@ -1,43 +0,0 @@ -import urllib -from typing import List, Union -from ...base import BaseImage2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-image-cv2']): - import cv2 - import numpy as np - -CV2ModelDefinition = ModelDefinition(markdown_filepath='encoders/image/cv2/color.md') -__doc__ = CV2ModelDefinition.create_docs() - -class ColorEncoder: - definition = CV2ModelDefinition - def __init__(self): - pass - - @property - def __name__(self): - return "color" - - def encode(self, image_url, return_rgb: bool=False): - try: - req = urllib.request.urlopen(image_url) - arr = np.asarray(bytearray(req.read()), dtype=np.uint8) - img = cv2.imdecode(arr, -1) - except: - img = cv2.imread(image_url) - color = ('b','g','r') - if return_rgb: - histr = {} - for i,col in enumerate(color): - histr[col] = cv2.calcHist([img],[i],None,[256],[0,256]).T.tolist()[0] - return histr - histr = [] - for i,col in enumerate(color): - histr.extend(cv2.calcHist([img],[i],None,[256],[0,256]).T.tolist()[0]) - return histr - - def bulk_encode(self, image_urls: List[str]): - return [self.encode(image_url) for image_url in image_urls] diff --git a/vectorhub/encoders/image/fastai/__init__.py b/vectorhub/encoders/image/fastai/__init__.py deleted file mode 100644 index 09622d6e..00000000 --- a/vectorhub/encoders/image/fastai/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .base import * -from .resnet import * \ No newline at end of file diff --git a/vectorhub/encoders/image/fastai/base.py b/vectorhub/encoders/image/fastai/base.py deleted file mode 100644 index af8b0360..00000000 --- a/vectorhub/encoders/image/fastai/base.py +++ /dev/null @@ -1,49 +0,0 @@ -""" - The base class for FastAI as much of it can be replaced easily by changing the model. -""" -from abc import abstractproperty -from ..base import BaseImage2Vec -from ....import_utils import is_all_dependency_installed -from ....base import catch_vector_errors - -if is_all_dependency_installed('encoders-image-fastai'): - import torch - import numpy as np - # We use wildcard imports for FastAI as this is the way it is handled in the documentation. - from fastai.vision.all import cnn_learner, resnet34, ImageDataLoaders, error_rate, hook_outputs - from fastai.data.all import get_image_files, untar_data, URLs - -class FastAIBase(BaseImage2Vec): - def __init__(self, databunch=None, architecture=None): - self.databunch = databunch - self.architecture = architecture - self._create_learner(databunch, architecture) - - def _instantiate_empty_dataloader(self): - """ - As it is almost impossible to instantiate an empty dataloader, we use a CIFAR as a dummy. 
- """ - path = untar_data(URLs.CIFAR_100) - files = get_image_files(path/"train") - return ImageDataLoaders.from_lists(path, files, [0] * len(files)) - - def _create_learner(self): - dls = self._instantiate_empty_dataloader() - self.learn = cnn_learner(dls, resnet34, metrics=error_rate) - - @abstractproperty - def extraction_layer(self): - pass - - @catch_vector_errors - def encode(self, image): - with hook_outputs(self.extraction_layer) as h: - if isinstance(image, str): - y = self.learn.predict(self.read(image)) - elif isinstance(image, (np.ndarray, np.generic)): - y = self.learn.predict(image) - return h.stored[0].cpu().numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, images): - return [self.encode(x) for x in images] diff --git a/vectorhub/encoders/image/fastai/resnet.py b/vectorhub/encoders/image/fastai/resnet.py deleted file mode 100644 index 625757ca..00000000 --- a/vectorhub/encoders/image/fastai/resnet.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -FastAI Resnet model -""" - -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ..base import BaseImage2Vec -from .base import FastAIBase - -if is_all_dependency_installed('encoders-image-fastai'): - from fastai.vision.all import (resnet18, resnet34, resnet50, resnet101, resnet152, - squeezenet1_0, squeezenet1_1, densenet121, vgg16_bn, alexnet) - -FastAIResnetModelDefinition = ModelDefinition(markdown_filepath="encoders/image/fastai/resnet_fastai") -__doc__ = FastAIResnetModelDefinition.create_docs() - -class FastAIResnet2Vec(FastAIBase): - definition = FastAIResnetModelDefinition - def __init__(self, architecture='resnet34', databunch=None): - """ - Refer to possible_architectures method for reference to which architectures can be instantiated. - Args: - Architecture: The name of the architecture - Databunch: A FastAI Data collection data type that is used to instantiate a learner object. - """ - self.databunch = databunch - self.architecture = self.architecture_mappings[architecture] - self._create_learner() - - @property - def possible_architectures(self): - return list(self.architecture_mappings.keys()) - - @property - def architecture_mappings(self): - """ - Architecture mappings - """ - return { - 'resnet18': resnet18, - 'resnet34': resnet34, - 'resnet50': resnet50, - 'resnet101': resnet101, - 'resnet152': resnet152, - 'squeezenet1_0': squeezenet1_0, - 'squeezenet1_1': squeezenet1_1, - 'densenet121': densenet121, - 'vgg16_bn': vgg16_bn, - 'alexnet': alexnet - } - - @property - def extraction_layer(self): - """ - Here we selected the default to be layer_num 1 to extract the layer with the highest number of dimensions - after it has been flattened. - """ - return [self.learn.model[1][1]] diff --git a/vectorhub/encoders/image/fastai/resnet_fastai.md b/vectorhub/encoders/image/fastai/resnet_fastai.md deleted file mode 100644 index 5bbb2d26..00000000 --- a/vectorhub/encoders/image/fastai/resnet_fastai.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -model_id: "image/resnet-fastai" -model_name: "ResNet" -vector_length: "1024 (default)" -paper: "https://arxiv.org/abs/1512.03385" -installation: "pip install vectorhub[encoders-image-fastai]" -release_date: "2015-12-10" -category: image -short_description: Residual networks are 8x deeper than VGG nets but achieves lower complexity. Then, the residual network was able to achieve 3.57% error on the ImageNet dataset. 
---- - -## Description - -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. - -## Example - -```python -#pip install vectorhub[encoders-image-fastai] -from vectorhub.encoders.image.fastai import FastAIResnet2Vec -model = FastAIResnet2Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/__init__.py b/vectorhub/encoders/image/tfhub/__init__.py deleted file mode 100644 index 643fc1e1..00000000 --- a/vectorhub/encoders/image/tfhub/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .bit import * -from .bit_medium import * -from .inceptionv1 import * -from .inceptionv2 import * -from .inceptionv3 import * -from .inception_resnet import * -from .mobilenet import * -from .mobilenetv2 import * -from .resnet import * -from .resnetv2 import * diff --git a/vectorhub/encoders/image/tfhub/bit.md b/vectorhub/encoders/image/tfhub/bit.md deleted file mode 100644 index b8dc1248..00000000 --- a/vectorhub/encoders/image/tfhub/bit.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -model_id: "image/bit" -model_name: "BiT - Big Transfer, General Visual Representation Learning (Small)" -vector_length: "2048 (default)" -paper: "https://arxiv.org/abs/1912.11370" -repo: "https://github.com/google-research/big_transfer" -installation: "pip install vectorhub[encoders-image-tfhub]" -release_date: "2019-12-24" -category: image -short_description: BiT achieves strong transfer performance across a wide range of datasets, and does so by applying a simple recipe in a manner that allows for efficient scaling up to very large datasets. ---- - -## Description - -Transfer of pre-trained representations improves sample efficiency and simplifies hyperparameter tuning when training -deep neural networks for vision. We revisit the paradigm of pre-training on large supervised datasets and fine-tuning the model -on a target task. We scale up pre-training, and propose a simple recipe that we call Big Transfer (BiT). By combining a few carefully -selected components, and transferring using a simple heuristic, we achieve strong performance on over 20 datasets. BiT performs well across -a surprisingly wide range of data regimes -- from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark (VTAB). On small datasets, BiT attains 76.8% on -ILSVRC-2012 with 10 examples per class, and 97.0% on CIFAR-10 with 10 examples per class. We conduct detailed analysis -of the main components that lead to high transfer performance. 
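The class further below exposes several BiT-S checkpoints through its `urls` dictionary, and the larger backbones return longer feature vectors. A brief sketch of switching variants via `model_url` (the URL shown is one of the entries in that dictionary):

```python
# Sketch: select a different BiT-S checkpoint by passing its TF Hub URL.
# s-r101x1 also returns 2048-D vectors; s-r50x3 and s-r101x3 return 6144-D.
from vectorhub.encoders.image.tfhub import BitSmall2Vec

model = BitSmall2Vec(model_url="https://tfhub.dev/google/bit/s-r101x1/1")
print(model.vector_length)  # 2048

sample = model.read("https://getvectorai.com/assets/hub-logo-with-text.png")
vector = model.encode(sample)
```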
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import BitSmall2Vec -model = BitSmall2Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/bit.py b/vectorhub/encoders/image/tfhub/bit.py deleted file mode 100644 index 5fa98d22..00000000 --- a/vectorhub/encoders/image/tfhub/bit.py +++ /dev/null @@ -1,48 +0,0 @@ -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ..base import BaseImage2Vec - -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - -BITModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/bit') - -__doc__ = BITModelDefinition.create_docs() - -class BitSmall2Vec(BaseImage2Vec): - definition = BITModelDefinition - urls = { - 'https://tfhub.dev/google/bit/s-r50x1/1': {"vector_length":2048}, # 2048 output shape - 'https://tfhub.dev/google/bit/s-r50x3/1': {"vector_length":6144}, # 6144 output shape - 'https://tfhub.dev/google/bit/s-r101x1/1': {"vector_length":2048}, # 2048 output shape - 'https://tfhub.dev/google/bit/s-r101x3/1': {"vector_length":6144}, # 6144 output shape - 'https://tfhub.dev/google/bit/s-r152x4/1': {"vector_length":8192}, # 8192 output shape - } - - def __init__(self, model_url: str = "https://tfhub.dev/google/bit/s-r50x1/1"): - self.validate_model_url(model_url, list(self.urls.keys())) - self.init(model_url) - self.vector_length = self.urls[model_url]["vector_length"] - - def init(self, model_url: str): - self.model_url = model_url - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.model = hub.load(self.model_url) - - @catch_vector_errors - def encode(self, image): - if isinstance(image, str): - image = self.read(image) - return self.model([image]).numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, images): - """ - Bulk encode. Chunk size should be specified outside of the images. - """ - # TODO: Change from list comprehension to properly read - return [self.encode(x) for x in images] diff --git a/vectorhub/encoders/image/tfhub/bit_medium.md b/vectorhub/encoders/image/tfhub/bit_medium.md deleted file mode 100644 index 21f94a35..00000000 --- a/vectorhub/encoders/image/tfhub/bit_medium.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -model_id: "image/bit-medium" -model_name: "BiT Medium - Big Transfer, General Visual Representation Learning (Medium)" -vector_length: "2048 (default)" -paper: "https://arxiv.org/abs/1912.11370" -repo: "https://github.com/google-research/big_transfer" -installation: "pip install vectorhub[encoders-image-tfhub]" -release_date: "2019-12-24" -category: image -short_description: BiT achieves strong transfer performance across a wide range of datasets, and does so by applying a simple recipe in a manner that allows for efficient scaling up to very large datasets. ---- - -## Description - -Transfer of pre-trained representations improves sample efficiency and simplifies hyperparameter tuning when training -deep neural networks for vision. 
We revisit the paradigm of pre-training on large supervised datasets and fine-tuning the model -on a target task. We scale up pre-training, and propose a simple recipe that we call Big Transfer (BiT). By combining a few carefully -selected components, and transferring using a simple heuristic, we achieve strong performance on over 20 datasets. BiT performs well across -a surprisingly wide range of data regimes -- from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on ILSVRC-2012, -99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark (VTAB). On small datasets, BiT attains 76.8% on -ILSVRC-2012 with 10 examples per class, and 97.0% on CIFAR-10 with 10 examples per class. We conduct detailed analysis -of the main components that lead to high transfer performance. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import BitMedium2Vec -model = BitMedium2Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/bit_medium.py b/vectorhub/encoders/image/tfhub/bit_medium.py deleted file mode 100644 index ecfceeb4..00000000 --- a/vectorhub/encoders/image/tfhub/bit_medium.py +++ /dev/null @@ -1,29 +0,0 @@ -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ..base import BaseImage2Vec -from .bit import BitSmall2Vec - -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - import traceback - -BITMediumModelDefinition = ModelDefinition(markdown_filepath="encoders/image/tfhub/bit_medium") -__doc__ = BITMediumModelDefinition.create_docs() - -class BitMedium2Vec(BitSmall2Vec): - definition = BITMediumModelDefinition - urls = { - 'https://tfhub.dev/google/bit/m-r50x1/1': {"vector_length":2048}, # 2048 output shape - 'https://tfhub.dev/google/bit/m-r50x3/1': {"vector_length":6144}, # 6144 output shape - 'https://tfhub.dev/google/bit/m-r101x1/1': {"vector_length":2048}, # 2048 output shape - 'https://tfhub.dev/google/bit/m-r101x3/1': {"vector_length":6144}, # 6144 output shape - 'https://tfhub.dev/google/bit/m-r152x4/1': {"vector_length":8192}, # 8192 output shape - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/bit/m-r50x1/1'): - self.validate_model_url(model_url, list(self.urls.keys())) - self.init(model_url) - self.vector_length = self.urls[model_url]["vector_length"] - diff --git a/vectorhub/encoders/image/tfhub/inception_resnet.md b/vectorhub/encoders/image/tfhub/inception_resnet.md deleted file mode 100644 index b9094a79..00000000 --- a/vectorhub/encoders/image/tfhub/inception_resnet.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -model_id: "image/inception-resnet" -model_name: "Inception Resnet" -vector_length: "1536 (default)" -paper: "https://arxiv.org/abs/1602.07261" -repo: "https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4" -installation: "pip install vectorhub[encoders-image-tfhub]" -release_date: "2016-02-23" -category: image -short_description: Residual connections improve the performance of deep convolutional neural networks, especially for large networks, and they 
improve the performance of Inception networks more than other deep network architectures. ---- - -## Description - -Very deep convolutional networks have been central to the largest advances in image recognition performance in -recent years. One example is the Inception architecture that has been shown to achieve very good performance at -relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional -architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest -generation Inception-v3 network. This raises the question of whether there are any benefit in combining the Inception architecture -with residual connections. Here we give clear empirical evidence that training with residual connections accelerates the training -of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive -Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both -residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 -classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual -Inception networks. With an ensemble of three residual and one Inception-v4, we achieve 3.08 percent top-5 error on the test set of the -ImageNet classification (CLS) challenge. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import InceptionResnet2Vec -model = InceptionResnet2Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/inception_resnet.py b/vectorhub/encoders/image/tfhub/inception_resnet.py deleted file mode 100644 index 368044b5..00000000 --- a/vectorhub/encoders/image/tfhub/inception_resnet.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import List -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ..base import BaseImage2Vec -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - import traceback - -InceptionResnetModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/inception_resnet') - -__doc__ = InceptionResnetModelDefinition.create_docs() - -class InceptionResnet2Vec(BaseImage2Vec): - definition = InceptionResnetModelDefinition - urls = {"https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4": {"vector_length": 1536}} - def __init__(self, model_url="https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4"): - self.model_url = model_url - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.model = hub.load(self.model_url) - self.vector_length = 1536 - - @catch_vector_errors - def encode(self, image): - """ - Encode an image using InceptionResnet. 
- - Example: - >>> from vectorhub.encoders.image.tfhub import InceptionResnet2Vec - >>> model = InceptionResnet2Vec() - >>> model.encode('https://getvectorai.com/assets/hub-logo-with-text.png') - """ - if isinstance(image, str): - image = self.read(image) - return self.model([image]).numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, images): - return [self.encode(x) for x in images] diff --git a/vectorhub/encoders/image/tfhub/inceptionv1.md b/vectorhub/encoders/image/tfhub/inceptionv1.md deleted file mode 100644 index 84c54bb7..00000000 --- a/vectorhub/encoders/image/tfhub/inceptionv1.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -model_id: image/inceptionv1 -model_name: Inception V1 -vector_length: "1024 (default)" -paper: "https://arxiv.org/abs/1409.4842" -repo: 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4' -installation: "pip install vectorhub[encoders-image-tfhub]" -release_date: "2014-09-17" -category: image -short_description: A deeper and wider neural network architecture, codenamed Inception, which is responsible for setting the state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014. ---- - -## Description - -We propose a deep convolutional neural network architecture codenamed "Inception", which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import InceptionV12Vec -model = InceptionV12Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/inceptionv1.py b/vectorhub/encoders/image/tfhub/inceptionv1.py deleted file mode 100644 index 79be8b70..00000000 --- a/vectorhub/encoders/image/tfhub/inceptionv1.py +++ /dev/null @@ -1,45 +0,0 @@ -""" - Abstract from: https://arxiv.org/abs/1409.4842 - - We propose a deep convolutional neural network architecture codenamed "Inception", which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. 
One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection. - -""" -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ..base import BaseImage2Vec - -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - -InceptionModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/inceptionv1') - -__doc__ = InceptionModelDefinition.create_docs() - -class InceptionV12Vec(BaseImage2Vec): - definition = InceptionModelDefinition - urls = { - 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4': {'vector_length': 1024} - } - - def __init__(self, model_url: str = 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4'): - self.init(model_url) - self.vector_length = 1024 - - def init(self, model_url: str): - self.model_url = model_url - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.model = hub.load(self.model_url) - - @catch_vector_errors - def encode(self, image): - if isinstance(image, str): - image = self.read(image) - return self.model([image]).numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, images): - return [self.encode(x) for x in images] diff --git a/vectorhub/encoders/image/tfhub/inceptionv2.md b/vectorhub/encoders/image/tfhub/inceptionv2.md deleted file mode 100644 index f154cd3a..00000000 --- a/vectorhub/encoders/image/tfhub/inceptionv2.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -model_id: image/inception-v2 -model_name: Inception -vector_length: "1024 (default)" -paper: "https://arxiv.org/abs/1409.4842" -repo: 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4' -installation: "pip install vectorhub[encoders-image-tfhub]" -release_date: "2015-12-11" -category: image -short_description: A deeper and wider neural network architecture, codenamed Inception, which is responsible for setting the state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014. ---- - -## Description - -We propose a deep convolutional neural network architecture codenamed "Inception", which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection. 
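Besides the single-image example further below, the encoder also exposes `bulk_encode`, which reads and encodes each item in turn, so plain image URLs can be passed directly. A short sketch (the repeated URL is a placeholder for distinct images):

```python
# Sketch: encode several images in one call; strings are read as URLs/paths.
from vectorhub.encoders.image.tfhub import InceptionV22Vec

model = InceptionV22Vec()
urls = [
    "https://getvectorai.com/assets/hub-logo-with-text.png",
    "https://getvectorai.com/assets/hub-logo-with-text.png",  # placeholder for a second image
]
vectors = model.bulk_encode(urls)  # list of 1024-D vectors
print(len(vectors), len(vectors[0]))
```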
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import InceptionV22Vec -model = InceptionV22Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/inceptionv2.py b/vectorhub/encoders/image/tfhub/inceptionv2.py deleted file mode 100644 index 70bc58a5..00000000 --- a/vectorhub/encoders/image/tfhub/inceptionv2.py +++ /dev/null @@ -1,30 +0,0 @@ -""" - Abstract from: https://arxiv.org/abs/1409.4842 - - We propose a deep convolutional neural network architecture codenamed "Inception", which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection. - -""" -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ..base import BaseImage2Vec -from .inceptionv1 import InceptionV12Vec - -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - -InceptionV2ModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/inceptionv2') - -__doc__ = InceptionV2ModelDefinition.create_docs() - -class InceptionV22Vec(InceptionV12Vec): - definition = InceptionV2ModelDefinition - urls ={ - 'https://tfhub.dev/google/imagenet/inception_v2/feature_vector/4': {'vector_length': 1024} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/imagenet/inception_v2/feature_vector/4'): - self.model_url = model_url - self.init(model_url) - self.vector_length = 1024 diff --git a/vectorhub/encoders/image/tfhub/inceptionv3.md b/vectorhub/encoders/image/tfhub/inceptionv3.md deleted file mode 100644 index f17cc731..00000000 --- a/vectorhub/encoders/image/tfhub/inceptionv3.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -model_id: "image/inception-v3" -model_name: "Inception" -vector_length: "2048 (default)" -repo: 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/4' -installation: "pip install vectorhub[encoders-image-tfhub]" -release_date: "2015-12-11" -category: image -short_description: A deeper and wider neural network architecture, codenamed Inception, which is responsible for setting the state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014. 
---- - -## Description - -We propose a deep convolutional neural network architecture codenamed "Inception", which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import InceptionV32Vec -model = InceptionV32Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/inceptionv3.py b/vectorhub/encoders/image/tfhub/inceptionv3.py deleted file mode 100644 index 7f5b3bd4..00000000 --- a/vectorhub/encoders/image/tfhub/inceptionv3.py +++ /dev/null @@ -1,32 +0,0 @@ -""" - Abstract from: https://arxiv.org/abs/1409.4842 - - We propose a deep convolutional neural network architecture codenamed "Inception", which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The main hallmark of this architecture is the improved utilization of the computing resources inside the network. This was achieved by a carefully crafted design that allows for increasing the depth and width of the network while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation used in our submission for ILSVRC 2014 is called GoogLeNet, a 22 layers deep network, the quality of which is assessed in the context of classification and detection. 
- -""" -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ..base import BaseImage2Vec -from .inceptionv1 import InceptionV12Vec - -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - - -InceptionV3ModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/inceptionv3') - - -__doc__ = InceptionV3ModelDefinition.create_docs() - -class InceptionV32Vec(InceptionV12Vec): - definition = InceptionV3ModelDefinition - urls = { - "https://tfhub.dev/google/inaturalist/inception_v3/feature_vector/4": {'vector_length': 2048} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/4'): - self.init(model_url) - self.vector_length = 2048 - \ No newline at end of file diff --git a/vectorhub/encoders/image/tfhub/mobilenet.md b/vectorhub/encoders/image/tfhub/mobilenet.md deleted file mode 100644 index 14e1044f..00000000 --- a/vectorhub/encoders/image/tfhub/mobilenet.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -model_id: "image/mobilenet" -model_name: "MobileNet" -vector_length: "1024 (default)" -paper: https://arxiv.org/abs/1704.04861 -repo: https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4 -release_date: "2017-04-17" -installation: "pip install vectorhub[encoders-image-tfhub]" -category: image -short_description: MobileNets are a class of neural networks optimized for mobile and embedded hardware that provides a trade-off between accuracy and latency. ---- - -## Description - -We present a class of efficient models called MobileNets for mobile and embedded vision applications. MobileNets are based on a streamlined architecture that uses depth-wise separable convolutions to build light weight deep neural networks. We introduce two simple global hyper-parameters that efficiently trade off between latency and accuracy. These hyper-parameters allow the model builder to choose the right sized model for their application based on the constraints of the problem. We present extensive experiments on resource and accuracy tradeoffs and show strong performance compared to other popular models on ImageNet classification. We then demonstrate the effectiveness of MobileNets across a wide range of applications and use cases including object detection, finegrain classification, face attributes and large scale geo-localization. - -## Training Corpora - -Trained on Imagenet (AKA ILSVRC-2012-CLS dataset for image classification). 
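The latency/accuracy trade-off described above is exposed through the `urls` dictionary of the class further below: smaller depth multipliers and input resolutions give shorter vectors and faster encoding. A sketch of picking the 0.50-depth, 128x128 variant (the encode call assumes the same interface as the other TF Hub image encoders in this package):

```python
# Sketch: a smaller MobileNet variant (depth 0.50, 128x128 input, 512-D output).
from vectorhub.encoders.image.tfhub import MobileNetV12Vec

model = MobileNetV12Vec(
    model_url="https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/feature_vector/4"
)

sample = model.read("https://getvectorai.com/assets/hub-logo-with-text.png")
sample = model.image_resize(sample, width=128, height=128)  # match image_dimensions
vector = model.encode(sample)  # 512-D embedding
```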
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import MobileNetV12Vec -model = MobileNetV12Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/mobilenet.py b/vectorhub/encoders/image/tfhub/mobilenet.py deleted file mode 100644 index 7b4657a4..00000000 --- a/vectorhub/encoders/image/tfhub/mobilenet.py +++ /dev/null @@ -1,78 +0,0 @@ -import numpy as np -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ..base import BaseImage2Vec -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - import io - import imageio - import matplotlib.pyplot as plt - from urllib.request import urlopen, Request - from urllib.parse import quote - from skimage import transform - -MobileNetModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/mobilenet') - -__doc__ = MobileNetModelDefinition.create_docs() - -class MobileNetV12Vec(BaseImage2Vec): - definition = MobileNetModelDefinition - urls = { - # 100 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4': {"vector_length":1024, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_192/feature_vector/4': {"vector_length":1024, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_160/feature_vector/4': {"vector_length":1024, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_128/feature_vector/4': {"vector_length":1024, "image_dimensions":128}, - - # 75 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_224/feature_vector/4': {"vector_length":768, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_192/feature_vector/4': {"vector_length":768, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_160/feature_vector/4': {"vector_length":768, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_075_128/feature_vector/4': {"vector_length":768, "image_dimensions":128}, - - # 50 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_224/feature_vector/4': {"vector_length":512, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_192/feature_vector/4': {"vector_length":512, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/feature_vector/4': {"vector_length":512, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/feature_vector/4': {"vector_length":512, "image_dimensions":128}, - - # 25 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/4': {"vector_length":256, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_192/feature_vector/4': {"vector_length":256, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_160/feature_vector/4': {"vector_length":256, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_128/feature_vector/4': {"vector_length":256, "image_dimensions":128}, - } - def __init__(self, model_url: str 
= 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4', resize_mode: str='symmetric'): - self.validate_model_url(model_url, self.urls) - self.vector_length = self.urls[model_url]["vector_length"] - self.image_dimensions = self.urls[model_url]["image_dimensions"] - self.init(model_url) - self.resize_mode = resize_mode - - def init(self, model_url: str): - self.model_url = model_url - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.model = tf.keras.Sequential([ - hub.KerasLayer(self.model_url, trainable=False) - ]) - self.model.build([None, self.image_dimensions, self.image_dimensions, 3]) - - @catch_vector_errors - def encode(self, image): - if isinstance(image, str): - image = self.read(image) - resized_image = self.image_resize(image, self.image_dimensions, self.image_dimensions, - resize_mode=self.resize_mode)[np.newaxis, ...] - return self.model(resized_image).numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, images): - """ - Bulk encode. Chunk size should be specified outside of the images. - """ - # TODO: Change from list comprehension to properly read in bulk - return [self.encode(x) for x in images] diff --git a/vectorhub/encoders/image/tfhub/mobilenetv2.md b/vectorhub/encoders/image/tfhub/mobilenetv2.md deleted file mode 100644 index 904d5f53..00000000 --- a/vectorhub/encoders/image/tfhub/mobilenetv2.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -model_id: "image/mobilenet-v2" -model_name: "MobileNet V2" -vector_length: "1792 (default)" -paper: "https://arxiv.org/abs/1704.04861" -repo: "https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/4" -installation: pip install vectorhub[encoders-image-tfhub] -release_date: "2018-01-13" -category: image -short_description: MobileNets are a class of neural networks optimized for mobile and embedded hardware that provides a trade-off between accuracy and latency. ---- - -## Description - -We present a class of efficient models called MobileNets for mobile and embedded vision applications. MobileNets are based on a streamlined architecture that uses depth-wise separable convolutions to build light weight deep neural networks. We introduce two simple global hyper-parameters that efficiently trade off between latency and accuracy. These hyper-parameters allow the model builder to choose the right sized model for their application based on the constraints of the problem. We present extensive experiments on resource and accuracy tradeoffs and show strong performance compared to other popular models on ImageNet classification. We then demonstrate the effectiveness of MobileNets across a wide range of applications and use cases including object detection, finegrain classification, face attributes and large scale geo-localization. 
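As with V1, the variant is selected by URL; the default checkpoint (1.40 width multiplier, 224x224 input) returns 1792-dimensional vectors. A minimal sketch of batch encoding with `bulk_encode`, which simply applies `encode` to each item; the repeated sample image URL is only there to illustrate the list input:

```python
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import MobileNetV22Vec

# Default checkpoint: mobilenet_v2_140_224, 1792-dimensional feature vectors.
model = MobileNetV22Vec()

# String items are read from the URL/path before being encoded.
image_urls = [
    'https://getvectorai.com/assets/hub-logo-with-text.png',
    'https://getvectorai.com/assets/hub-logo-with-text.png',
]
vectors = model.bulk_encode(image_urls)
print(len(vectors), len(vectors[0]))  # 2 vectors, 1792 dimensions each
```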
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import MobileNetV22Vec -model = MobileNetV22Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` \ No newline at end of file diff --git a/vectorhub/encoders/image/tfhub/mobilenetv2.py b/vectorhub/encoders/image/tfhub/mobilenetv2.py deleted file mode 100644 index 2732828d..00000000 --- a/vectorhub/encoders/image/tfhub/mobilenetv2.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ..base import BaseImage2Vec -from .mobilenet import MobileNetV12Vec -if is_all_dependency_installed('encoders-image-tfhub'): - import tensorflow as tf - import tensorflow_hub as hub - -MobileNetV2ModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/mobilenetv2') - -__doc__ = MobileNetV2ModelDefinition.create_docs() - -class MobileNetV22Vec(MobileNetV12Vec): - definition = MobileNetV2ModelDefinition - urls ={ - # 140 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4': {"vector_length":1792, "image_dimensions":224}, - - # 130 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/feature_vector/4': {"vector_length":1664, "image_dimensions":224}, - - # 100 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4': {"vector_length":1280, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_192/feature_vector/4': {"vector_length":1280, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_160/feature_vector/4': {"vector_length":1280, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_128/feature_vector/4': {"vector_length":1280, "image_dimensions":128}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_96/feature_vector/4': {"vector_length":1280, "image_dimensions":96}, - - # 75 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_224/feature_vector/4': {"vector_length":1280, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_192/feature_vector/4': {"vector_length":1280, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_160/feature_vector/4': {"vector_length":1280, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_128/feature_vector/4': {"vector_length":1280, "image_dimensions":128}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_96/feature_vector/4': {"vector_length":1280, "image_dimensions":96}, - - # 50 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_224/feature_vector/4': {"vector_length":1280, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_192/feature_vector/4': {"vector_length":1280, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_160/feature_vector/4': {"vector_length":1280, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_128/feature_vector/4': {"vector_length":1280, "image_dimensions":128}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_96/feature_vector/4': {"vector_length":1280, 
"image_dimensions":96}, - - # 35 depth - 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_224/feature_vector/4': {"vector_length":1280, "image_dimensions":224}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_192/feature_vector/4': {"vector_length":1280, "image_dimensions":192}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_160/feature_vector/4': {"vector_length":1280, "image_dimensions":160}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/4': {"vector_length":1280, "image_dimensions":128}, - 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_96/feature_vector/4': {"vector_length":1280, "image_dimensions":96}, - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4', resize_mode:str="symmetric"): - self.validate_model_url(model_url, self.urls) - self.vector_length = self.urls[model_url]["vector_length"] - self.image_dimensions = self.urls[model_url]["image_dimensions"] - self.init(model_url) - self.resize_mode = resize_mode diff --git a/vectorhub/encoders/image/tfhub/resnet.md b/vectorhub/encoders/image/tfhub/resnet.md deleted file mode 100644 index c21b0ff0..00000000 --- a/vectorhub/encoders/image/tfhub/resnet.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -model_id: "image/resnet" -model_name: "ResNet" -vector_length: "2048 (default)" -paper: "https://arxiv.org/abs/1512.03385" -installation: "pip install vectorhub[encoders-image-tfhub]" -release_date: "2015-12-10" -category: image -short_dsecription: Residual networks are 8x deeper than VGG nets but achieves lower complexity. Then, the residual network was able to achieve 3.57% error on the ImageNet dataset. ---- - -## Description - -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. 
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -This is an example - -```python -#pip install vectorhub[encoders-image-tfhub] -from vectorhub.encoders.image.tfhub import ResnetV12Vec -model = ResnetV12Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/resnet.py b/vectorhub/encoders/image/tfhub/resnet.py deleted file mode 100644 index 5e157368..00000000 --- a/vectorhub/encoders/image/tfhub/resnet.py +++ /dev/null @@ -1,48 +0,0 @@ -from datetime import date -from typing import List -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseImage2Vec - -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-image-tfhub-resnet']): - import tensorflow as tf - import tensorflow_hub as hub - -ResNetModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/resnet') - -__doc__ = ResNetModelDefinition.create_docs() - -class ResnetV12Vec(BaseImage2Vec): - definition = ResNetModelDefinition - urls = { - # 50 layers - 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4':{'vector_length': 2048}, - - # 101 layers - 'https://tfhub.dev/google/imagenet/resnet_v1_101/feature_vector/4':{'vector_length': 2048}, - - # 152 layers - 'https://tfhub.dev/google/imagenet/resnet_v1_152/feature_vector/4':{'vector_length': 2048}, - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/imagenet/resnet_v1_50/feature_vector/4'): - self.validate_model_url(model_url, self.urls) - self.init(model_url) - self.vector_length = self.urls[model_url]['vector_length'] - - def init(self, model_url: str): - self.model_url = model_url - self.model_name = self.model_url.replace( - 'https://tfhub.dev/google/', '').replace('/', '_') - self.model = hub.load(self.model_url) - - @catch_vector_errors - def encode(self, image): - if isinstance(image, str): - image = self.read(image) - return self.model([image]).numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, images: List[str]): - return [self.encode(x) for x in images] diff --git a/vectorhub/encoders/image/tfhub/resnetv2.md b/vectorhub/encoders/image/tfhub/resnetv2.md deleted file mode 100644 index cf5b8ee8..00000000 --- a/vectorhub/encoders/image/tfhub/resnetv2.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -model_id: 'image/resnet-v2' -model_name: "ResNet" -vector_length: "2048 (default)" -paper: "https://arxiv.org/abs/1512.03385" -installation: "pip install vectorhub['encoders-image-tfhub']" -release_date: "2016-03-16" -category: image -short_description: Residual networks are 8x deeper than VGG nets but achieves lower complexity. Then, the residual network was able to achieve 3.57% error on the ImageNet dataset. ---- - -## Description - -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. 
We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub['encoders-image-tfhub'] -from vectorhub.encoders.image.tfhub import ResnetV22Vec -model = ResnetV22Vec() -sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') -model.encode(sample) -``` diff --git a/vectorhub/encoders/image/tfhub/resnetv2.py b/vectorhub/encoders/image/tfhub/resnetv2.py deleted file mode 100644 index b149c7b8..00000000 --- a/vectorhub/encoders/image/tfhub/resnetv2.py +++ /dev/null @@ -1,42 +0,0 @@ -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseImage2Vec -from .resnet import ResnetV12Vec - -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-image-tfhub-resnet']): - import tensorflow as tf - import tensorflow_hub as hub - -ResNetV2ModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/resnetv2') -__doc__ = ResNetV2ModelDefinition.create_docs() - -class ResnetV22Vec(ResnetV12Vec): - definition = ResNetV2ModelDefinition - urls = { - 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4': {'vector_length': 2048}, - - # 101 layers - 'https://tfhub.dev/google/imagenet/resnet_v2_101/feature_vector/4':{'vector_length': 2048}, - - # 152 layers - 'https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/4':{'vector_length': 2048}, - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4'): - self.validate_model_url(model_url, self.urls) - self.init(model_url) - self.vector_length = 2048 - - @property - def urls(self): - return { - 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4': {'vector_length': 2048}, - - # 101 layers - 'https://tfhub.dev/google/imagenet/resnet_v2_101/feature_vector/4':{'vector_length': 2048}, - - # 152 layers - 'https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/4':{'vector_length': 2048}, - } diff --git a/vectorhub/encoders/image/vectorai/__init__.py b/vectorhub/encoders/image/vectorai/__init__.py deleted file mode 100644 index c3c87d16..00000000 --- a/vectorhub/encoders/image/vectorai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .vi_encoder import * diff --git a/vectorhub/encoders/image/vectorai/vi_encoder.py b/vectorhub/encoders/image/vectorai/vi_encoder.py deleted file mode 100644 index ca6fd920..00000000 --- a/vectorhub/encoders/image/vectorai/vi_encoder.py +++ /dev/null @@ -1,54 +0,0 @@ -""" - Vector AI's deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves - how the embedding works. These models are selected to work out-of-the-box after testing for their success on our end. 
- - To get access to Vector AI, we need to use - - Example: - - >>> from vectorhub.text.encoder.vectorai import ViText2Vec - >>> model = ViText2Vec(username, api_key) - >>> model.encode("sample.jpg") - -""" - -import io -import base64 -import requests -from ....base import catch_vector_errors - -class ViImage2Vec: - def __init__(self, username, api_key, url=None, collection_name="base"): - """ - Request for a username and API key from gh.vctr.ai - """ - self.username = username - self.api_key = api_key - if url: - self.url = url - else: - self.url = "https://api.vctr.ai" - self.collection_name = collection_name - self._name = "default" - - @catch_vector_errors - def encode(self, image): - return requests.get( - url="{}/collection/encode_image".format(self.url), - params={ - "username": self.username, - "api_key": self.api_key, - "collection_name": self.collection_name, - "image_url": image, - }, - ).json() - - @property - def __name__(self): - if self._name is None: - return "deployed_image" - return self._name - - @__name__.setter - def __name__(self, value): - self._name = value \ No newline at end of file diff --git a/vectorhub/encoders/text/README.md b/vectorhub/encoders/text/README.md deleted file mode 100644 index 2a1335c9..00000000 --- a/vectorhub/encoders/text/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Text2Vec - -All kinds of differing length of texts can be turned into vectors: -e.g. Word2Vec, Sentence2Vec, Doc2Vec, PDF2Vec - -All kinds of differing models that have strengths in different departments: -e.g. BERT for , Word2Vec for Association, - -All kinds of differing domain of texts can be turned into vectors: -e.g. Med2Vec diff --git a/vectorhub/encoders/text/__init__.py b/vectorhub/encoders/text/__init__.py deleted file mode 100644 index 773cfc46..00000000 --- a/vectorhub/encoders/text/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .base import * \ No newline at end of file diff --git a/vectorhub/encoders/text/base.py b/vectorhub/encoders/text/base.py deleted file mode 100644 index 923b5d8d..00000000 --- a/vectorhub/encoders/text/base.py +++ /dev/null @@ -1,38 +0,0 @@ -""" - Base Text2Vec Model -""" -import warnings -from ...base import Base2Vec -from abc import ABC, abstractmethod -from typing import Union, List, Dict - -class BaseText2Vec(Base2Vec, ABC): - def read(self, text: str): - """An abstract method to specify the read method to read the data. - """ - pass - - @property - def test_word(self): - return "dummy word" - - @property - def vector_length(self): - """ - Set the vector length of the model. 
- """ - if hasattr(self, "_vector_length"): - return self._vector_length - else: - print(f"The vector length is not explicitly stated so we are inferring " + \ - "from our test word - {self.test_word}.") - setattr(self, "_vector_length", len(self.encode(self.test_word))) - return self._vector_length - - @vector_length.setter - def vector_length(self, value): - self._vector_length = value - - @abstractmethod - def encode(self, words: Union[List[str]]): - pass \ No newline at end of file diff --git a/vectorhub/encoders/text/sentence_transformers/__init__.py b/vectorhub/encoders/text/sentence_transformers/__init__.py deleted file mode 100644 index 0182440f..00000000 --- a/vectorhub/encoders/text/sentence_transformers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sentence_auto_transformers import * diff --git a/vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.md b/vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.md deleted file mode 100644 index 922235ec..00000000 --- a/vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -model_id: "text/sentence-transformers" -model_name: "Sentence Transformer Models" -vector_length: "Depends on model." -paper: https://arxiv.org/abs/1908.10084 -repo: https://github.com/UKPLab/sentence-transformers -release_date: "2019-08-27" -installation: pip install vectorhub[encoders-text-sentence-transformers] -category: text -short_description: These are Sentence Transformer models from sbert.net by UKPLab. ---- - -## Description - -These are Sentence Transformer models from sbert.net by UKPLab. - -## Example - -```python -#pip install vectorhub[encoders-text-sentence-transformers] -from vectorhub.encoders.text.sentence_transformers import SentenceTransformer2Vec -model = SentenceTransformer2Vec('distilroberta-base-paraphrase-v1') -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.py b/vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.py deleted file mode 100644 index 6ac59271..00000000 --- a/vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.py +++ /dev/null @@ -1,149 +0,0 @@ -import warnings -from typing import List -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseText2Vec -is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-sentence-transformers']) - -try: - from sentence_transformers import SentenceTransformer - from sentence_transformers import SentenceTransformer - from sentence_transformers import models, datasets, losses - import gzip - from torch.utils.data import DataLoader - import numpy as np - import pandas as pd - from tqdm.auto import tqdm - import nltk -except: - import traceback - traceback.print_exc() - -SentenceTransformerModelDefinition = ModelDefinition(markdown_filepath='encoders/text/sentence_transformers/sentence_auto_transformers.md') - -LIST_OF_URLS = { - 'distilroberta-base-paraphrase-v1' : {"vector_length": 768}, - 'xlm-r-distilroberta-base-paraphrase-v1' : {"vector_length": 768}, - "paraphrase-xlm-r-multilingual-v1": {"vector_length": 768}, - - 'distilbert-base-nli-stsb-mean-tokens' : {"vector_length": 768}, - 'bert-large-nli-stsb-mean-tokens' : {"vector_length": 1024}, - 'roberta-base-nli-stsb-mean-tokens' 
: {"vector_length": 768}, - 'roberta-large-nli-stsb-mean-tokens' : {"vector_length": 1024}, - - 'distilbert-base-nli-stsb-quora-ranking' : {"vector_length": 768}, - 'distilbert-multilingual-nli-stsb-quora-ranking' : {"vector_length": 768}, - - 'distilroberta-base-msmarco-v1' : {"vector_length": 768}, - - 'distiluse-base-multilingual-cased-v2' : {"vector_length": 512}, - 'xlm-r-bert-base-nli-stsb-mean-tokens' : {"vector_length": 768}, - - 'bert-base-wikipedia-sections-mean-tokens' : {"vector_length": 768}, - - 'LaBSE' : {"vector_length": 768}, - - 'average_word_embeddings_glove.6B.300d' : {"vector_length": 300}, - 'average_word_embeddings_komninos' : {"vector_length": 300}, - 'average_word_embeddings_levy_dependency' : {"vector_length": 768}, - 'average_word_embeddings_glove.840B.300d' : {"vector_length": 300}, - 'paraphrase-xlm-r-multilingual-v1': {"vector_length": 768}, -} - -__doc__ = SentenceTransformerModelDefinition.create_docs() - - -class SentenceTransformer2Vec(BaseText2Vec): - definition = SentenceTransformerModelDefinition - urls = LIST_OF_URLS - def __init__(self, model_name: str): - self.model_name = model_name - self.urls = LIST_OF_URLS - self.validate_model_url(model_name, LIST_OF_URLS) - if model_name in LIST_OF_URLS: - self.vector_length = LIST_OF_URLS[model_name]["vector_length"] - else: - self.vector_length = None - warnings.warn("Not included in the official model repository. Please specify set the vector length attribute.") - self.model = SentenceTransformer(model_name) - - def get_list_of_urls(self): - """ - Return list of URLS. - """ - return self.urls - - @catch_vector_errors - def encode(self, text: str) -> List[float]: - """ - Encode word from transformers. - This takes the beginning set of tokens and turns them into vectors - and returns mean pooling of the tokens. - Args: - word: string - """ - return self.model.encode([text])[0].tolist() - - @catch_vector_errors - def bulk_encode(self, texts: List[str]) -> List[List[float]]: - """ - Bulk encode words from transformers. 
- """ - return self.model.encode(texts).tolist() - - def run_tsdae_on_documents(self, fields, documents, batch_size=32, - learning_rate: float=3e-5, num_epochs: int=1, - model_output_path: str='.', weight_decay: int=0, - use_amp: bool=True, scheduler: str='constantlr', - temp_filepath = "./_temp.txt", chunksize=100): - """ -Set use_amp to True if your GPU supports FP16 cores - """ - train_sentences = [] - for c in self.chunk(documents, chunksize=500): - train_sentences += self.get_fields_across_document(fields, c) - return self.run_tsdae(train_sentences, batch_size=batch_size, - learning_rate=learning_rate, num_epochs=num_epochs, - model_output_path=model_output_path, weight_decay=weight_decay, - use_amp=use_amp, scheduler=scheduler) - - def run_tsdae(self, train_sentences: list, - batch_size=32, learning_rate: float=3e-5, num_epochs: int=1, - model_output_path: str='.', weight_decay: int=0, - use_amp: bool=True, scheduler: str='constantlr'): - """ -Set use_amp to True if your GPU supports FP16 cores - """ - train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences) - train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True) - train_loss = losses.DenoisingAutoEncoderLoss( - self.model, - tie_encoder_decoder=True - ) - self.model.fit( - train_objectives=[(train_dataloader, train_loss)], - epochs=num_epochs, - weight_decay=weight_decay, - scheduler=scheduler, - optimizer_params={'lr': learning_rate}, - show_progress_bar=True, - checkpoint_path=model_output_path, - use_amp=use_amp - ) - print("Finished training. You can now encode.") - print(f"Model saved at {model_output_path}") - - def read_sentences_from_text(self, filepath: str, - minimum_line_length: int=10): - """method for reading sentences from a file - """ - train_sentences = [] - with gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8') as fIn: - for line in tqdm(fIn, desc='Read file'): - line = line.strip() - if len(line) >= minimum_line_length: - train_sentences.append(line) - return train_sentences diff --git a/vectorhub/encoders/text/tf_transformers/__init__.py b/vectorhub/encoders/text/tf_transformers/__init__.py deleted file mode 100644 index 14492271..00000000 --- a/vectorhub/encoders/text/tf_transformers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .tf_auto_transformers import * diff --git a/vectorhub/encoders/text/tf_transformers/tf_auto_transformers.md b/vectorhub/encoders/text/tf_transformers/tf_auto_transformers.md deleted file mode 100644 index c5891c8d..00000000 --- a/vectorhub/encoders/text/tf_transformers/tf_auto_transformers.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -model_id: text/tf-auto-transformers -model_name: "Transformer Models" -vector_length: 'Depends on model.' -paper: "https://arxiv.org/abs/1910.03771" -repo: "https://huggingface.co/transformers/pretrained_models.html" -installation: "pip install vectorhub[encoders-text-tf-transformers]" -release_date: null -category: text -short_description: These are Tensorflow Automodels from HuggingFace. ---- - -## Description - -These are Tensorflow Automodels from HuggingFace. 
- -## Example - -```python -#pip install vectorhub[encoders-text-tf-transformers] -from vectorhub.encoders.text.tf_transformers import TFTransformer2Vec -model = TFTransformer2Vec('bert-base-uncased') -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tf_transformers/tf_auto_transformers.py b/vectorhub/encoders/text/tf_transformers/tf_auto_transformers.py deleted file mode 100644 index 59cccf66..00000000 --- a/vectorhub/encoders/text/tf_transformers/tf_auto_transformers.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import List -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tf-transformers-auto']): - import tensorflow as tf - from transformers import AutoTokenizer, TFAutoModel - -TransformerModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tf_transformers/tf_auto_transformers.md') - -__doc__ = TransformerModelDefinition.create_docs() - - -class TFTransformer2Vec(BaseText2Vec): - definition = TransformerModelDefinition - def __init__(self, model_name: str, config=None): - if config is None: - self.model = TFAutoModel.from_pretrained(model_name) - else: - self.model = TFAutoModel.from_pretrained(model_name, config=config) - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - - @catch_vector_errors - def encode(self, text: str) -> List[float]: - """ - Encode word from transformers. - This takes the beginning set of tokens and turns them into vectors - and returns mean pooling of the tokens. - Args: - word: string - """ - return tf.reduce_mean(self.model(self.tokenizer(text, return_tensors='tf'))[0], axis=1).numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, texts: List[str]) -> List[List[float]]: - """ - Bulk encode words from transformers. - """ - return tf.reduce_mean(self.model(self.tokenizer(texts, return_tensors='tf', truncation=True, padding=True))[0], axis=1).numpy().tolist() diff --git a/vectorhub/encoders/text/tfhub/__init__.py b/vectorhub/encoders/text/tfhub/__init__.py deleted file mode 100644 index cf1d6645..00000000 --- a/vectorhub/encoders/text/tfhub/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .albert import * -from .bert import * -from .experts_bert import * -from .elmo import * -from .labse import * -from .use import * -from .use_multi import * -from .use_lite import * -from .use_transformer import * -from .use_multi_transformer import * diff --git a/vectorhub/encoders/text/tfhub/albert.md b/vectorhub/encoders/text/tfhub/albert.md deleted file mode 100644 index e91bcbca..00000000 --- a/vectorhub/encoders/text/tfhub/albert.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -# be sure to put quotations -model_id: 'text/albert' -model_name: 'Albert - A Lite Bert' -vector_length: 768 (albert_en_base) -paper: 'https://arxiv.org/abs/1909.11942' -repo: 'https://tfhub.dev/tensorflow/albert_en_base/1' -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2019-09-26" -category: text -short_description: We propose a novel method to reduce the memory consumption of BERT, and show that it improves the scalability of BERT models. ---- - -## Description - -Increasing model size when pretraining natural language representations often results in improved performance on downstream tasks. 
However, at some point further model increases become harder due to GPU/TPU memory limitations and longer training times. To address these problems, we present two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows that our proposed methods lead to models that scale much better compared to the original BERT. We also use a self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and \squad benchmarks while having fewer parameters compared to BERT-large. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import Albert2Vec -model = Albert2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tfhub/albert.py b/vectorhub/encoders/text/tfhub/albert.py deleted file mode 100644 index 5c1d18b4..00000000 --- a/vectorhub/encoders/text/tfhub/albert.py +++ /dev/null @@ -1,52 +0,0 @@ -from datetime import date -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from ....doc_utils import ModelDefinition - -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-albert']): - from tensorflow.python.framework.errors_impl import NotFoundError - import tensorflow as tf - import tensorflow_hub as hub - try: - import tensorflow_text - except NotFoundError: - print('The installed Tensorflow Text version is not aligned with tensorflow, make sure that tensorflow-text version is same version as tensorflow') - -AlbertModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/albert') -__doc__ = AlbertModelDefinition.create_docs() - -class Albert2Vec(BaseText2Vec): - definition = AlbertModelDefinition - urls = { - 'https://tfhub.dev/tensorflow/albert_en_base/1': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/albert_en_xxlarge/1': {'vector_length': 4096}, - 'https://tfhub.dev/tensorflow/albert_en_large/1': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/albert_en_xlarge/1': {'vector_length': 2048}, - 'https://tfhub.dev/tensorflow/albert_en_base/2': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/albert_en_xxlarge/2': {'vector_length': 4096}, - 'https://tfhub.dev/tensorflow/albert_en_large/2': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/albert_en_xlarge/2': {'vector_length': 2048}, - } - def __init__(self, model_url: str = 'https://tfhub.dev/tensorflow/albert_en_base/2', max_seq_length: int = 228, normalize: bool = True, - preprocessor_url:str ='http://tfhub.dev/tensorflow/albert_en_preprocess/1'): - self.validate_model_url(model_url, list(self.urls.keys())) - self.max_seq_length = max_seq_length - self.normalize = normalize - self.init(model_url) - self.init_tokenizer(preprocessor_url) - - def init_tokenizer(self, preprocessor_url): - self.preprocessor = hub.KerasLayer(preprocessor_url) - - def init(self, model_url): - self.model = 
hub.KerasLayer(model_url) - - @catch_vector_errors - def encode(self, text: str, pooling_strategy='pooled_output'): - return self.model(self.preprocessor([text]))[pooling_strategy].numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, texts: list, pooling_strategy='pooled_output'): - return self.model(self.preprocessor(texts))[pooling_strategy].numpy().tolist() diff --git a/vectorhub/encoders/text/tfhub/bert.md b/vectorhub/encoders/text/tfhub/bert.md deleted file mode 100644 index ae6173b6..00000000 --- a/vectorhub/encoders/text/tfhub/bert.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -# be sure to put quotations -model_id: "text/bert" -model_name: "BERT - Bidirectional Encoder Representations" -vector_length: 1024 (Bert Large) -paper: "https://arxiv.org/abs/1810.04805v2" -repo: "https://tfhub.dev/google/collections/bert/1" -release_date: '2018-10-11' -installation: "pip install vectorhub[encoders-text-tfhub]" -category: text ---- - -## Description - -We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. - -![Bert Image](https://miro.medium.com/max/619/1*iJqlhZz-g6ZQJ53-rE9VvA.png) - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import Bert2Vec -model = Bert2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` - -## Model Versions - -Model Table | Vector Length -------------| ---------- -google/bert_cased_L-12_H-768_A-12 | 768 -google/bert_cased_L-24_H-1024_A-16 | 1024 -google/bert_chinese_L-12_H-768_A-12 | 768 -google/bert_multi_cased_L-12_H-768_A-12 | 768 -google/bert_uncased_L-12_H-768_A-12 | 768 -google/bert_uncased_L-24_H-1024_A-16 | 1024 - - -## Limitations - -* NA - -## Training Corpora: - -* BooksCorpus (800M words) -* Wikipedia (2,500M words) - -## Other Notes: - -* BERT was trained for 1M steps with a batch size of 128,000 words. 
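A minimal sketch of selecting a non-default checkpoint from the table above, here the multilingual cased base model (768-dimensional), and encoding a small batch with `bulk_encode`; the German sentence is purely illustrative:

```python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import Bert2Vec

# Multilingual cased BERT base instead of the default English BERT large.
model = Bert2Vec(
    model_url='https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3'
)
vectors = model.bulk_encode([
    "I enjoy taking long walks along the beach with my dog.",
    "Ich gehe gern mit meinem Hund am Strand spazieren.",
])
print(len(vectors), len(vectors[0]))  # 2 vectors, 768 dimensions each
```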
- - diff --git a/vectorhub/encoders/text/tfhub/bert.py b/vectorhub/encoders/text/tfhub/bert.py deleted file mode 100644 index 5512a1fd..00000000 --- a/vectorhub/encoders/text/tfhub/bert.py +++ /dev/null @@ -1,124 +0,0 @@ -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from datetime import date -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-bert']): - import tensorflow as tf - if hasattr(tf, 'executing_eagerly'): - if not tf.executing_eagerly(): - tf.compat.v1.enable_eager_execution() - import tensorflow_hub as hub - import bert - import numpy as np - -BertModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/bert') - -__doc__ = BertModelDefinition.create_docs() - -class Bert2Vec(BaseText2Vec): - definition = BertModelDefinition - urls = { - 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/2': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/2': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/2': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/2': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/2': {'vector_length': 768}, - - 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/3': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/3': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/3': {'vector_length': 1024}, - 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/3': {'vector_length': 768}, - 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3': {'vector_length': 768}, - } - def __init__(self, model_url: str = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3', - max_seq_length: int = 64, normalize: bool = True): - self.validate_model_url(model_url, list(self.urls.keys())) - self.max_seq_length = max_seq_length - self.normalize = normalize - self.model_input_type = "dict" - self.init(model_url) - self.tokenizer = self.init_tokenizer() - - def init(self, model_url: str): - self.model = hub.KerasLayer(model_url) - input_word_ids = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32) - input_mask = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32) - input_type_ids = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32) - try: - self.model(dict(input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids)) - except ValueError: - self.model_input_type = "list" - self.model([input_word_ids, input_mask, input_type_ids]) - - def init_tokenizer(self): - self.vocab_file = self.model.resolved_object.vocab_file.asset_path.numpy() - self.do_lower_case = 
self.model.resolved_object.do_lower_case.numpy() - return bert.bert_tokenization.FullTokenizer(self.vocab_file, self.do_lower_case) - - def process(self, input_strings: str): - input_ids_all, input_mask_all, input_type_ids_all = [], [], [] - if isinstance(input_strings, str): - input_strings = [input_strings] - for input_string in input_strings: - # Tokenize input. - input_tokens = ["[CLS]"] + \ - self.tokenizer.tokenize(input_string) + ["[SEP]"] - input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens) - sequence_length = min(len(input_ids), self.max_seq_length) - - # Padding or truncation. - if len(input_ids) >= self.max_seq_length: - input_ids = input_ids[:self.max_seq_length] - else: - input_ids = input_ids + [0] * \ - (self.max_seq_length - len(input_ids)) - - input_mask = [1] * sequence_length + [0] * \ - (self.max_seq_length - sequence_length) - - input_ids_all.append(input_ids) - input_mask_all.append(input_mask) - input_type_ids_all.append([0] * self.max_seq_length) - - return np.array(input_ids_all), np.array(input_mask_all), np.array(input_type_ids_all) - - @catch_vector_errors - def encode(self, text: str, pooling_strategy='pooled_output'): - input_ids, input_mask, input_type_ids = self.process(text) - if self.model_input_type == "list": - return self.model([ - tf.convert_to_tensor(input_ids, tf.int32, name="input_word_ids"), - tf.convert_to_tensor(input_mask, tf.int32, name="input_mask"), - tf.convert_to_tensor(input_type_ids, tf.int32, name="input_type_ids") - ])[0].numpy().tolist()[0] - else: - return self.model({ - "input_word_ids": tf.convert_to_tensor(input_ids, tf.int32, name="input_word_ids"), - "input_mask": tf.convert_to_tensor(input_mask, tf.int32, name="input_mask"), - "input_type_ids": tf.convert_to_tensor(input_type_ids, tf.int32, name="input_type_ids") - })[pooling_strategy].numpy().tolist()[0] - - - @catch_vector_errors - def bulk_encode(self, texts: list, pooling_strategy='pooled_output'): - input_ids, input_mask, input_type_ids = self.process(texts) - if self.model_input_type == "list": - return self.model([ - tf.convert_to_tensor(input_ids, tf.int32, name="input_word_ids"), - tf.convert_to_tensor(input_mask, tf.int32, name="input_mask"), - tf.convert_to_tensor(input_type_ids, tf.int32, name="input_type_ids") - ])[0].numpy().tolist() - else: - return self.model({ - "input_word_ids": tf.convert_to_tensor(input_ids, tf.int32, name="input_word_ids"), - "input_mask": tf.convert_to_tensor(input_mask, tf.int32, name="input_mask"), - "input_type_ids": tf.convert_to_tensor(input_type_ids, tf.int32, name="input_type_ids") - })[pooling_strategy].numpy().tolist() diff --git a/vectorhub/encoders/text/tfhub/elmo.md b/vectorhub/encoders/text/tfhub/elmo.md deleted file mode 100644 index da380024..00000000 --- a/vectorhub/encoders/text/tfhub/elmo.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -model_id: "text/elmo" -model_name: "Elmo (Embeddings From Language Models)" -vector_length: "1024 (default)" -paper: "https://arxiv.org/abs/1802.05365" -repo: "https://tfhub.dev/google/elmo/3" -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2020-07-03" -category: text -short_description: ELMo is a deep, character-based, bidirectional language model that learns to embed words in a way that captures their context. ---- - -## Description - -Computes contextualized word representations using character-based word representations and bidirectional LSTMs, as described in the paper "Deep contextualized word representations" [1]. 
- -ELMo (Embeddings from Language Models) representations are deep as they are a function of all of the -internal layers of the biLM. More specifically, we learn a linear combination of the vectors stacked above each input word for each end task. - - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` -## Training Corpora - -According to the original paper, this was trained on 1 Billion Word Benchmark. The 1 Billion Word Benchmark consists of English monolingual versions -- Europarl corpus (corpus is extracted from the European parliament) -- News commentary -- News - -From this, the following steps were taken to normalize the data: -- Normalization and tokenisation was performed on using scripts from WMT11 site, slightly augmented to normalize various UTF-8 variants for common punctuation. -- Duplicate sentences were removed -- Vocabulary was constructed by discording all words with count below 3 -- Words outside of vocabulary were mapped to -- Sentence order was randomized and data was split into 100 disjoint partitions -- One random partition was chosen as held-out set -- Held-out set was randomly shuffled and split into 50 disjoint partitions to be used as development/test data -- One partition partition is never predicted by the language model of the held-out data was used as test data in our experiments -- Out-of-vocabulary rate on the test set was set at 0.28% - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import Elmo2Vec -model = Elmo2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tfhub/elmo.py b/vectorhub/encoders/text/tfhub/elmo.py deleted file mode 100644 index 6ee9b2ad..00000000 --- a/vectorhub/encoders/text/tfhub/elmo.py +++ /dev/null @@ -1,81 +0,0 @@ -import warnings -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseText2Vec -from datetime import date -is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-use']) - -try: - import tensorflow_hub as hub - import tensorflow.compat.v1 as tf - import numpy as np -except: - import traceback - traceback.print_exc() - -ElmoModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/elmo.md') - -__doc__ = ElmoModelDefinition.create_docs() - -class Elmo2Vec(BaseText2Vec): - definition = ElmoModelDefinition - urls ={ - "https://tfhub.dev/google/elmo/3": {'vector_length': 1024} - } - def __init__(self, model_url: str="https://tfhub.dev/google/elmo/3", trainable_model=True): - warnings.warn("We are disabling TF2 eager execution to run this. This may conflict with other models. If you need + \ - other models., try to use a fresh environment or a new virtual machine.") - tf.disable_eager_execution() - self.model = hub.Module(model_url, trainable=trainable_model) - self.vector_length = 1024 - - @catch_vector_errors - def encode(self, text, output_layer: str="elmo"): - """ - The output layer can be one of the following: - lstm_outputs1: the first LSTM hidden state with shape [batch_size, max_length, 1024]. - lstm_outputs2: the second LSTM hidden state with shape [batch_size, max_length, 1024]. 
- elmo: the weighted sum of the 3 layers, where the weights are trainable. This tensor has shape [batch_size, max_length, 1024] - default: a fixed mean-pooling of all contextualized word representations with shape [batch_size, 1024]. - Note: The output layer word_emb is character-based and is not supported by VectorHub. - """ - sess = tf.Session() - init = tf.global_variables_initializer() - sess.run(init) - if output_layer != 'default': - vector = self.model( - [text], - signature="default", - as_dict=True)[output_layer].eval(session=sess)[0][0].tolist() - else: - vector = self.model( - [text], - signature="default", - as_dict=True)[output_layer].eval(session=sess)[0].tolist() - sess.close() - return vector - - @catch_vector_errors - def bulk_encode(self, texts, output_layer: str="elmo"): - """ - The output layer can be one of the following: - lstm_outputs1: the first LSTM hidden state with shape [batch_size, max_length, 1024]. - lstm_outputs2: the second LSTM hidden state with shape [batch_size, max_length, 1024]. - elmo: the weighted sum of the 3 layers, where the weights are trainable. This tensor has shape [batch_size, max_length, 1024] - default: a fixed mean-pooling of all contextualized word representations with shape [batch_size, 1024]. - Note: The output layer word_emb is character-based and is not supported by VectorHub. - """ - sess = tf.Session() - init = tf.global_variables_initializer() - sess.run(init) - vectors = self.model( - texts, - signature="default", - as_dict=True)[output_layer].eval(session=sess).tolist() - sess.close() - if output_layer == 'default': - return vectors - else: - return [x[0] for x in vectors] diff --git a/vectorhub/encoders/text/tfhub/experts_bert.md b/vectorhub/encoders/text/tfhub/experts_bert.md deleted file mode 100644 index fcab0a0c..00000000 --- a/vectorhub/encoders/text/tfhub/experts_bert.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -# be sure to put quotations -model_id: "text/experts_bert" -model_name: "Experts BERT - Collection of BERT experts fine-tuned on different datasets." -vector_length: 768 (Bert Large) -paper: "https://arxiv.org/abs/1810.04805v2" -repo: "https://tfhub.dev/google/collections/experts/bert/1" -release_date: '2021-02-15' -installation: "pip install vectorhub[encoders-text-tfhub]" -category: text ---- - -## Description - -Starting from a pre-trained BERT model and fine-tuning on the downstream task gives impressive results on many NLP tasks. One can further increase the performance by starting from a BERT model that better aligns or transfers to the task at hand, particularly when having a low number of downstream examples. For example, one can use a BERT model that was trained on text from a similar domain or by use a BERT model that was trained for a similar task. - -This is a collection of such BERT "expert" models that were trained on a diversity of datasets and tasks to improve performance on downstream tasks like question answering, tasks that require natural language inference skills, NLP tasks in the medical text domain, and more. 
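A minimal sketch of picking one of the experts listed in the Model Versions table below for domain text, here the PubMed checkpoint for biomedical sentences; the sample sentence is illustrative only:

```python
#pip install vectorhub[encoders-text-tfhub]
#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows]
from vectorhub.encoders.text.tfhub import ExpertsBert2Vec

# PubMed expert: same 768-dimensional output as the default wiki_books
# checkpoint, but trained on MEDLINE/PubMed text.
model = ExpertsBert2Vec(model_url='https://tfhub.dev/google/experts/bert/pubmed/2')
vector = model.encode("Metformin is a first-line treatment for type 2 diabetes.")
print(len(vector))  # 768
```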
- -![Bert Image](https://miro.medium.com/max/619/1*iJqlhZz-g6ZQJ53-rE9VvA.png) - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import ExpertsBert2Vec -model = ExpertsBert2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` - -## Model Versions - -Model Table | Vector Length -------------| ---------- -https://tfhub.dev/google/experts/bert/wiki_books/2 | 768 -https://tfhub.dev/google/experts/bert/wiki_books/mnli/2 | 768 -https://tfhub.dev/google/experts/bert/wiki_books/qnli/2 | 768 -https://tfhub.dev/google/experts/bert/wiki_books/qqp/2 | 768 -https://tfhub.dev/google/experts/bert/wiki_books/sst2/2 | 768 -https://tfhub.dev/google/experts/bert/wiki_books/squad2/2 | 768 -https://tfhub.dev/google/experts/bert/pubmed/2 | 768 -https://tfhub.dev/google/experts/bert/pubmed/squad2/2 | 768 - - -## Limitations - -* NA - -## Training Corpora: - -* BooksCorpus (800M words) -* Wikipedia (2,500M words) -* MEDLINE/PubMed -* CORD-19 -* CoLa -* MRPC - -## Other Notes: - -* NA - - diff --git a/vectorhub/encoders/text/tfhub/experts_bert.py b/vectorhub/encoders/text/tfhub/experts_bert.py deleted file mode 100644 index d071fa7d..00000000 --- a/vectorhub/encoders/text/tfhub/experts_bert.py +++ /dev/null @@ -1,57 +0,0 @@ -from datetime import date -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from ....doc_utils import ModelDefinition - -is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-albert']) - -try: - from tensorflow.python.framework.errors_impl import NotFoundError - import tensorflow as tf - import tensorflow_hub as hub - try: - import tensorflow_text - except NotFoundError: - print('The installed Tensorflow Text version is not aligned with tensorflow, make sure that tensorflow-text version is same version as tensorflow') -except: - import traceback - traceback.print_exc() - -ExpertsBertModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/experts_bert') -__doc__ = ExpertsBertModelDefinition.create_docs() - -class ExpertsBert2Vec(BaseText2Vec): - definition = ExpertsBertModelDefinition - urls = { - "https://tfhub.dev/google/experts/bert/wiki_books/2": {"vector_length": 768}, - "https://tfhub.dev/google/experts/bert/wiki_books/mnli/2": {"vector_length": 768}, - "https://tfhub.dev/google/experts/bert/wiki_books/qnli/2": {"vector_length": 768}, - "https://tfhub.dev/google/experts/bert/wiki_books/qqp/2": {"vector_length": 768}, - "https://tfhub.dev/google/experts/bert/wiki_books/sst2/2": {"vector_length": 768}, - "https://tfhub.dev/google/experts/bert/wiki_books/squad2/2": {"vector_length": 768}, - "https://tfhub.dev/google/experts/bert/pubmed/2": {"vector_length": 768}, - "https://tfhub.dev/google/experts/bert/pubmed/squad2/2": {"vector_length": 768}, - } - def __init__(self, model_url: str = "https://tfhub.dev/google/experts/bert/wiki_books/2", max_seq_length: int = 228, normalize: bool = True, - preprocessor_url:str ='https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'): - self.validate_model_url(model_url, list(self.urls.keys())) - 
self.max_seq_length = max_seq_length - self.normalize = normalize - self.init(model_url) - self.init_tokenizer(preprocessor_url) - - def init_tokenizer(self, preprocessor_url): - self.preprocessor = hub.KerasLayer(preprocessor_url) - - def init(self, model_url): - self.model = hub.KerasLayer(model_url) - - @catch_vector_errors - def encode(self, text: str, pooling_strategy='pooled_output'): - return self.model(self.preprocessor([text]))[pooling_strategy].numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, texts: list, pooling_strategy='pooled_output'): - return self.model(self.preprocessor(texts))[pooling_strategy].numpy().tolist() diff --git a/vectorhub/encoders/text/tfhub/labse.md b/vectorhub/encoders/text/tfhub/labse.md deleted file mode 100644 index d9a8147a..00000000 --- a/vectorhub/encoders/text/tfhub/labse.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -model_id: "text/labse" -model_name: "LaBSE - Language-agnostic BERT Sentence Embedding" -vector_length: "768 (default)" -paper: "https://arxiv.org/pdf/2007.01852v1.pdf" -repo: "https://tfhub.dev/google/LaBSE/1" -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2020-07-03" -category: text -short_description: We present a multilingual BERT embedding model, called LaBSE, that produces language-agnostic cross-lingual sentence embeddings for 109 languages. ---- - -## Description - -The language-agnostic BERT sentence embedding encodes text into high dimensional vectors. The model is trained and optimized to produce similar representations exclusively for bilingual sentence pairs that are translations of each other. So it can be used for mining for translations of a sentence in a larger corpus. -In “Language-agnostic BERT Sentence Embedding”, we present a multilingual BERT embedding model, called LaBSE, that produces language-agnostic cross-lingual sentence embeddings for 109 languages. The model is trained on 17 billion monolingual sentences and 6 billion bilingual sentence pairs using MLM and TLM pre-training, resulting in a model that is effective even on low-resource languages for which there is no data available during training. Further, the model establishes a new state of the art on multiple parallel text (a.k.a. bitext) retrieval tasks. We have released the pre-trained model to the community through tfhub, which includes modules that can be used as-is or can be fine-tuned using domain-specific data. - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Training Corpora - -LABSE has 2 types of data: -- Monolingual data (CommonCrawl and Wikipedia) -- Bilingual translation pairs (translation corpus is constructed from webpages using a bitext mining system) - -The extracted sentence pairs are filtered by a pre-trained contrastive data-selection (CDS) scoring model. -Human annotators manually evaluate sentence pairs from a small sub-set of the harvested pairs and mark the pairs as either "GOOD" or "BAD" translations, from which 80% of the retrained pairs from the manual are rated as "GOOD". - -## Training Setup - -Short lines less than 10 characters and long lines more than 5000 characters are removed. -Wiki data was extracted from the 05-21-2020 dump using WikiExtractor. 
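The LaBSE description above mentions mining a larger corpus for translations of a sentence. The sketch below shows one minimal way to do that with the encoder defined later in this diff: embed the query and the candidates, then pick the candidate with the highest cosine similarity. The `most_similar` helper and the example sentences are illustrative, not library code.

```python
import numpy as np
from vectorhub.encoders.text.tfhub import LaBSE2Vec

def most_similar(query_vector, candidate_vectors):
    """Return the index of the candidate with the highest cosine similarity."""
    q = np.asarray(query_vector, dtype=float)
    c = np.asarray(candidate_vectors, dtype=float)
    sims = c @ q / (np.linalg.norm(c, axis=1) * np.linalg.norm(q) + 1e-9)
    return int(np.argmax(sims)), sims

model = LaBSE2Vec()
candidates = [
    "Ich gehe gerne mit meinem Hund am Strand spazieren.",
    "Das Wetter ist heute schlecht.",
]
best, sims = most_similar(
    model.encode("I enjoy taking long walks along the beach with my dog."),
    model.bulk_encode(candidates),
)
print(candidates[best], float(sims[best]))
```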
- - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import LaBSE2Vec -model = LaBSE2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tfhub/labse.py b/vectorhub/encoders/text/tfhub/labse.py deleted file mode 100644 index 113774c6..00000000 --- a/vectorhub/encoders/text/tfhub/labse.py +++ /dev/null @@ -1,96 +0,0 @@ -from datetime import date -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS - -is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-labse']) - -try: - import tensorflow as tf - if hasattr(tf, 'executing_eagerly'): - if not tf.executing_eagerly(): - tf.compat.v1.enable_eager_execution() - import tensorflow_hub as hub - import bert - import numpy as np -except: - import traceback - traceback.print_exc() - -LABSEModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/labse.md') -__doc__ = LABSEModelDefinition.create_docs() - - -class LaBSE2Vec(BaseText2Vec): - definition = LABSEModelDefinition - urls = { - 'https://tfhub.dev/google/LaBSE/1': {'vector_length': 768} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/LaBSE/1', max_seq_length: int = 128, normalize: bool = True): - self.max_seq_length = max_seq_length - self.normalize = normalize - self.model = self.init(model_url) - self.tokenizer = self.init_tokenizer() - self.vector_length = 768 - - def init(self, model_url: str): - self.model_layer = hub.KerasLayer(model_url) - input_word_ids = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32, - name="input_word_ids") - input_mask = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32, - name="input_mask") - segment_ids = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32, - name="segment_ids") - pooled_output, _ = self.model_layer( - [input_word_ids, input_mask, segment_ids]) - if(self.normalize): - pooled_output = tf.keras.layers.Lambda( - lambda x: tf.nn.l2_normalize(x, axis=1))(pooled_output) - return tf.keras.Model( - inputs=[input_word_ids, input_mask, segment_ids], - outputs=pooled_output) - - def init_tokenizer(self): - self.vocab_file = self.model_layer.resolved_object.vocab_file.asset_path.numpy() - self.do_lower_case = self.model_layer.resolved_object.do_lower_case.numpy() - return bert.bert_tokenization.FullTokenizer( - self.vocab_file, self.do_lower_case) - - def process(self, input_strings: str): - input_ids_all, input_mask_all, segment_ids_all = [], [], [] - if isinstance(input_strings, str): - input_strings = [input_strings] - for input_string in input_strings: - # Tokenize input. - input_tokens = ["[CLS]"] + \ - self.tokenizer.tokenize(input_string) + ["[SEP]"] - input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens) - sequence_length = min(len(input_ids), self.max_seq_length) - - # Padding or truncation. 
- if len(input_ids) >= self.max_seq_length: - input_ids = input_ids[:self.max_seq_length] - else: - input_ids = input_ids + [0] * \ - (self.max_seq_length - len(input_ids)) - - input_mask = [1] * sequence_length + [0] * \ - (self.max_seq_length - sequence_length) - - input_ids_all.append(input_ids) - input_mask_all.append(input_mask) - segment_ids_all.append([0] * self.max_seq_length) - - return np.array(input_ids_all), np.array(input_mask_all), np.array(segment_ids_all) - - @catch_vector_errors - def encode(self, text: str): - input_ids, input_mask, segment_ids = self.process(text) - return self.model([input_ids, input_mask, segment_ids]).numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, texts: list): - input_ids, input_mask, segment_ids = self.process(texts) - return self.model([input_ids, input_mask, segment_ids]).numpy().tolist() \ No newline at end of file diff --git a/vectorhub/encoders/text/tfhub/use.md b/vectorhub/encoders/text/tfhub/use.md deleted file mode 100644 index 6864b9f9..00000000 --- a/vectorhub/encoders/text/tfhub/use.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -model_id: "text/use" -model_name: "USE - Universal Sentence Encoder" -vector_length: "512 (Base model)" -paper: "https://arxiv.org/abs/1803.11175" -repo: "https://tfhub.dev/google/collections/universal-sentence-encoder/1" -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2018-03-29" -category: text -short_description: Greater-than-word length text encoder for semantic search. ---- - -## Description - -We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.", - -![USE Image](https://www.gstatic.com/aihub/tfhub/universal-sentence-encoder/example-similarity.png) - -Image from [Google](https://tfhub.dev/google/universal-sentence-encoder/1). - -## Training Corpora - -Wikipedia, Web News, web question-answering pages and discussion forums. 
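The padding and masking logic inside LaBSE2Vec.process above is easier to check in isolation. Below is a framework-free sketch of just that step, assuming token ids have already been produced for a "[CLS] ... [SEP]" sequence; the function name and the example ids are illustrative.

```python
def pad_and_mask(input_ids, max_seq_length=128, pad_id=0):
    """Truncate or pad `input_ids` to `max_seq_length` and build the matching masks."""
    sequence_length = min(len(input_ids), max_seq_length)
    ids = input_ids[:max_seq_length] + [pad_id] * (max_seq_length - sequence_length)
    mask = [1] * sequence_length + [0] * (max_seq_length - sequence_length)
    segment_ids = [0] * max_seq_length  # single-segment input, as in LaBSE2Vec.process
    return ids, mask, segment_ids

ids, mask, segments = pad_and_mask([101, 7592, 2088, 102], max_seq_length=8)
print(ids)   # [101, 7592, 2088, 102, 0, 0, 0, 0]
print(mask)  # [1, 1, 1, 1, 0, 0, 0, 0]
```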
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -#FOR WINDOWS: pip install vectorhub[encoders-text-tfhub-windows] -from vectorhub.encoders.text.tfhub import USE2Vec -model = USE2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tfhub/use.py b/vectorhub/encoders/text/tfhub/use.py deleted file mode 100644 index 501eb83f..00000000 --- a/vectorhub/encoders/text/tfhub/use.py +++ /dev/null @@ -1,48 +0,0 @@ -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseText2Vec -from datetime import date - -is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-use']) - -try: - import tensorflow_hub as hub - import tensorflow as tf - if hasattr(tf, 'executing_eagerly'): - if not tf.executing_eagerly(): - tf.compat.v1.enable_eager_execution() -except: - import traceback - traceback.print_exc() - -USEModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/use.md') - -__doc__ = USEModelDefinition.create_docs() - -class USE2Vec(BaseText2Vec): - definition = USEModelDefinition - urls = { - "https://tfhub.dev/google/universal-sentence-encoder/4": {'vector_length': 512}, - "https://tfhub.dev/google/universal-sentence-encoder-large/5": {'vector_length': 512} - } - # or layer19 - def __init__(self, model_url: str = "https://tfhub.dev/google/universal-sentence-encoder-large/5"): - self.validate_model_url(model_url, list(self.urls.keys())) - self.init(model_url) - self.vector_length = 512 - - def init(self, model_url: str): - self.model_url = model_url - self.model = hub.load(self.model_url) - self.model_name = model_url.replace('https://tfhub.dev/google/', '').replace('/', '_') - - @catch_vector_errors - def encode(self, text): - return self.model([text]).numpy().tolist()[0] - - # can consider compress in the future - @catch_vector_errors - def bulk_encode(self, texts, threads=10, chunks=100): - return [i for c in self.chunk(texts, chunks) for i in self.model(c).numpy().tolist()] diff --git a/vectorhub/encoders/text/tfhub/use_lite.md b/vectorhub/encoders/text/tfhub/use_lite.md deleted file mode 100644 index d39f380f..00000000 --- a/vectorhub/encoders/text/tfhub/use_lite.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -model_id: "text/use-lite" -model_name: "USE Lite - Universal Sentence Encoder Lite" -vector_length: "512 (default)" -paper: "https://arxiv.org/abs/1803.11175" -repo: "https://tfhub.dev/google/universal-sentence-encoder-lite/2" -installation: pip install vectorhub[encoders-text-tfhub] -release_date: "2018-03-29" -category: text ---- - -## Description - -The Universal Sentence Encoder Lite module is a lightweight version of Universal Sentence Encoder. This lite version is good for use cases when your computation resource is limited. For example, on-device inference. It's small and still gives good performance on various natural language understanding tasks. 
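USE2Vec.bulk_encode above keeps memory bounded by encoding texts in chunks through a `chunk` helper that lives in a base class not shown in this diff. The standalone sketch below reproduces that pattern so its behaviour is easy to inspect; `chunk`, `bulk_encode_in_chunks` and the dummy encoder are illustrative stand-ins, not library APIs.

```python
def chunk(items, chunk_size):
    """Yield successive `chunk_size`-sized slices of `items`."""
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

def bulk_encode_in_chunks(texts, encode_batch, chunk_size=100):
    """Encode `texts` one chunk at a time and flatten the per-chunk results."""
    return [vector for batch in chunk(texts, chunk_size) for vector in encode_batch(batch)]

# Dummy encoder for demonstration; with USE2Vec the batch call would be
# `model.model(batch).numpy().tolist()`.
fake_encoder = lambda batch: [[float(len(text))] for text in batch]
print(bulk_encode_in_chunks(["a", "bb", "ccc"], fake_encoder, chunk_size=2))
```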
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -from vectorhub.encoders.text.tfhub import USELite2Vec -model = USELite2Vec() -model.encode("I enjoy taking long walks along the beach with my dog. -``` diff --git a/vectorhub/encoders/text/tfhub/use_lite.py b/vectorhub/encoders/text/tfhub/use_lite.py deleted file mode 100644 index 6fd8dc3e..00000000 --- a/vectorhub/encoders/text/tfhub/use_lite.py +++ /dev/null @@ -1,83 +0,0 @@ -import warnings -import numpy as np -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseText2Vec -from .use import USE2Vec - -is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-use-multi']) - -try: - import tensorflow as tf - from tensorflow.python.framework.errors_impl import NotFoundError - try: - import tensorflow_text - except NotFoundError: - print('The installed Tensorflow Text version is not aligned with tensorflow, make sure that tensorflow-text version is same version as tensorflow') -except: - import traceback - traceback.print_exc() - -USELiteModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/use_lite.md') - -class USELite2Vec(BaseText2Vec): - definition = USELiteModelDefinition - - urls = { - "https://tfhub.dev/google/universal-sentence-encoder-lite/2": {'vector_length': 512} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-lite/2'): - list_of_urls = [ - "https://tfhub.dev/google/universal-sentence-encoder-lite/2", - ] - self.validate_model_url(model_url, list_of_urls) - self.vector_length = 512 - warnings.warn("Using USELite2Vec requires disabling tf2 behaviours: tf.disable_v2_behavior(). Meaning it can break the usage of other models if ran. 
If you are ok with this run model.init() to disable tf2 and run USELite2Vec") - - def init(self): - import tensorflow.compat.v1 as tf - tf.disable_v2_behavior() - self.input_placeholder = tf.sparse_placeholder(tf.int64, shape=[None, None]) - self.model = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-lite/2") - self.encodings = module(inputs=dict( - values=self.input_placeholder.values, - indices=self.input_placeholder.indices, - dense_shape=self.input_placeholder.dense_shape - )) - with tf.Session() as sess: - spm_path = sess.run(module(signature="spm_path")) - self.sp = spm.SentencePieceProcessor() - self.sp.Load(spm_path) - - def process_texts(self, texts): - ids = [self.sp.EncodeAsIds(x) for x in texts] - return ( - [item for sublist in ids for item in sublist], - [[row,col] for row in range(len(ids)) for col in range(len(ids[row]))], - (len(ids), max(len(x) for x in ids)) - ) - - @catch_vector_errors - def encode(self, text): - values, indices, dense_shape = self.process_texts([text]) - with tf.Session() as session: - session.run([tf.global_variables_initializer(), tf.tables_initializer()]) - message_embeddings = session.run(self.encodings, - feed_dict={self.input_placeholder.values: values, - self.input_placeholder.indices: indices, - self.input_placeholder.dense_shape: dense_shape}) - return np.array(message_embeddings)[0].tolist() - - @catch_vector_errors - def bulk_encode(self, texts, threads=10, chunks=100): - values, indices, dense_shape = self.process_texts(texts) - with tf.Session() as session: - session.run([tf.global_variables_initializer(), tf.tables_initializer()]) - message_embeddings = session.run(self.encodings, - feed_dict={self.input_placeholder.values: values, - self.input_placeholder.indices: indices, - self.input_placeholder.dense_shape: dense_shape}) - return np.array(message_embeddings).tolist() diff --git a/vectorhub/encoders/text/tfhub/use_multi.md b/vectorhub/encoders/text/tfhub/use_multi.md deleted file mode 100644 index 96bfe9ce..00000000 --- a/vectorhub/encoders/text/tfhub/use_multi.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -model_id: "text/use-multi" -model_name: "USE Multi - Universal Sentence Encoder Multilingual" -vector_length: "512 (Base model)" -paper: "https://arxiv.org/abs/1803.11175" -repo: "https://tfhub.dev/google/collections/universal-sentence-encoder/1" -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2018-03-29" -category: text ---- - -## Description - -The Universal Sentence Encoder Multilingual module is an extension of the Universal Sentence Encoder Large that includes training on multiple tasks across languages. Supports 16 languages (Arabic, Chinese-simplified, Chinese-traditional, English, French, German, Italian, Japanese, Korean, Dutch, Polish, Portuguese, Spanish, Thai, Turkish, Russian) text encoder. 
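USELite2Vec.process_texts above flattens per-sentence SentencePiece ids into the (values, indices, dense_shape) triple that the TF1 sparse placeholder expects. The framework-free sketch below performs the same transformation on plain lists so it can be verified without loading the model; the ids are made up.

```python
def to_sparse_triple(ids_per_text):
    """Flatten ragged token-id lists into values, indices and a dense shape."""
    values = [token for row in ids_per_text for token in row]
    indices = [[row, col] for row in range(len(ids_per_text))
               for col in range(len(ids_per_text[row]))]
    dense_shape = (len(ids_per_text), max(len(row) for row in ids_per_text))
    return values, indices, dense_shape

values, indices, dense_shape = to_sparse_triple([[7, 42, 3], [9, 1]])
print(values)       # [7, 42, 3, 9, 1]
print(indices)      # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]]
print(dense_shape)  # (2, 3)
```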
- -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -from vectorhub.encoders.text.tfhub import USEMulti2Vec -model = USEMulti2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tfhub/use_multi.py b/vectorhub/encoders/text/tfhub/use_multi.py deleted file mode 100644 index 5831b87e..00000000 --- a/vectorhub/encoders/text/tfhub/use_multi.py +++ /dev/null @@ -1,35 +0,0 @@ -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from .use import USE2Vec - -is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-use-multi']) - -try: - import tensorflow as tf - if hasattr(tf, 'executing_eagerly'): - if not tf.executing_eagerly(): - tf.compat.v1.enable_eager_execution() - from tensorflow.python.framework.errors_impl import NotFoundError - try: - import tensorflow_text - except NotFoundError: - print('The installed Tensorflow Text version is not aligned with tensorflow, make sure that tensorflow-text version is same version as tensorflow') -except: - import traceback - traceback.print_exc() - -USEMultiModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/use_multi') - -__doc__ = USEMultiModelDefinition.create_docs() - -class USEMulti2Vec(USE2Vec): - definition = USEMultiModelDefinition - urls = { - "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3": {'vector_length': 512}, - "https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3": {'vector_length': 512} - } - def __init__(self, model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3'): - self.validate_model_url(model_url, list(self.urls.keys())) - self.init(model_url) - self.vector_length = 512 diff --git a/vectorhub/encoders/text/tfhub/use_multi_transformer.md b/vectorhub/encoders/text/tfhub/use_multi_transformer.md deleted file mode 100644 index afec2c3c..00000000 --- a/vectorhub/encoders/text/tfhub/use_multi_transformer.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -model_id: "text/use-multi-mlm" -model_name: "USE with conditional MLM Multilingual" -vector_length: "1024 (Base model)" -paper: "https://arxiv.org/abs/1803.11175" -repo: "https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1" -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2021-01-31" -category: text ---- - -*WARNING* This model currently has memory leaks that have yet to be patched. - -## Description - -The universal sentence encoder family of models map text into high dimensional vectors that capture sentence-level semantics. Our English-large (en-large) model is trained using a conditional masked language model described in [1]. The model is intended to be used for text classification, text clustering, semantic textural similarity, etc. It can also be use used as modularized input for multimodal tasks with text as a feature. The model can be fine-tuned for all of these tasks. The large model employs a 24 layer BERT transformer architecture. 
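The description above lists text clustering among the intended uses of these sentence encoders. Below is a minimal greedy-clustering sketch that works on vectors from any encoder in this diff; the threshold, the exemplar-based strategy and the toy vectors are illustrative choices, not library behaviour.

```python
import numpy as np

def greedy_cluster(vectors, threshold=0.7):
    """Assign each vector to the first cluster whose exemplar is similar enough."""
    vectors = np.asarray(vectors, dtype=float)
    vectors = vectors / (np.linalg.norm(vectors, axis=1, keepdims=True) + 1e-9)
    clusters, exemplars = [], []  # exemplar = first vector assigned to each cluster
    for i, vector in enumerate(vectors):
        sims = [float(exemplar @ vector) for exemplar in exemplars]
        if sims and max(sims) >= threshold:
            clusters[int(np.argmax(sims))].append(i)
        else:
            clusters.append([i])
            exemplars.append(vector)
    return clusters

# With real sentences, `vectors` would come from e.g. `model.bulk_encode(texts)`.
print(greedy_cluster([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]], threshold=0.8))  # [[0, 1], [2]]
```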
- - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -from vectorhub.encoders.text.tfhub import USEMultiTransformer2Vec -model = USEMultiTransformer2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tfhub/use_multi_transformer.py b/vectorhub/encoders/text/tfhub/use_multi_transformer.py deleted file mode 100644 index 8cfa6736..00000000 --- a/vectorhub/encoders/text/tfhub/use_multi_transformer.py +++ /dev/null @@ -1,56 +0,0 @@ -import warnings -import numpy as np -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseText2Vec -from .use import USE2Vec - -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-use-transformer']): - import tensorflow as tf - import tensorflow_hub as hub - import tensorflow_text as text - -USEMultiTransformerModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/use_multi_transformer') - -__doc__ = USEMultiTransformerModelDefinition.create_docs() - -class USEMultiTransformer2Vec(USE2Vec): - definition = USEMultiTransformerModelDefinition - urls = { - "https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base/1": {"vector_length": 512}, - "https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base-br/1": {"vector_length": 512} - } - def __init__(self, model_url: str="https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1", - preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"): - self.validate_model_url(model_url, list(self.urls.keys())) - self.init(model_url) - self.vector_length = 512 - self.model_url = model_url - self.preprocess_url = preprocessor_url - self.preprocessor = hub.KerasLayer(preprocessor_url) - self.encoder = hub.KerasLayer(model_url) - - @property - def preprocessor_urls(self): - return [ - "https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-preprocess/2" - ] - - @catch_vector_errors - def encode(self, text, pooling_strategy='defualt'): - """ - Pooling strategy can be one of 'pooled_output' or 'default'. - """ - return self.encoder(self.preprocessor(tf.constant([text])))['default'].numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, texts, pooling_strategy='default'): - """ - Bulk encode the texts. - Pooling strategy can be one of 'pooled_output' or 'default'. 
- """ - return self.encoder(self.preprocessor(tf.constant(texts)))['default'].numpy().tolist() - diff --git a/vectorhub/encoders/text/tfhub/use_transformer.md b/vectorhub/encoders/text/tfhub/use_transformer.md deleted file mode 100644 index 916bd669..00000000 --- a/vectorhub/encoders/text/tfhub/use_transformer.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -model_id: "text/use-mlm" -model_name: "USE with conditional MLM" -vector_length: "1024 (Base model)" -paper: "https://arxiv.org/abs/1803.11175" -repo: "https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1" -installation: "pip install vectorhub[encoders-text-tfhub]" -release_date: "2021-01-31" -category: text ---- - -*WARNING* This model currently has memory leaks that have yet to be patched. - -## Description - -The universal sentence encoder family of models map text into high dimensional vectors that capture sentence-level semantics. Our English-large (en-large) model is trained using a conditional masked language model described in [1]. The model is intended to be used for text classification, text clustering, semantic textural similarity, etc. It can also be use used as modularized input for multimodal tasks with text as a feature. The model can be fine-tuned for all of these tasks. The large model employs a 24 layer BERT transformer architecture. - - -## Working in Colab - -If you are using this in colab and want to save this so you don't have to reload, use: - -``` -import os -os.environ['TFHUB_CACHE_DIR'] = "drive/MyDrive/" -os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED" -``` - -## Example - -```python -#pip install vectorhub[encoders-text-tfhub] -from vectorhub.encoders.text.tfhub import USETransformer2Vec -model = USETransformer2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/tfhub/use_transformer.py b/vectorhub/encoders/text/tfhub/use_transformer.py deleted file mode 100644 index 50074b40..00000000 --- a/vectorhub/encoders/text/tfhub/use_transformer.py +++ /dev/null @@ -1,54 +0,0 @@ -import warnings -import numpy as np -from datetime import date -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import is_all_dependency_installed -from ....models_dict import MODEL_REQUIREMENTS -from ..base import BaseText2Vec -from .use import USE2Vec - -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-use-transformer']): - import tensorflow as tf - import tensorflow_hub as hub - -USETransformerModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/use_transformer') - -__doc__ = USETransformerModelDefinition.create_docs() - -class USETransformer2Vec(USE2Vec): - definition = USETransformerModelDefinition - urls = { - "https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1": {'vector_length': 1024}, - "https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1": {'vector_length': 512}, - } - def __init__(self, model_url: str="https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1", - preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"): - self.validate_model_url(model_url, list(self.urls.keys())) - self.init(model_url) - self.vector_length = 1024 - self.model_url = model_url - self.preprocess_url = preprocessor_url - self.preprocessor = hub.KerasLayer(preprocessor_url) - self.encoder = hub.KerasLayer(model_url) - - @property - def preprocessor_urls(self): - return [ - 
"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3" - ] - - @catch_vector_errors - def encode(self, text, pooling_strategy='defualt'): - """ - Pooling strategy can be one of 'pooled_output' or 'default'. - """ - return self.encoder(self.preprocessor(tf.constant([text])))['default'].numpy().tolist()[0] - - @catch_vector_errors - def bulk_encode(self, texts, pooling_strategy='default'): - """ - Bulk encode the texts. - Pooling strategy can be one of 'pooled_output' or 'default'. - """ - return self.encoder(self.preprocessor(tf.constant(texts)))['default'].numpy().tolist() diff --git a/vectorhub/encoders/text/torch_transformers/__init__.py b/vectorhub/encoders/text/torch_transformers/__init__.py deleted file mode 100644 index ef9c5d9f..00000000 --- a/vectorhub/encoders/text/torch_transformers/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .legal_bert import * -from .torch_auto_transformers import * -from .torch_longformers import * diff --git a/vectorhub/encoders/text/torch_transformers/legal_bert.md b/vectorhub/encoders/text/torch_transformers/legal_bert.md deleted file mode 100644 index 6e3a4d1e..00000000 --- a/vectorhub/encoders/text/torch_transformers/legal_bert.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -model_id: "text/legal-bert" -model_name: "Legal Bert" -vector_length: "768 (default)" -paper: "https://arxiv.org/abs/2010.02559" -repo: "https://huggingface.co/nlpaueb/legal-bert-base-uncased" -release_date: "2020-10-06" -installation: "pip install vectorhub[encoders-text-torch-transformers]" -category: text -short_description: We propose a systematic investigation of the available strategies when applying BERT in Legal domains. ---- - -## Description - -BERT has achieved impressive performance in several NLP tasks. However, there has been limited investigation on its adaptation guidelines in specialised domains. Here we focus on the legal domain, where we explore several approaches for applying BERT models to downstream legal tasks, evaluating on multiple datasets. Our findings indicate that the previous guidelines for pre-training and fine-tuning, often blindly followed, do not always generalize well in the legal domain. Thus we propose a systematic investigation of the available strategies when applying BERT in specialised domains. These are: (a) use the original BERT out of the box, (b) adapt BERT by additional pre-training on domain-specific corpora, and (c) pre-train BERT from scratch on domain-specific corpora. We also propose a broader hyper-parameter search space when fine-tuning for downstream tasks and we release LEGAL-BERT, a family of BERT models intended to assist legal NLP research, computational law, and legal technology applications. 
- -## Example - -```python -#pip install vectorhub[encoders-text-torch-transformers] -from vectorhub.encoders.text.torch_transformers import LegalBert2Vec -model = LegalBert2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/torch_transformers/legal_bert.py b/vectorhub/encoders/text/torch_transformers/legal_bert.py deleted file mode 100644 index 6a5acaf7..00000000 --- a/vectorhub/encoders/text/torch_transformers/legal_bert.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import List, Union -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -from datetime import date -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-torch-transformers-auto']): - from transformers import AutoTokenizer, AutoModel - import torch - -LegalBertModelDefinition = ModelDefinition(markdown_filepath='encoders/text/torch_transformers/legal_bert') - -__doc__ = LegalBertModelDefinition.create_docs() - -class LegalBert2Vec(BaseText2Vec): - definition = LegalBertModelDefinition - urls = { - "nlpaueb/bert-base-uncased-contracts": {"data": "Trained on US contracts"}, - "nlpaueb/bert-base-uncased-eurlex": {"data": "Trained on EU legislation"}, - "nlpaueb/bert-base-uncased-echr ": {"data": "Trained on ECHR cases"}, - "nlpaueb/legal-bert-base-uncased": {"data": "Trained on all the above"}, - "nlpaueb/legal-bert-small-uncased": {"data": "Trained on all the above"} - } - def __init__(self, model_name: str="nlpaueb/legal-bert-base-uncased"): - self.model = AutoModel.from_pretrained(model_name) - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - - @catch_vector_errors - def encode(self, text: Union[str, List[str]]) -> List[float]: - """ - Encode words using transformers. - Args: - text: str - """ - if isinstance(text, str): - return torch.mean(self.model(**self.tokenizer(text, return_tensors='pt'))[0], axis=1).detach().tolist()[0] - if isinstance(text, list): - return self.bulk_encode(text) - raise ValueError("Not a string or a list of strings, please enter valid data type.") - - @catch_vector_errors - def bulk_encode(self, texts: List[str]) -> List[List[float]]: - """ - Encode multiple sentences using transformers. - args: - texts: List[str] - """ - # We use pad_to_multiple_of as other arguments usually do not work. - # TODO: FIx the older method - # return torch.mean(self.model(**self.tokenizer(texts, return_tensors='pt', pad_to_multiple_of=self.tokenizer.model_max_length, - # truncation=True, padding=True))[0], axis=1).detach().tolist() - return [self.encode(x) for x in texts] diff --git a/vectorhub/encoders/text/torch_transformers/torch_auto_transformers.md b/vectorhub/encoders/text/torch_transformers/torch_auto_transformers.md deleted file mode 100644 index 2d4c39ad..00000000 --- a/vectorhub/encoders/text/torch_transformers/torch_auto_transformers.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -model_id: text/torch-auto-transformers -model_name: "Transformer Models" -vector_length: 'Depends on model.' -paper: "https://arxiv.org/abs/1910.03771" -repo: "https://huggingface.co/transformers/pretrained_models.html" -installation: "pip install vectorhub[encoders-text-torch-transformers-auto]" -release_date: null -category: text ---- - -## Description - -These are Tensorflow Automodels from HuggingFace. 
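LegalBert2Vec.bulk_encode above falls back to encoding one text at a time, and the commented-out batched path (like Transformer2Vec.bulk_encode below) mean-pools over every position, including padding. A common refinement, shown here as a sketch and not as the library's behaviour, is to mask out padding before averaging; `transformers` and `torch` are assumed installed and the helper name is illustrative.

```python
import torch
from transformers import AutoModel, AutoTokenizer

def masked_mean_encode(texts, model_name="nlpaueb/legal-bert-base-uncased"):
    """Batch-encode texts, averaging token states only over non-padding positions."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)
    batch = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        token_states = model(**batch)[0]                  # (batch, seq_len, hidden)
    mask = batch["attention_mask"].unsqueeze(-1).float()  # 1 for real tokens, 0 for [PAD]
    summed = (token_states * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1.0)
    return (summed / counts).tolist()

vectors = masked_mean_encode(["a short clause", "a much longer clause about indemnification and liability"])
print(len(vectors), len(vectors[0]))  # 2 768
```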
- -## Example - -```python -#pip install vectorhub[encoders-text-tf-transformers] -from vectorhub.encoders.text.tf_transformers import TFTransformer2Vec -model = TFTransformer2Vec('bert-base-uncased') -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/torch_transformers/torch_auto_transformers.py b/vectorhub/encoders/text/torch_transformers/torch_auto_transformers.py deleted file mode 100644 index fd3c37aa..00000000 --- a/vectorhub/encoders/text/torch_transformers/torch_auto_transformers.py +++ /dev/null @@ -1,64 +0,0 @@ -from typing import List, Union -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-torch-transformers-auto']): - from transformers import AutoTokenizer, AutoModel - import torch - -TransformerModelDefinition = ModelDefinition(markdown_filepath='encoders/text/torch_transformers/torch_auto_transformers.md') - -__doc__ = TransformerModelDefinition.create_docs() - - -def list_tested_transformer_models(): - """ - List the transformed models. - """ - return [ - "bert-base-uncased", - "distilbert-base-uncased", - "facebook/bart-base" - ] - -class Transformer2Vec(BaseText2Vec): - definition = TransformerModelDefinition - urls = { - "bert-base-uncased": {'vector_length': 768}, - "distilbert-base-uncased": {'vector_length': 768}, - "facebook/bart-base": {'vector_length': 768} - } - - def __init__(self, model_name: str): - self.model = AutoModel.from_pretrained(model_name) - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - print("Transformer2Vec uses the AutoModel to allow for easier models.") - print("Therefore, not all models will worked but most do. " + \ - "Call the list of tested transformer models using list_tested_models.") - - @catch_vector_errors - def encode(self, text: Union[str, List[str]]) -> List[float]: - """ - Encode words using transformers. - Args: - word: str - - """ - if isinstance(text, str): - return torch.mean(self.model(**self.tokenizer(text, return_tensors='pt'))[0], axis=1).detach().tolist()[0] - if isinstance(text, list): - return self.bulk_encode(text) - raise ValueError("Not a string or a list of strings, please enter valid data type.") - - @catch_vector_errors - def bulk_encode(self, texts: List[str]) -> List[List[float]]: - """ - Encode multiple sentences using transformers. - args: - Sentences: List[str] - """ - # We use pad_to_multiple_of as other arguments usually do not work. 
- return torch.mean(self.model(**self.tokenizer(texts, return_tensors='pt', pad_to_multiple_of=self.tokenizer.model_max_length, - truncation=True, padding=True))[0], axis=1).detach().tolist() diff --git a/vectorhub/encoders/text/torch_transformers/torch_longformers.md b/vectorhub/encoders/text/torch_transformers/torch_longformers.md deleted file mode 100644 index 18f2ff40..00000000 --- a/vectorhub/encoders/text/torch_transformers/torch_longformers.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -model_id: "text/longformer" -model_name: "Longformer" -vector_length: "768 (default)" -paper: "https://arxiv.org/abs/2004.05150" -repo: "https://huggingface.co/allenai/longformer-base-4096" -release_date: "2020-04-10" -installation: "pip install vectorhub[encoders-text-torch-transformers]" -category: text -short_description: We propose a new attention-based model for long-sequence processing that is able to learn long-term dependencies and process documents of thousands of tokens or longer. ---- - -## Description - -From the abstract of the paper: - -Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA. - -Novelties in the LongFormer paper come from its use of attention pattern: - -- Sliding Window: The attention pattern employes fixed-size window attention surrounding each token on both sides. -- Dilated Sliding Window: Similar to CNN dilation, sliding windows can be dilated (i.e. have gaps) -- Global Attention: Certain tokens attend to all tokens (e.g. for classification, global attention is used for CLS) -- Linear Projections for Global Attention: 2 sets of projections are used to compute attention scores of sliding window attention. 
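To make the attention patterns listed above concrete, the toy sketch below builds a boolean mask that combines a fixed-size sliding window with a few globally attending positions. The window size, sequence length and global positions are made up, and Longformer itself never materialises this full matrix; this is only an illustration of the pattern.

```python
import numpy as np

def longformer_style_mask(seq_len, window=2, global_positions=(0,)):
    """mask[i, j] is True where token i may attend to token j."""
    positions = np.arange(seq_len)
    mask = np.abs(positions[:, None] - positions[None, :]) <= window  # sliding window
    for g in global_positions:
        mask[g, :] = True  # the global token attends to every position
        mask[:, g] = True  # and every position attends back to it
    return mask

print(longformer_style_mask(seq_len=8, window=1, global_positions=(0,)).astype(int))
```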
- -## Training Corpora - -- Books corpus -- English Wikipedia -- One third of a subset of Realnews dataset with documents longer than 1200 tokens -- One third of stories corpus - -## Example - -```python -#pip install vectorhub[encoders-text-torch-transformers] -from vectorhub.encoders.text.torch_transformers import Longformer2Vec -model = Longformer2Vec() -model.encode("I enjoy taking long walks along the beach with my dog.") -``` diff --git a/vectorhub/encoders/text/torch_transformers/torch_longformers.py b/vectorhub/encoders/text/torch_transformers/torch_longformers.py deleted file mode 100644 index 31e16e05..00000000 --- a/vectorhub/encoders/text/torch_transformers/torch_longformers.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import List, Union -from ..base import BaseText2Vec -from ....base import catch_vector_errors -from ....doc_utils import ModelDefinition -from ....import_utils import * -from ....models_dict import MODEL_REQUIREMENTS -if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-torch-transformers-auto']): - from transformers import LongformerTokenizer, LongformerModel - import torch - -LongformerModelDefinition = ModelDefinition(markdown_filepath='encoders/text/torch_transformers/torch_longformers.md') - -__doc__ = LongformerModelDefinition.create_docs() - -class Longformer2Vec(BaseText2Vec): - definition = LongformerModelDefinition - urls = { - 'allenai/longformer-base-4096': {'vector_length': 4096}, - 'allenai/longformer-large-4096': {'vector_length': 4096} - } - def __init__(self, model_name: str = "allenai/longformer-base-4096"): - self.model = LongformerModel.from_pretrained(model_name) - self.tokenizer = LongformerTokenizer.from_pretrained(model_name) - - @catch_vector_errors - def encode(self, text: Union[str, List[str]]) -> List[float]: - """ - Encode words using transformers. - Args: - text: str - - """ - if isinstance(text, str): - return torch.mean(self.model(**self.tokenizer(text, return_tensors='pt'))[0], axis=1).detach().tolist()[0] - if isinstance(text, list): - return self.bulk_encode(text) - raise ValueError( - "Not a string or a list of strings, please enter valid data type.") - - @catch_vector_errors - def bulk_encode(self, texts: List[str], pooling_method='mean') -> List[List[float]]: - """ - Encode multiple sentences using transformers. - args: - texts: List[str] - """ - # We use pad_to_multiple_of as other arguments usually do not work. - if pooling_method == 'mean': - return torch.mean( - self.model(**self.tokenizer(texts, return_tensors='pt', - pad_to_multiple_of=self.tokenizer.model_max_length, - truncation=True, padding=True))[0], axis=1).detach().tolist() - else: - raise NotImplementedError diff --git a/vectorhub/encoders/text/vectorai/__init__.py b/vectorhub/encoders/text/vectorai/__init__.py deleted file mode 100644 index de5e8db2..00000000 --- a/vectorhub/encoders/text/vectorai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .vi_encoder import * \ No newline at end of file diff --git a/vectorhub/encoders/text/vectorai/vi_encoder.py b/vectorhub/encoders/text/vectorai/vi_encoder.py deleted file mode 100644 index cffa5f52..00000000 --- a/vectorhub/encoders/text/vectorai/vi_encoder.py +++ /dev/null @@ -1,83 +0,0 @@ -""" - Vector AI's deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves - how the embedding works. These models are selected to work out-of-the-box after testing for their success on our end. 
- - To get access to Vector AI, we need to use - - Example: - - >>> from vectorhub.text.encoder.vectorai import ViText2Vec - >>> model = ViText2Vec(username, api_key) - >>> model.encode("Hey!") - >>> model.bulk_encode(["hey", "stranger"]) - -""" -import io -import base64 -import numpy as np -import requests -from abc import abstractmethod -from typing import List, Union -from ..base import BaseText2Vec -from ....base import catch_vector_errors - -class ViText2Vec(BaseText2Vec): - def __init__(self, username, api_key, url=None, collection_name="base"): - """ - Request for a username and API key from gh.vctr.ai! - """ - self.username = username - self.api_key = api_key - if url: - self.url = url - else: - self.url = "https://api.vctr.ai" - self.collection_name = collection_name - self._name = "default" - - @catch_vector_errors - def encode(self, text: Union[str, List[str]]): - """ - Convert text to vectors. - """ - if isinstance(text, str): - return requests.get( - url="{}/collection/encode_text".format(self.url), - params={ - "username": self.username, - "api_key": self.api_key, - "collection_name": self.collection_name, - "text": text, - }, - ).json() - elif isinstance(text, list): - return self.bulk_encode(text) - - @catch_vector_errors - def bulk_encode(self, texts: List[str]): - """ - Bulk convert text to vectors - """ - return requests.get( - url="{}/collection/bulk_encode_text".format(self.url), - params={ - "username": self.username, - "api_key": self.api_key, - "collection_name": self.collection_name, - "texts": texts, - } - ).json() - - @property - def __name__(self): - if self._name is None: - return "deployed_text" - return self._name - - @__name__.setter - def __name__(self, value): - self._name = value - - @property - def vector_length(self): - return 512 diff --git a/vectorhub/encoders/video/__init__.py b/vectorhub/encoders/video/__init__.py deleted file mode 100644 index e4d683b7..00000000 --- a/vectorhub/encoders/video/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sampler import FrameSamplingFilter \ No newline at end of file diff --git a/vectorhub/encoders/video/sampler.py b/vectorhub/encoders/video/sampler.py deleted file mode 100644 index 2090c4e7..00000000 --- a/vectorhub/encoders/video/sampler.py +++ /dev/null @@ -1,89 +0,0 @@ -from math import ceil -import numpy as np -import os -import tempfile -from ...import_utils import * - -if is_all_dependency_installed('encoders-video'): - import librosa - import soundfile as sf - from cv2 import cv2 - from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos - from moviepy.video.io.VideoFileClip import VideoFileClip - - -class FrameSamplingFilter(): - - def __init__(self, every=None, hertz=None, top_n=None): - if every is None and hertz is None and top_n is None: - raise ValueError("When initializing the FrameSamplingFilter, " - "one of the 'every', 'hertz', or 'top_n' must " - "be specified.") - self.every = every - self.hertz = hertz - self.top_n = top_n - - def get_audio_sampling_rate(self, filename: str): - infos = ffmpeg_parse_infos(filename) - fps = infos.get('audio_fps', 44100) - if fps == 'unknown': - fps = 44100 - return fps - - def load_clip(self, filename: str): - audio_fps = self.get_audio_sampling_rate(filename) - self.clip = VideoFileClip(filename, audio_fps) - - def initialize_video(self, filename: str): - self.filename = filename - self.load_clip(filename) - self.fps = self.clip.fps - self.width = self.clip.w - self.height = self.clip.h - self.frame_index = range(int(ceil(self.fps * self.clip.duration))) - 
self.duration = self.clip.duration - self.n_frames = len(self.frame_index) - - def get_audio_vector(self, new_sampling_rate: int = 16000): - fd, fp = tempfile.mkstemp() - audio = f'{fp}.wav' - self.clip.audio.to_audiofile(audio) - data, sampling_rate = sf.read(audio, dtype='float32') - os.close(fd) - os.remove(audio) - return np.array(librosa.resample(data.T, sampling_rate, new_sampling_rate)) - - def transform(self, filename: str): - self.initialize_video(filename) - - if (self.every is not None): - new_idx = range(self.n_frames)[::self.every] - elif (self.hertz is not None): - interval = self.fps / float(self.hertz) - new_idx = np.arange(0, self.n_frames, interval).astype(int) - new_idx = list(new_idx) - elif self.top_n is not None: - diffs = [] - for i, img in enumerate(range(self.n_frames)): - if i == 0: - last = img - continue - pixel_diffs = cv2.sumElems(cv2.absdiff( - self.get_frame(last), self.get_frame(img))) - diffs.append(sum(pixel_diffs)) - last = img - new_idx = sorted(range(len(diffs)), - key=lambda i: diffs[i], - reverse=True)[:self.top_n] - - result = [] - for index in new_idx: - result.append(self.get_frame(index)) - return result - - def get_frame(self, index: int): - return self.clip.get_frame(index) - - def iter_frames(self): - for i, f in enumerate(self.frame_index): - yield self.get_frame(f) diff --git a/vectorhub/errors.py b/vectorhub/errors.py deleted file mode 100644 index 919bce05..00000000 --- a/vectorhub/errors.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Errors -""" - -class ModelError(Exception): - """Base error class for all errors in library - """ - - def __init__(self, message: str): - """ - The main Vector Hub base error. - Args: - message: The error message - Example: - >>> raise ModelError("Missing ____.") - """ - self.response_message = message \ No newline at end of file diff --git a/vectorhub/import_utils.py b/vectorhub/import_utils.py deleted file mode 100644 index 5e42babd..00000000 --- a/vectorhub/import_utils.py +++ /dev/null @@ -1,64 +0,0 @@ -""" - Utilities for importing libraries. -""" -import sys -import warnings -import pkg_resources -import json -import os -import importlib -from contextlib import contextmanager -import sys - -def get_package_requirements(requirement_type: str): - """ - Load in extra_requirements.json from the package - """ - os.path.dirname(os.path.realpath(__file__)) - requirements = json.load(open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'extra_requirements.json'))) - dependencies = [] - for k, v in requirements.items(): - if requirement_type in v: - dependencies.append(k) - return dependencies - -def is_dependency_installed(dependency: str): - """ - Returns True if the dependency is installed else False. - """ - IS_INSTALLED = True - try: - pkg_resources.get_distribution(dependency) - except pkg_resources.ContextualVersionConflict: - IS_INSTALLED = True - except: - IS_INSTALLED = False - return IS_INSTALLED - -def is_all_dependency_installed(requirement_type: str, raise_warning=True): - """ - Returns True/False if the dependency is isntalled - Args: - requirement_type: The type of requirement. This can be found in the values in extra_requirements.json - raise_warning: Raise warning if True - """ - IS_ALL_INSTALLED = True - requirements = get_package_requirements(requirement_type) - for r in requirements: - if not is_dependency_installed(r): - if raise_warning: - warnings.warn(f"You are missing {r} dependency for this submodule. 
Run `pip install vectorhub[{requirement_type}]`") - IS_ALL_INSTALLED = False - return IS_ALL_INSTALLED - -# @contextmanager -# def catch_import_error(*args, **kw): -# """Tryies to run statement otherwise it fails -# """ -# try: -# importlib.import_module(name=name, package=package) -# except ImportError: -# if package is None: -# warnings.warn("Missing {}. Try re-starting notebook/environment if you just installed.".format(name)) -# else: -# warnings.warn("Missing {} from {}. Try re-starting notebook/environment if you just installed.".format(name, package)) diff --git a/vectorhub/indexer.py b/vectorhub/indexer.py deleted file mode 100644 index 9d1ab6fc..00000000 --- a/vectorhub/indexer.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Indexer with the model. -""" -import warnings -from typing import List, Any, Optional - -class ViIndexer: - @property - def encoder_type(self): - """The encoder type ensures it uses either the 'encode' or 'encode_question'/'encode_answer' - Currently supported encoder types: - Question-Answer - Text-Image - Encoder - """ - if self.definition.model_id.startswith('qa'): - return 'qa' - elif self.definition.model_id.startswith('text_image'): - return 'text_image' - else: - return 'encoder' - - def request_api_key(self, username: str, email: str, referral_code="vectorhub_referred"): - """ - Requesting an API key. - """ - print("API key is being requested. Be sure to save it somewhere!") - - from vectorai import request_api_key - return request_api_key(username=username, email=email, - description='vectorhub', referral_code=referral_code) - - def add_documents(self, username: str, api_key: str, - items: List[Any], metadata: Optional[List[Any]]=None, - collection_name: str=None): - """ - Add documents to the Vector AI cloud. - """ - from vectorai import ViClient - self.username = username - self.api_key = api_key - if collection_name is not None: - self.collection_name = collection_name - else: - self.collection_name = 'vectorhub_collection_with_' + self.__name__.lower() - if metadata is not None: - docs = [self._create_document(item, metadata) for i, (item, metadata) in enumerate(list(zip(items, metadata)))] - else: - docs = [self._create_document(item) for i, item in enumerate(items)] - - self.client = ViClient(username, api_key) - if self.encoder_type == 'encoder': - return self.client.insert_documents(self.collection_name, docs, {'item': self}, overwrite=True) - elif self.encoder_type == 'qa': - return self.client.insert_documents(self.collection_name, docs, {'item': self}, overwrite=True) - elif self.encoder_type == 'text_image': - return self.client.insert_documents(self.collection_name, docs, {'item': self}, overwrite=True) - - def _create_document(self, item: List[str], metadata=None): - return { - # '_id': str(_id), - 'item': item, - 'metadata': metadata - } - - def delete_collection(self, collection_name=None): - if collection_name is None: - collection_name = self.collection_name - return self.delete_collection(collection_name) - - def get_vector_field_name(self): - # if self.encoder_type in ('qa'): - # return 'item_vector_' - # elif self.encoder_type in ('encoder', 'text_image'): - return f'item_{self.__name__}_vector_' - - def search(self, item: Any, num_results: int=10): - """ - Simple search with Vector AI - """ - warnings.warn("If you are looking for more advanced functionality, we recommend using the official Vector AI Github package") - if self.encoder_type == 'encoder': - return self.client.search(self.collection_name, self.encode(item), - 
field=self.get_vector_field_name(), page_size=num_results) - elif self.encoder_type == 'qa': - return self.client.search(self.collection_name, self.encode_question(item), - field=self.get_vector_field_name(), page_size=num_results) - elif self.encoder_type == 'text_image': - return self.client.search(self.collection_name, self.encode_text(item), - field=self.get_vector_field_name(), page_size=num_results) - - def retrieve_documents(self, num_of_documents: int): - """ - Get all the documents in our package. - """ - return self.client.retrieve_documents(self.collection_name, page_size=num_of_documents)['documents'] - - def retrieve_all_documents(self): - """ - Retrieve all documents. - """ - return self.retrieve_all_documents(self.collection_name) diff --git a/vectorhub/models_dict.py b/vectorhub/models_dict.py deleted file mode 100644 index de299aa7..00000000 --- a/vectorhub/models_dict.py +++ /dev/null @@ -1,47 +0,0 @@ -""" - Dictionary For Models -""" - -# Map Model to the requirements here. -# This is used to allow the user to list what models they have which can be installed. - -MODEL_REQUIREMENTS = { - "encoders-audio-pytorch-fairseq": "encoders-audio-pytorch", - "encoders-audio-tfhub-speech_embedding": "encoders-audio-tfhub", - "encoders-audio-tfhub-trill": 'encoders-audio-tfhub', - "encoders-audio-tfhub-vggish": "encoders-audio-tfhub", - "encoders-audio-tfhub-yamnet": "encoders-audio-tfhub", - "encoders-audio-vectorai": None, - - "encoders-text-tfhub-albert": "encoders-text-tfhub", - "encoders-text-tfhub-bert": "encoders-text-tfhub-windows", - "encoders-text-tfhub-expertsbert": "encoders-text-tfhub", - "encoders-text-tfhub-elmo": "encoders-text-tfhub", - "encoders-text-tfhub-labse": "encoders-text-tfhub-windows", - "encoders-text-tfhub-use": "encoders-text-tfhub-windows", - "encoders-text-tfhub-use-multi": "encoders-text-tfhub", - "encoders-text-tfhub-use-transformer": "encoders-text-tfhub", - "encoders-text-torch-transformers-legalbert": "encoders-text-torch-transformers", - "encoders-text-tf-transformers-auto": "encoders-text-tf-transformers", - "encoders-text-torch-transformers-auto": "encoders-text-torch-transformers", - "encoders-text-torch-transformers-longformer": "encoders-text-torch-transformers", - "encoders-text-sentence-transformers": "encoders-text-sentence-transformers", - "encoders-text-vectorai": None, - - "encoders-image-tfhub-bit": "encoders-image-tfhub", - "encoders-image-tfhub-resnet": "encoders-image-tfhub", - "encoders-image-tfhub-inception": "encoders-image-tfhub", - "encoders-image-tfhub-inception-resnet": "encoders-image-tfhub", - "encoders-image-tfhub-mobilenet": "encoders-image-tfhub", - "encoders-image-fastai-resnet": "encoders-image-fastai", - "encoders-image-vectorai": None, - "encoders-image-tf-face-detection": "encoders-image-tf-face-detection", - - "text-bi-encoder-tfhub-lareqa-qa": "encoders-text-tfhub", - "text-bi-encoder-tfhub-use-qa": "encoders-text-tfhub", - "text-bi-encoder-torch-dpr": "encoders-text-torch-transformers", - - "encoders-code-transformers": "encoders-text-transformers", - - "text-image-bi-encoder-clip": "clip" -} diff --git a/vectorhub/options.py b/vectorhub/options.py deleted file mode 100644 index f96b25dd..00000000 --- a/vectorhub/options.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Python Options -To access Python options for VectorHub -""" -from enum import Enum -class IfErrorReturns(Enum): - RETURN_NONE: str="RETURN_NONE" - RETURN_EMPTY_VECTOR: str = "RETURN_EMPTY_VECTOR" - RAISE_ERROR: str = "RAISE_ERROR" - -OPTIONS = { - 
'if_error': IfErrorReturns.RETURN_NONE -} - -def get_option(field): - """Get an option with a specific field name. - """ - return OPTIONS[field] - -def set_option(field, value): - """Set an option with a specific field name. - """ - OPTIONS[field] = value diff --git a/vectorhub/utils.py b/vectorhub/utils.py deleted file mode 100644 index f858e4c3..00000000 --- a/vectorhub/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -""" - Various utilities for VectorHub. -""" -import json -import os -from pathlib import Path -from pkg_resources import resource_filename -from collections import defaultdict -from .models_dict import * -from .import_utils import * - -def list_installed_models(extra_requirements_file: str=resource_filename('vectorhub', '../extra_requirements.json')): - """ - List models that are installed. - We use resource_filename to resolve relative directory issues. - """ - requirements = json.load(open(extra_requirements_file)) - print("The following packages are available to be used: ") - all_available_models = [] - for package, dependency in MODEL_REQUIREMENTS.items(): - if is_all_dependency_installed(dependency, raise_warning=False): - print(package) - all_available_models.append(package) - return all_available_models - -def list_models(return_names_only=False): - """ - List available models. - Args: - return_names_only: Return the model names - """ - if return_names_only: - return [x.stem for x in list(Path('.').glob('**/[!_]*.py'))] - - all_models = [str(x).replace('.py', '') for x in list(Path(resource_filename('vectorhub', 'encoders/text')).rglob(f'[!_]*.py'))] + \ - [str(x).replace('.py', '') for x in list(Path(resource_filename('vectorhub', 'encoders/image')).rglob(f'[!_]*.py'))] + \ - [str(x).replace('.py', '') for x in list(Path(resource_filename('vectorhub', 'encoders/audio')).rglob(f'[!_]*.py'))] + \ - [str(x).replace('.py', '') for x in list(Path(resource_filename('vectorhub', 'bi_encoders/qa')).rglob(f'[!_]*.py'))] - return [x.split('vectorhub/')[-1] for x in all_models if '/base' not in x] -
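A short usage sketch for the helpers defined above. The import paths are assumed from the file locations in this diff (vectorhub/options.py and vectorhub/import_utils.py), and the checked dependency name is only an example.

```python
from vectorhub.options import IfErrorReturns, get_option, set_option
from vectorhub.import_utils import is_dependency_installed

print(get_option("if_error"))  # IfErrorReturns.RETURN_NONE by default
set_option("if_error", IfErrorReturns.RAISE_ERROR)
print(get_option("if_error"))

# Check a single dependency before instantiating a model that needs it.
if is_dependency_installed("numpy"):
    print("numpy is installed")
```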