From df811d0deacebfd6cc77e8bf501d9b87ff006fb5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 4 Apr 2022 21:15:08 -0500 Subject: [PATCH] run auto formatters cleanup website --- .github/ISSUE_TEMPLATE.md | 4 +- .github/workflows/build.yml | 110 +-- .pre-commit-config.yaml | 67 +- LICENSE.md | 6 +- MANIFEST.in | 1 - README.md | 41 +- conftest.py | 8 +- docs/doc-requirements.txt | 4 +- docs/source/_static/custom.css | 4 +- docs/source/conf.py | 84 +-- docs/source/contributors/contrib.md | 11 +- docs/source/contributors/debug.md | 13 +- docs/source/contributors/devinstall.md | 32 +- docs/source/contributors/docker.md | 23 +- docs/source/contributors/roadmap.md | 49 +- .../contributors/system-architecture.md | 336 +++++---- docs/source/developers/custom-images.md | 54 +- docs/source/developers/dev-process-proxy.md | 22 +- docs/source/developers/kernel-launcher.md | 25 +- docs/source/developers/kernel-library.md | 8 +- docs/source/developers/kernel-manager.md | 9 +- .../source/developers/kernel-specification.md | 17 +- docs/source/developers/rest-api.rst | 1 - docs/source/operators/config-add-env.md | 16 +- docs/source/operators/config-cli.md | 5 +- docs/source/operators/config-culling.md | 1 + docs/source/operators/config-dynamic.md | 15 +- docs/source/operators/config-env-debug.md | 4 +- docs/source/operators/config-file.md | 10 +- .../operators/config-kernel-override.md | 38 +- docs/source/operators/config-security.md | 108 +-- docs/source/operators/config-sys-env.md | 5 +- docs/source/operators/deploy-conductor.md | 8 +- docs/source/operators/deploy-distributed.md | 43 +- docs/source/operators/deploy-docker.md | 50 +- docs/source/operators/deploy-kubernetes.md | 465 +++++++----- docs/source/operators/deploy-single.md | 11 +- docs/source/operators/deploy-yarn-cluster.md | 32 +- docs/source/operators/installing-eg.md | 7 +- docs/source/operators/installing-kernels.md | 10 +- docs/source/operators/launching-eg.md | 8 +- docs/source/other/related-resources.md 
| 18 +- docs/source/other/troubleshooting.md | 204 +++--- docs/source/users/client-config.md | 10 +- docs/source/users/connecting-to-eg.md | 17 +- docs/source/users/index.rst | 1 - docs/source/users/installation.md | 11 +- docs/source/users/kernel-envs.md | 6 +- enterprise_gateway/__init__.py | 3 + enterprise_gateway/__main__.py | 4 +- enterprise_gateway/_version.py | 2 +- enterprise_gateway/base/handlers.py | 22 +- enterprise_gateway/client/gateway_client.py | 276 ++++--- enterprise_gateway/enterprisegatewayapp.py | 172 +++-- enterprise_gateway/itests/__init__.py | 1 + .../kernels/authorization_test/kernel.json | 11 +- .../itests/test_authorization.py | 17 +- enterprise_gateway/itests/test_base.py | 7 +- .../itests/test_python_kernel.py | 62 +- enterprise_gateway/itests/test_r_kernel.py | 60 +- .../itests/test_scala_kernel.py | 60 +- enterprise_gateway/mixins.py | 613 ++++++++++------ enterprise_gateway/services/api/handlers.py | 21 +- enterprise_gateway/services/api/swagger.json | 102 +-- enterprise_gateway/services/api/swagger.yaml | 58 +- .../services/kernels/handlers.py | 84 ++- .../services/kernels/remotemanager.py | 295 +++++--- .../services/kernelspecs/__init__.py | 2 +- .../services/kernelspecs/handlers.py | 109 +-- .../services/kernelspecs/kernelspec_cache.py | 94 ++- .../services/processproxies/conductor.py | 598 +++++++++------ .../services/processproxies/container.py | 105 ++- .../services/processproxies/crd.py | 35 +- .../services/processproxies/distributed.py | 94 ++- .../services/processproxies/docker_swarm.py | 168 +++-- .../services/processproxies/k8s.py | 199 +++-- .../services/processproxies/processproxy.py | 693 +++++++++++------- .../services/processproxies/spark_operator.py | 17 +- .../services/processproxies/yarn.py | 322 +++++--- .../services/sessions/handlers.py | 19 +- .../services/sessions/kernelsessionmanager.py | 158 ++-- .../services/sessions/sessionmanager.py | 38 +- enterprise_gateway/tests/__init__.py | 2 +- 
.../kernels/kernel_defaults_test/kernel.json | 8 +- .../tests/resources/public/index.html | 14 +- .../tests/test_enterprise_gateway.py | 112 +-- enterprise_gateway/tests/test_gatewayapp.py | 109 +-- enterprise_gateway/tests/test_handlers.py | 529 +++++++------ .../tests/test_kernelspec_cache.py | 89 ++- enterprise_gateway/tests/test_mixins.py | 60 +- etc/docker/demo-base/README.md | 24 +- etc/docker/docker-compose.yml | 1 - etc/docker/enterprise-gateway-demo/README.md | 40 +- .../bootstrap-enterprise-gateway.sh | 2 +- .../start-enterprise-gateway.sh.template | 2 - etc/docker/enterprise-gateway/README.md | 19 +- .../start-enterprise-gateway.sh | 2 - etc/docker/kernel-image-puller/README.md | 21 +- .../kernel_image_puller.py | 72 +- .../kernel-image-puller/requirements.txt | 2 +- etc/docker/kernel-py/README.md | 10 +- etc/docker/kernel-r/Dockerfile | 1 - etc/docker/kernel-r/README.md | 8 +- etc/docker/kernel-scala/README.md | 10 +- etc/docker/kernel-spark-py/README.md | 10 +- etc/docker/kernel-spark-r/README.md | 8 +- etc/docker/kernel-tf-gpu-py/README.md | 6 +- etc/docker/kernel-tf-py/README.md | 8 +- .../R/scripts/server_listener.py | 145 ++-- .../bootstrap/bootstrap-kernel.sh | 1 - .../docker/scripts/launch_docker.py | 189 +++-- .../kubernetes/scripts/launch_kubernetes.py | 178 +++-- .../scripts/launch_custom_resource.py | 116 ++- .../python/scripts/launch_ipykernel.py | 344 ++++++--- etc/kernel-resources/ir/kernel.js | 149 ++-- etc/kernelspecs/R_docker/kernel.json | 3 +- etc/kernelspecs/R_kubernetes/kernel.json | 3 +- .../python_distributed/kernel.json | 2 +- etc/kernelspecs/python_docker/kernel.json | 3 +- etc/kernelspecs/python_kubernetes/kernel.json | 3 +- etc/kernelspecs/python_tf_docker/kernel.json | 3 +- .../python_tf_gpu_docker/kernel.json | 3 +- .../python_tf_gpu_kubernetes/kernel.json | 3 +- .../python_tf_kubernetes/kernel.json | 3 +- etc/kernelspecs/scala_docker/kernel.json | 3 +- etc/kernelspecs/scala_kubernetes/kernel.json | 3 +- .../kernel.json | 4 
+- .../spark_python_yarn_cluster/bin/run.sh | 2 +- etc/kubernetes/enterprise-gateway.yaml | 8 +- .../helm/enterprise-gateway/Chart.yaml | 1 - .../templates/daemonset.yaml | 2 +- .../templates/deployment.yaml | 2 +- .../enterprise-gateway/templates/ingress.yaml | 1 - .../helm/enterprise-gateway/values.yaml | 6 +- readthedocs.yml | 4 +- requirements.yml | 4 +- setup.cfg | 2 - setup.py | 111 +-- website/_data/navigation.yml | 2 +- website/_layouts/home.html | 2 +- website/_layouts/page.html | 2 +- website/_sass/_base.scss | 0 website/_sass/_mixins.scss | 2 +- website/css/animate.min.css | 2 +- website/css/bootstrap.min.css | 2 +- website/font-awesome/css/font-awesome.min.css | 2 +- .../fonts/fontawesome-webfont.svg | 2 +- website/font-awesome/less/variables.less | 1 - website/font-awesome/scss/_variables.scss | 1 - .../fonts/glyphicons-halflings-regular.svg | 2 +- website/js/bootstrap.min.js | 2 +- website/js/cbpAnimatedHeader.js | 4 +- website/js/classie.js | 2 +- website/js/jquery.easing.min.js | 38 +- website/js/jquery.js | 2 +- website/js/wow.min.js | 2 +- website/platform-kubernetes.md | 5 +- website/platform-spark.md | 25 +- website/privacy-policy.md | 1 - website/publish.sh | 2 - 160 files changed, 5268 insertions(+), 3858 deletions(-) mode change 100755 => 100644 website/_sass/_base.scss mode change 100755 => 100644 website/_sass/_mixins.scss diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 19e7e8205..5e4379eda 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,9 +1,10 @@ Help us improve the Jupyter Enterprise Gateway project by reporting issues -or asking questions. +or asking questions. ## Description ## Screenshots / Logs + If applicable, add screenshots and/or logs to help explain your problem. To generate better logs, please run the gateway with `--debug` command line parameter. 
@@ -12,4 +13,3 @@ To generate better logs, please run the gateway with `--debug` command line para - Enterprise Gateway Version [e.g. 1.x, 2.x, ...] - Platform: [e.g. YARN, Kubernetes ...] - Others [e.g. Jupyter Server 5.7, JupyterHub 1.0, etc] - diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ce0f6c574..099b46e1f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -13,62 +13,62 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: [ '3.7', '3.8', '3.9', '3.10' ] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - - name: Checkout - uses: actions/checkout@v2 - with: - clean: true - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - architecture: 'x64' - - name: Display dependency info - run: | - python --version - pip --version - conda --version - - name: Add SBT launcher - run: | - mkdir -p $HOME/.sbt/launchers/1.3.12 - curl -L -o $HOME/.sbt/launchers/1.3.12/sbt-launch.jar https://repo1.maven.org/maven2/org/scala-sbt/sbt-launch/1.3.12/sbt-launch.jar - - name: Install Python dependencies - run: | - python -m pip install --upgrade setuptools pip websocket-client flake8 pytest pytest-tornasync ipykernel coverage codecov - - name: Build Jupyter Enterprise Gateway conda env - run: | - SA="source $CONDA_HOME/bin/activate" make env - - name: Build and install Jupyter Enterprise Gateway - uses: nick-invision/retry@v1.0.0 - with: - timeout_minutes: 10 - max_attempts: 2 - command: | - SA="source $CONDA_HOME/bin/activate" make clean dist enterprise-gateway-demo - python -m pip install --upgrade dist/*.whl - - name: Log current Python dependencies version - run: | - pip freeze - - name: Log current Enterprise Gateway version - run: | - jupyter enterprisegateway --help - - name: Run tests - uses: nick-invision/retry@v1.0.0 - with: - timeout_minutes: 3 - max_attempts: 1 - command: | - pytest -v -s 
enterprise_gateway/tests - - name: Run integration tests - run: | - SA="source $CONDA_HOME/bin/activate" make itest-yarn - - name: Collect logs - run: | - python --version - pip --version - pip list - docker logs itest-yarn + - name: Checkout + uses: actions/checkout@v2 + with: + clean: true + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + architecture: "x64" + - name: Display dependency info + run: | + python --version + pip --version + conda --version + - name: Add SBT launcher + run: | + mkdir -p $HOME/.sbt/launchers/1.3.12 + curl -L -o $HOME/.sbt/launchers/1.3.12/sbt-launch.jar https://repo1.maven.org/maven2/org/scala-sbt/sbt-launch/1.3.12/sbt-launch.jar + - name: Install Python dependencies + run: | + python -m pip install --upgrade setuptools pip websocket-client flake8 pytest pytest-tornasync ipykernel coverage codecov + - name: Build Jupyter Enterprise Gateway conda env + run: | + SA="source $CONDA_HOME/bin/activate" make env + - name: Build and install Jupyter Enterprise Gateway + uses: nick-invision/retry@v1.0.0 + with: + timeout_minutes: 10 + max_attempts: 2 + command: | + SA="source $CONDA_HOME/bin/activate" make clean dist enterprise-gateway-demo + python -m pip install --upgrade dist/*.whl + - name: Log current Python dependencies version + run: | + pip freeze + - name: Log current Enterprise Gateway version + run: | + jupyter enterprisegateway --help + - name: Run tests + uses: nick-invision/retry@v1.0.0 + with: + timeout_minutes: 3 + max_attempts: 1 + command: | + pytest -v -s enterprise_gateway/tests + - name: Run integration tests + run: | + SA="source $CONDA_HOME/bin/activate" make itest-yarn + - name: Collect logs + run: | + python --version + pip --version + pip list + docker logs itest-yarn link_check: runs-on: ubuntu-latest diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d197b58f3..5e17aeea0 100644 --- a/.pre-commit-config.yaml 
+++ b/.pre-commit-config.yaml @@ -5,42 +5,47 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.1.0 hooks: - # - id: end-of-file-fixer - # - id: check-case-conflict - # - id: check-executables-have-shebangs - # - id: requirements-txt-fixer - # - id: check-added-large-files - # - id: check-case-conflict - # - id: check-toml - # - id: check-yaml + - id: end-of-file-fixer + - id: check-case-conflict + - id: check-executables-have-shebangs + - id: requirements-txt-fixer + - id: check-added-large-files + - id: check-case-conflict + - id: check-toml + - id: check-yaml + exclude: etc/kubernetes/.*.yaml - id: debug-statements - # - id: forbid-new-submodules - # - id: check-builtin-literals - # - id: trailing-whitespace + - id: forbid-new-submodules + - id: trailing-whitespace - # - repo: https://github.com/psf/black - # rev: 22.3.0 - # hooks: - # - id: black - # args: ["--line-length", "100"] + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + args: ["--line-length", "100"] - # - repo: https://github.com/PyCQA/isort - # rev: 5.10.1 - # hooks: - # - id: isort - # files: \.py$ - # args: [--profile=black] + - repo: https://github.com/PyCQA/isort + rev: 5.10.1 + hooks: + - id: isort + files: \.py$ + args: [--profile=black] - # - repo: https://github.com/pre-commit/mirrors-prettier - # rev: v2.6.1 - # hooks: - # - id: prettier + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v2.6.1 + hooks: + - id: prettier + exclude: | + (?x)^( + etc/kubernetes/.*.yaml| + website/.* + )$ - # - repo: https://github.com/asottile/pyupgrade - # rev: v2.31.1 - # hooks: - # - id: pyupgrade - # args: [--py37-plus] + - repo: https://github.com/asottile/pyupgrade + rev: v2.31.1 + hooks: + - id: pyupgrade + args: [--py37-plus] - repo: https://github.com/PyCQA/doc8 rev: 0.11.1 diff --git a/LICENSE.md b/LICENSE.md index 333a1d745..7ab7a0ef6 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -25,7 +25,7 @@ software without specific prior written 
permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER @@ -48,8 +48,8 @@ Jupyter uses a shared copyright model. Each contributor maintains copyright over their contributions to Jupyter. But, it is important to note that these contributions are typically only changes to the repositories. Thus, the Jupyter source code, in its entirety is not the copyright of any single person or -institution. Instead, it is the collective copyright of the entire Jupyter -Development Team. If individual contributors want to maintain a record of what +institution. Instead, it is the collective copyright of the entire Jupyter +Development Team. If individual contributors want to maintain a record of what changes/contributions they have specific copyright on, they should indicate their copyright in the commit message of the change, when they commit the change to one of the Jupyter repositories. 
diff --git a/MANIFEST.in b/MANIFEST.in index 7d7d5fe81..5d68b4af3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -15,4 +15,3 @@ exclude *.yaml prune enterprise_gateway/tests prune enterprise_gateway/itests - diff --git a/README.md b/README.md index 8840bebf0..766f89a7d 100644 --- a/README.md +++ b/README.md @@ -17,9 +17,9 @@ including Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Do It provides out of the box support for the following kernels: -* Python using IPython kernel -* R using IRkernel -* Scala using Apache Toree kernel +- Python using IPython kernel +- R using IRkernel +- Scala using Apache Toree kernel Full Documentation for Jupyter Enterprise Gateway can be found [here](https://jupyter-enterprise-gateway.readthedocs.io/en/latest) @@ -28,24 +28,24 @@ you should use [JupyterHub](https://github.com/jupyterhub/jupyterhub). ## Technical Overview -Jupyter Enterprise Gateway is a web server that provides headless access to Jupyter kernels within -an enterprise. Inspired by Jupyter Kernel Gateway, Jupyter Enterprise Gateway provides feature parity with Kernel Gateway's [jupyter-websocket mode](https://jupyter-kernel-gateway.readthedocs.io/en/latest/websocket-mode.html) in addition to the following: -* Adds support for remote kernels hosted throughout the enterprise where kernels can be launched in -the following ways: - * Local to the Enterprise Gateway server (today's Kernel Gateway behavior) - * On specific nodes of the cluster utilizing a round-robin algorithm - * On nodes identified by an associated resource manager -* Provides support for Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm out of the box. Others can be configured via Enterprise Gateway's extensible framework. 
-* Secure communication from the client, through the Enterprise Gateway server, to the kernels -* Multi-tenant capabilities -* Persistent kernel sessions -* Ability to associate profiles consisting of configuration settings to a kernel for a given user (see [Project Roadmap](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/roadmap.html)) +Jupyter Enterprise Gateway is a web server that provides headless access to Jupyter kernels within +an enterprise. Inspired by Jupyter Kernel Gateway, Jupyter Enterprise Gateway provides feature parity with Kernel Gateway's [jupyter-websocket mode](https://jupyter-kernel-gateway.readthedocs.io/en/latest/websocket-mode.html) in addition to the following: + +- Adds support for remote kernels hosted throughout the enterprise where kernels can be launched in + the following ways: + _ Local to the Enterprise Gateway server (today's Kernel Gateway behavior) + _ On specific nodes of the cluster utilizing a round-robin algorithm \* On nodes identified by an associated resource manager +- Provides support for Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm out of the box. Others can be configured via Enterprise Gateway's extensible framework. +- Secure communication from the client, through the Enterprise Gateway server, to the kernels +- Multi-tenant capabilities +- Persistent kernel sessions +- Ability to associate profiles consisting of configuration settings to a kernel for a given user (see [Project Roadmap](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/roadmap.html)) ![Deployment Diagram](https://github.com/jupyter-server/enterprise_gateway/blob/master/docs/source/images/deployment.png?raw=true) ## Installation -Detailed installation instructions are located in the +Detailed installation instructions are located in the [Users Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/users/index.html) of the project docs. 
Here's a quick start using `pip`: @@ -60,17 +60,16 @@ jupyter enterprisegateway --help-all jupyter enterprisegateway ``` -Please check the [configuration options within the Operators Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/operators/index.html#configuring-enterprise-gateway) +Please check the [configuration options within the Operators Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/operators/index.html#configuring-enterprise-gateway) for information about the supported options. ## System Architecture -The [System Architecture page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/system-architecture.html) +The [System Architecture page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/system-architecture.html) includes information about Enterprise Gateway's remote kernel, process proxy, and launcher frameworks. ## Contributing -The [Contribution page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/contrib.html) includes -information about how to contribute to Enterprise Gateway along with our roadmap. While there, you'll want to +The [Contribution page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/contrib.html) includes +information about how to contribute to Enterprise Gateway along with our roadmap. While there, you'll want to [set up a development environment](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/devinstall.html) and check out typical developer tasks. - diff --git a/conftest.py b/conftest.py index 4e3fa939f..668cb28f1 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,3 @@ - - def pytest_addoption(parser): parser.addoption("--host", action="store", default="localhost:8888") parser.addoption("--username", action="store", default="elyra") @@ -9,9 +7,9 @@ def pytest_addoption(parser): def pytest_generate_tests(metafunc): # This is called for every test. 
Only get/set command line arguments # if the argument is specified in the list of test "fixturenames". - if 'host' in metafunc.fixturenames: + if "host" in metafunc.fixturenames: metafunc.parametrize("host", [metafunc.config.option.host]) - if 'username' in metafunc.fixturenames: + if "username" in metafunc.fixturenames: metafunc.parametrize("username", [metafunc.config.option.username]) - if 'impersonation' in metafunc.fixturenames: + if "impersonation" in metafunc.fixturenames: metafunc.parametrize("impersonation", [metafunc.config.option.impersonation]) diff --git a/docs/doc-requirements.txt b/docs/doc-requirements.txt index 69d126a13..f7f85c22a 100644 --- a/docs/doc-requirements.txt +++ b/docs/doc-requirements.txt @@ -5,9 +5,9 @@ mistune<1 myst-parser pydata_sphinx_theme sphinx +sphinx-markdown-tables +sphinx_book_theme sphinxcontrib-openapi sphinxcontrib_github_alt sphinxemoji -sphinx_book_theme -sphinx-markdown-tables tornado diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css index 7bfd65609..1ceffe0b6 100644 --- a/docs/source/_static/custom.css +++ b/docs/source/_static/custom.css @@ -1,3 +1,3 @@ body div.sphinxsidebarwrapper p.logo { - text-align: left; -} \ No newline at end of file + text-align: left; +} diff --git a/docs/source/conf.py b/docs/source/conf.py index d30800910..f097b9a60 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # This file is execfile()d with the current directory set to its # containing dir. @@ -19,7 +18,7 @@ # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '3.0' +needs_sphinx = "3.0" # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -37,26 +36,26 @@ ] myst_enable_extensions = ["html_image"] -myst_heading_anchors = 4 # Needs to be 4 or higher +myst_heading_anchors = 4 # Needs to be 4 or higher # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. source_suffix = { - '.rst': 'restructuredtext', - '.txt': 'markdown', - '.md': 'markdown', + ".rst": "restructuredtext", + ".txt": "markdown", + ".md": "markdown", } # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Jupyter Enterprise Gateway' +project = "Jupyter Enterprise Gateway" copyright = "2022, Project Jupyter" author = "Jupyter Server Team" @@ -64,16 +63,16 @@ # |version| and |release|, also used in various other places throughout the # built documents. # -_version_py = os.path.join('..', '..', 'enterprise_gateway', '_version.py') +_version_py = os.path.join("..", "..", "enterprise_gateway", "_version.py") version_ns = {} -with open(_version_py, mode='r') as version_file: +with open(_version_py) as version_file: exec(version_file.read(), version_ns) # The short X.Y version. -version = version_ns['__version__'][:3] +version = version_ns["__version__"][:3] # The full version, including alpha/beta/rc tags. -release = version_ns['__version__'] +release = version_ns["__version__"] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -90,7 +89,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -108,7 +107,7 @@ # show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'default' +pygments_style = "default" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] @@ -166,7 +165,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied @@ -229,20 +228,17 @@ # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'EnterpriseGatewaydoc' +htmlhelp_basename = "EnterpriseGatewaydoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # 'preamble': '', - # Latex figure (float) alignment # 'figure_align': 'htbp', } @@ -251,13 +247,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - ( - master_doc, - 'EnterpriseGateway.tex', - 'Enterprise Gateway Documentation', - 'https://jupyter.org', - 'manual' - ), + ( + master_doc, + "EnterpriseGateway.tex", + "Enterprise Gateway Documentation", + "https://jupyter.org", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of @@ -285,15 +281,7 @@ # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - 'enterprise_gateway', - 'Enterprise Gateway Documentation', - [author], - 1 - ) -] +man_pages = [(master_doc, "enterprise_gateway", "Enterprise Gateway Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False @@ -305,15 +293,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ( - master_doc, - 'enterprise_gateway', - 'Enterprise Gateway Documentation', - author, - 'EnterpriseGateway', - 'One line description of project.', - 'Miscellaneous' - ), + ( + master_doc, + "enterprise_gateway", + "Enterprise Gateway Documentation", + author, + "EnterpriseGateway", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. @@ -375,7 +363,7 @@ # epub_post_files = [] # A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] +epub_exclude_files = ["search.html"] # The depth of the table of contents in toc.ncx. # epub_tocdepth = 3 @@ -401,7 +389,7 @@ # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { - 'python': ('https://docs.python.org/', None), + "python": ("https://docs.python.org/", None), "ipython": ("https://ipython.readthedocs.io/en/stable/", None), "jupyter": ("https://jupyter.readthedocs.io/en/latest/", None), } @@ -411,7 +399,7 @@ # Read The Docs # on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org -on_rtd = os.environ.get('READTHEDOCS', None) == 'True' +on_rtd = os.environ.get("READTHEDOCS", None) == "True" # if not on_rtd: # only import and set the theme if we're building docs locally # import sphinx_rtd_theme diff --git a/docs/source/contributors/contrib.md b/docs/source/contributors/contrib.md index 6de36f3ac..5a6da3970 100644 --- a/docs/source/contributors/contrib.md +++ b/docs/source/contributors/contrib.md @@ -1,11 +1,10 @@ # Contributing to Jupyter Enterprise Gateway -Thank you for your interest in Jupyter Enterprise Gateway! If you would like to contribute to the -project please first take a look at the +Thank you for your interest in Jupyter Enterprise Gateway! If you would like to contribute to the +project please first take a look at the [Project Jupyter Contributor Documentation](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html). -Enterprise Gateway has recently joined the [Jupyter Server organization](https://github.com/jupyter-server). Please check out our [team compass page](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass) and try to attend our weekly dev meeting as we have a common goal of making all Jupyter server-side applications better! - - Prior to your contribution, we strongly recommend getting acquainted with Enterprise Gateway by checking - out the [System Architecture](system-architecture.md) and [Development Workflow](devinstall.md) pages. +Enterprise Gateway has recently joined the [Jupyter Server organization](https://github.com/jupyter-server). 
Please check out our [team compass page](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass) and try to attend our weekly dev meeting as we have a common goal of making all Jupyter server-side applications better! +Prior to your contribution, we strongly recommend getting acquainted with Enterprise Gateway by checking +out the [System Architecture](system-architecture.md) and [Development Workflow](devinstall.md) pages. diff --git a/docs/source/contributors/debug.md b/docs/source/contributors/debug.md index 67df2d447..6c43f7f02 100644 --- a/docs/source/contributors/debug.md +++ b/docs/source/contributors/debug.md @@ -1,10 +1,11 @@ # Debugging Jupyter Enterprise Gateway -This page discusses how to go about debugging Enterprise Gateway. We also provide troubleshooting information + +This page discusses how to go about debugging Enterprise Gateway. We also provide troubleshooting information in our [Troubleshooting Guide](../other/troubleshooting.md). ## Configuring your IDE -While your mileage may vary depending on which IDE you are using, the steps below (using PyCharm as an example) should be useful for configuring a debugging session for Enterprise Gateway with minimum +While your mileage may vary depending on which IDE you are using, the steps below (using PyCharm as an example) should be useful for configuring a debugging session for Enterprise Gateway with minimum adjustments for different IDEs. 
### Creating a new Debug Configuration @@ -14,11 +15,13 @@ Go to Run->Edit Configuration and create a new python configuration with the fol ![Enterprise Gateway debug configuration](../images/debug_configuration.png) **Script Path:** + ```bash /Users/jovyan/opensource/jupyter/elyra/scripts/jupyter-enterprisegateway -``` +``` **Parameters:** + ```bash --ip=0.0.0.0 --log-level=DEBUG @@ -27,16 +30,18 @@ Go to Run->Edit Configuration and create a new python configuration with the fol ``` **Environment Variables:** + ```bash EG_ENABLE_TUNNELING=False ``` **Working Directory:** + ```bash /Users/jovyan/opensource/jupyter/elyra/scripts ``` ### Running in debug mode -Now that you have handled the necessary configuration, use Run-Debug and select the debug configuration +Now that you have handled the necessary configuration, use Run-Debug and select the debug configuration you just created and happy debugging! diff --git a/docs/source/contributors/devinstall.md b/docs/source/contributors/devinstall.md index 975f753b6..0c8251ad6 100644 --- a/docs/source/contributors/devinstall.md +++ b/docs/source/contributors/devinstall.md @@ -1,7 +1,7 @@ # Development Workflow -Here are instructions for setting up a development environment for the [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway) -server. It also includes common steps in the developer workflow such as building Enterprise Gateway, +Here are instructions for setting up a development environment for the [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway) +server. It also includes common steps in the developer workflow such as building Enterprise Gateway, running tests, building docs, packaging kernel specifications, etc. 
## Prerequisites @@ -20,11 +20,13 @@ cd !$ # clone this repo git clone https://github.com/jupyter-server/enterprise_gateway.git ``` + ## Make Enterprise Gateway's build environment is centered around `make` and the corresponding [`Makefile`](https://github.com/jupyter-server/enterprise_gateway/blob/master/Makefile). Entering `make` with no parameters yields the following: + ``` activate Print instructions to activate the virtualenv (default: enterprise-gateway-dev) clean-images Remove docker images (includes kernel-based images) @@ -48,6 +50,7 @@ nuke Make clean + remove conda env release Make a wheel + source release on PyPI test Run unit tests ``` + Some of the more useful commands are listed below. ## Build the conda environment @@ -59,15 +62,16 @@ running the enterprise gateway server, running tests, and building documentation make env ``` -By default, the env built will be named `enterprise-gateway-dev`. To produce a different conda env, -you can specify the name via the `ENV=` parameter. +By default, the env built will be named `enterprise-gateway-dev`. To produce a different conda env, +you can specify the name via the `ENV=` parameter. ```bash make ENV=my-conda-env env ``` + ```{admonition} Important! :class: warning -If using a non-default conda env, all `make` commands should include the `ENV=` parameter, +If using a non-default conda env, all `make` commands should include the `ENV=` parameter, otherwise the command will use the default environment. ``` @@ -78,27 +82,28 @@ Build a wheel file that can then be installed via `pip install` ``` make bdist ``` + The wheel file will reside in the `dist` directory. ## Build the kernelspec tar file -Enterprise Gateway includes several sets of kernel specifications for each of the three primary kernels: `IPython Kernel`,`IRkernel`, -and `Apache Toree` to demonstrate remote kernels and their corresponding launchers. These sets of files are then added to tar files corresponding to their target resource managers. 
In addition, a _combined_ tar file is also built containing all kernel specifications. Like the wheel file, these tar files will reside in the `dist` directory. +Enterprise Gateway includes several sets of kernel specifications for each of the three primary kernels: `IPython Kernel`,`IRkernel`, +and `Apache Toree` to demonstrate remote kernels and their corresponding launchers. These sets of files are then added to tar files corresponding to their target resource managers. In addition, a _combined_ tar file is also built containing all kernel specifications. Like the wheel file, these tar files will reside in the `dist` directory. ```bash make kernelspecs ``` ```{note} -Because the scala launcher requires a jar file, `make kernelspecs` requires the use of `sbt` to build the -scala launcher jar. Please consult the [sbt site](https://www.scala-sbt.org/) for directions to +Because the scala launcher requires a jar file, `make kernelspecs` requires the use of `sbt` to build the +scala launcher jar. Please consult the [sbt site](https://www.scala-sbt.org/) for directions to install/upgrade `sbt` on your platform. We currently use version 1.3.12. ``` ## Build distribution files Builds the files necessary for a given release: the wheel file, the source tar file, and the kernel specification tar -files. This is essentially a helper target consisting of the `bdist` `sdist` and `kernelspecs` targets. +files. This is essentially a helper target consisting of the `bdist` `sdist` and `kernelspecs` targets. ```bash make dist @@ -121,7 +126,8 @@ Run Sphinx to build the HTML documentation. ```bash make docs ``` -This command actually issues `make requirements html` from the `docs` sub-directory. + +This command actually issues `make requirements html` from the `docs` sub-directory. ## Run the unit tests @@ -133,7 +139,7 @@ make test ## Run the integration tests -Run the integration tests suite. +Run the integration tests suite. 
These tests will bootstrap the [`elyra/enterprise-gateway-demo`](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) docker image with Apache Spark using YARN resource manager and Jupyter Enterprise Gateway and perform various tests for each kernel in local, YARN client, and YARN cluster modes. @@ -144,7 +150,7 @@ make itest-yarn ## Build the docker images -The following can be used to build all docker images used within the project. See [docker images](docker.md) for specific details. +The following can be used to build all docker images used within the project. See [docker images](docker.md) for specific details. ```bash make docker-images diff --git a/docs/source/contributors/docker.md b/docs/source/contributors/docker.md index 0633c840c..b990b3ced 100644 --- a/docs/source/contributors/docker.md +++ b/docs/source/contributors/docker.md @@ -8,27 +8,27 @@ The following sections describe the docker images used within Kubernetes and Doc ## elyra/enterprise-gateway -The primary image for Kubernetes and Docker Swarm support, [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) contains the Enterprise Gateway server software and default kernel specifications. For Kubernetes it is deployed using the [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) file or [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kubernetes/helm/enterprise-gateway). For Docker Swarm, deployment can be accomplished using [docker-componse.yml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml). +The primary image for Kubernetes and Docker Swarm support, [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) contains the Enterprise Gateway server software and default kernel specifications. 
For Kubernetes it is deployed using the [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) file or [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kubernetes/helm/enterprise-gateway). For Docker Swarm, deployment can be accomplished using [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml). We recommend that a persistent/mounted volume be used so that the kernel specifications can be accessed outside the container since we've found those to require post-deployment modifications from time to time. ## elyra/kernel-py -Image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/) contains the IPython kernel. It is currently built on the [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook) image with additional support necessary for remote operation. +Image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/) contains the IPython kernel. It is currently built on the [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook) image with additional support necessary for remote operation. ## elyra/kernel-spark-py -Image [elyra/kernel-spark-py](https://hub.docker.com/r/elyra/kernel-spark-py/) is built on [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py) and includes the Spark 2.4 distribution for use in Kubernetes clusters. Please note that the ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. +Image [elyra/kernel-spark-py](https://hub.docker.com/r/elyra/kernel-spark-py/) is built on [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py) and includes the Spark 2.4 distribution for use in Kubernetes clusters. Please note that the ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results.
## elyra/kernel-tf-py -Image [elyra/kernel-tf-py](https://hub.docker.com/r/elyra/kernel-tf-py/) contains the IPython kernel. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image with additional support necessary for remote operation. +Image [elyra/kernel-tf-py](https://hub.docker.com/r/elyra/kernel-tf-py/) contains the IPython kernel. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image with additional support necessary for remote operation. ## elyra/kernel-scala -Image [elyra/kernel-scala](https://hub.docker.com/r/elyra/kernel-scala/) contains the Scala (Apache Toree) kernel and is built on [elyra/spark](https://hub.docker.com/r/elyra/spark) which is, itself, built using the scripts provided by the Spark 2.4 distribution for use in Kubernetes clusters. As a result, the ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. +Image [elyra/kernel-scala](https://hub.docker.com/r/elyra/kernel-scala/) contains the Scala (Apache Toree) kernel and is built on [elyra/spark](https://hub.docker.com/r/elyra/spark) which is, itself, built using the scripts provided by the Spark 2.4 distribution for use in Kubernetes clusters. As a result, the ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. -Since Apache Toree is currently tied to Spark, creation of a *vanilla* mode Scala kernel is not high on our current set of priorities. +Since Apache Toree is currently tied to Spark, creation of a _vanilla_ mode Scala kernel is not high on our current set of priorities. 
## elyra/kernel-r @@ -38,16 +38,15 @@ Image [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/) contains the IR Image [elyra/kernel-spark-r](https://hub.docker.com/r/elyra/kernel-spark-r/) also contains the IRKernel but is built on [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r) and includes the Spark 2.4 distribution for use in Kubernetes clusters. - ## Ancillary Docker Images The project produces two docker images to make testing easier: `elyra/demo-base` and `elyra/enterprise-gateway-demo`. ### elyra/demo-base -The [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) image is considered the base image upon which [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) is built. It consists of a Hadoop YARN installation that includes Spark, Java, miniconda, and various kernel installations. +The [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) image is considered the base image upon which [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) is built. It consists of a Hadoop YARN installation that includes Spark, Java, miniconda, and various kernel installations. -The primary use of this image is to quickly build elyra/enterprise-gateway images for testing and development purposes. To build a local image, run `make demo-base`. +The primary use of this image is to quickly build elyra/enterprise-gateway images for testing and development purposes. To build a local image, run `make demo-base`. This image can be used to start a separate Hadoop YARN cluster that, when combined with another instance of elyra/enterprise-gateway can better demonstrate remote kernel functionality. 
@@ -55,8 +54,8 @@ This image can be used to start a separate Hadoop YARN cluster that, when combin Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) also includes the various example kernel specifications contained in the repository. -By default, this container will start with enterprise gateway running as a service user named `jovyan`. This user is enabled for `sudo` so that it can emulate other users where necessary. Other users included in this image are `elyra`, `bob` and `alice` (names commonly used in security-based examples). +By default, this container will start with enterprise gateway running as a service user named `jovyan`. This user is enabled for `sudo` so that it can emulate other users where necessary. Other users included in this image are `elyra`, `bob` and `alice` (names commonly used in security-based examples). -We plan on producing one image per release to the [enterprise-gateway-demo docker repo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) where the image's tag reflects the corresponding release. +We plan on producing one image per release to the [enterprise-gateway-demo docker repo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) where the image's tag reflects the corresponding release. -To build a local image, run `make enterprise-gateway-demo`. Because this is a development build, the tag for this image will not reflect the value of the VERSION variable in the root `Makefile` but will be 'dev'. +To build a local image, run `make enterprise-gateway-demo`. Because this is a development build, the tag for this image will not reflect the value of the VERSION variable in the root `Makefile` but will be 'dev'. 
diff --git a/docs/source/contributors/roadmap.md b/docs/source/contributors/roadmap.md index ef4bc622f..25dd8e8af 100644 --- a/docs/source/contributors/roadmap.md +++ b/docs/source/contributors/roadmap.md @@ -1,33 +1,36 @@ # Project Roadmap -We have plenty to do, now and in the future. Here's where we're headed: + +We have plenty to do, now and in the future. Here's where we're headed: ## Planned for 3.0 -* Spark 3.0 support - * Includes pod template files + +- Spark 3.0 support + - Includes pod template files ## Planned for 4.0 -* Kernel Provisioners - * Provisioners will replace process proxies and enable Enterprise Gateway to remove its cap on `jupyter_client < 7`. -* Parameterized Kernels - * Enable the ability to prompt for parameters - * These will likely be based on kernel provisioners (4.0) +- Kernel Provisioners + - Provisioners will replace process proxies and enable Enterprise Gateway to remove its cap on `jupyter_client < 7`. +- Parameterized Kernels + - Enable the ability to prompt for parameters + - These will likely be based on kernel provisioners (4.0) ## Wish list -* High Availability - * Session persistence using a shared location (NoSQL DB) (File persistence has been implemented) - * Active/active support -* Multi-gateway support on client-side - * Enables the ability for a single Jupyter Server to be configured against multiple Gateway servers simultaneously. This work will primarily be in Jupyter Server. -* Pluggable load-balancers into `DistributedProcessProxy` (currently uses simple round-robin) -* Support for other resource managers - * Slurm? - * Mesos? 
-* User Environments - * Improve the way user files are made available to remote kernels -* Administration UI - * Dashboard with running kernels - * Lifecycle management - * Time running, stop/kill, Profile Management, etc + +- High Availability + - Session persistence using a shared location (NoSQL DB) (File persistence has been implemented) + - Active/active support +- Multi-gateway support on client-side + - Enables the ability for a single Jupyter Server to be configured against multiple Gateway servers simultaneously. This work will primarily be in Jupyter Server. +- Pluggable load-balancers into `DistributedProcessProxy` (currently uses simple round-robin) +- Support for other resource managers + - Slurm? + - Mesos? +- User Environments + - Improve the way user files are made available to remote kernels +- Administration UI + - Dashboard with running kernels + - Lifecycle management + - Time running, stop/kill, Profile Management, etc We'd love to hear any other use cases you might have and look forward to your contributions to Jupyter Enterprise Gateway! diff --git a/docs/source/contributors/system-architecture.md b/docs/source/contributors/system-architecture.md index 55f96ee54..2d35b75d2 100644 --- a/docs/source/contributors/system-architecture.md +++ b/docs/source/contributors/system-architecture.md @@ -3,44 +3,51 @@ Below are sections presenting details of the Enterprise Gateway internals and other related items. While we will attempt to maintain its consistency, the ultimate answers are in the code itself. ## Enterprise Gateway Process Proxy Extensions -Enterprise Gateway is follow-on project to Jupyter Kernel Gateway with additional abilities to support remote kernel sessions on behalf of multiple users within resource-managed frameworks such as [Apache Hadoop YARN](https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html) or [Kubernetes](https://kubernetes.io/). 
Enterprise Gateway introduces these capabilities by extending the existing class hierarchies for `AsyncKernelManager` and `AsyncMultiKernelManager` classes, along with an additional abstraction known as a *process proxy*. + +Enterprise Gateway is a follow-on project to Jupyter Kernel Gateway with additional abilities to support remote kernel sessions on behalf of multiple users within resource-managed frameworks such as [Apache Hadoop YARN](https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html) or [Kubernetes](https://kubernetes.io/). Enterprise Gateway introduces these capabilities by extending the existing class hierarchies for `AsyncKernelManager` and `AsyncMultiKernelManager` classes, along with an additional abstraction known as a _process proxy_. ### Overview + At its basic level, a running kernel consists of two components for its communication - a set of ports and a process. ### Kernel Ports -The first component is a set of five zero-MQ ports used to convey the Jupyter protocol between the Notebook -and the underlying kernel. In addition to the 5 ports, is an IP address, a key, and a signature scheme -indicator used to interpret the key. These eight pieces of information are conveyed to the kernel via a -json file, known as the connection file. + +The first component is a set of five zero-MQ ports used to convey the Jupyter protocol between the Notebook +and the underlying kernel. In addition to the 5 ports, there are an IP address, a key, and a signature scheme +indicator used to interpret the key. These eight pieces of information are conveyed to the kernel via a +json file, known as the connection file. Within the base framework, the IP address must be a local IP address meaning that the kernel cannot be -remote from the library launching the kernel.
The enforcement of this restriction is down in the `jupyter_client` module - two levels below Enterprise Gateway. -This component is the core communication mechanism between the Notebook and the kernel. All aspects, including -lifecycle management, can occur via this component. The kernel process (below) comes into play only when +This component is the core communication mechanism between the Notebook and the kernel. All aspects, including +lifecycle management, can occur via this component. The kernel process (below) comes into play only when port-based communication becomes unreliable or additional information is required. ### Kernel Process -When a kernel is launched, one of the fields of the kernel's associated kernel specification is used to -identify a command to invoke. In today's implementation, this command information, along with other -environment variables (also described in the kernel specification), is passed to `popen()` which returns -a process class. This class supports four basic methods following its creation: + +When a kernel is launched, one of the fields of the kernel's associated kernel specification is used to +identify a command to invoke. In today's implementation, this command information, along with other +environment variables (also described in the kernel specification), is passed to `popen()` which returns +a process class. This class supports four basic methods following its creation: + 1. `poll()` to determine if the process is still running 2. `wait()` to block the caller until the process has terminated -3. `send_signal(signum)` to send a signal to the process +3. `send_signal(signum)` to send a signal to the process 4. `kill()` to terminate the process As you can see, other forms of process communication can be achieved by abstracting the launch mechanism. 
### Kernel Specifications -The primary vehicle for indicating a given kernel should be handled in a different manner is the kernel -specification, otherwise known as the _kernel spec_. Enterprise Gateway leverages the natively extensible `metadata` stanza within the kernel specification to introduce a new stanza named `process_proxy`. + +The primary vehicle for indicating a given kernel should be handled in a different manner is the kernel +specification, otherwise known as the _kernel spec_. Enterprise Gateway leverages the natively extensible `metadata` stanza within the kernel specification to introduce a new stanza named `process_proxy`. The `process_proxy` stanza identifies the class that provides the kernel's process abstraction -(while allowing for future extensions). This class then provides the kernel's lifecycle management operations relative to the managed resource or functional equivalent. +(while allowing for future extensions). This class then provides the kernel's lifecycle management operations relative to the managed resource or functional equivalent. Here's an example of a kernel specification that uses the `DistributedProcessProxy` class for its abstraction: + ```json { "language": "scala", @@ -68,75 +75,81 @@ Here's an example of a kernel specification that uses the `DistributedProcessPro ] } ``` + See the [Process Proxy](#process-proxy) section for more details on process proxies and those provided as part of the Enterprise Gateway release. ## Remote Mapping Kernel Manager + `RemoteMappingKernelManager` is a subclass of Jupyter Server's [`AsyncMappingKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/services/kernels/kernelmanager.py#L633) and provides two functions. + 1. It provides the vehicle for making the `RemoteKernelManager` class known and available. -2. 
It overrides `start_kernel` to look at the target kernel's kernel spec to see if it contains a remote process proxy class entry. If so, it records the name of the class in its member variable to be made available to the kernel start logic. +2. It overrides `start_kernel` to look at the target kernel's kernel spec to see if it contains a remote process proxy class entry. If so, it records the name of the class in its member variable to be made available to the kernel start logic. ## Remote Kernel Manager + `RemoteKernelManager` is a subclass of jupyter_client's [`AsyncIOLoopKernelManager` class](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/ioloop/manager.py#L62) and provides the -primary integration points for remote process proxy invocations. It implements a number of methods which allow -Enterprise Gateway to circumvent functionality that might otherwise be prevented. As a result, some of these overrides may -not be necessary if lower layers of the Jupyter framework were modified. For example, some methods are required +primary integration points for remote process proxy invocations. It implements a number of methods which allow +Enterprise Gateway to circumvent functionality that might otherwise be prevented. As a result, some of these overrides may +not be necessary if lower layers of the Jupyter framework were modified. For example, some methods are required because Jupyter makes assumptions that the kernel process is local. -Its primary functionality, however, is to override the `_launch_kernel` method (which is the method closest to -the process invocation) and instantiates the appropriate process proxy instance - which is then returned in -place of the process instance used in today's implementation. 
Any interaction with the process then takes +Its primary functionality, however, is to override the `_launch_kernel` method (which is the method closest to +the process invocation) and instantiates the appropriate process proxy instance - which is then returned in +place of the process instance used in today's implementation. Any interaction with the process then takes place via the process proxy. -Both `RemoteMappingKernelManager` and `RemoteKernelManager` class definitions can be found in +Both `RemoteMappingKernelManager` and `RemoteKernelManager` class definitions can be found in [remotemanager.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/enterprise_gateway/services/kernels/remotemanager.py) ## Process Proxy -Process proxy classes derive from the abstract base class `BaseProcessProxyABC` - which defines the four basic -process methods. There are two immediate subclasses of `BaseProcessProxyABC` - `LocalProcessProxy` -and `RemoteProcessProxy`. -`LocalProcessProxy` is essentially a pass-through to the current implementation. Kernel specifications that do not contain -a `process_proxy` stanza will use `LocalProcessProxy`. +Process proxy classes derive from the abstract base class `BaseProcessProxyABC` - which defines the four basic +process methods. There are two immediate subclasses of `BaseProcessProxyABC` - `LocalProcessProxy` +and `RemoteProcessProxy`. -`RemoteProcessProxy` is an abstract base class representing remote kernel processes. Currently, there are seven +`LocalProcessProxy` is essentially a pass-through to the current implementation. Kernel specifications that do not contain +a `process_proxy` stanza will use `LocalProcessProxy`. + +`RemoteProcessProxy` is an abstract base class representing remote kernel processes. Currently, there are seven built-in subclasses of `RemoteProcessProxy` ... 
-- `DistributedProcessProxy` - largely a proof of concept class, `DistributedProcessProxy` is responsible for the launch -and management of kernels distributed across an explicitly defined set of hosts using ssh. Hosts are determined -via a round-robin algorithm (that we should make pluggable someday). -- `YarnClusterProcessProxy` - is responsible for the discovery and management of kernels hosted as Hadoop YARN applications -within a managed cluster. + +- `DistributedProcessProxy` - largely a proof of concept class, `DistributedProcessProxy` is responsible for the launch + and management of kernels distributed across an explicitly defined set of hosts using ssh. Hosts are determined + via a round-robin algorithm (that we should make pluggable someday). +- `YarnClusterProcessProxy` - is responsible for the discovery and management of kernels hosted as Hadoop YARN applications + within a managed cluster. - `KubernetesProcessProxy` - is responsible for the discovery and management of kernels hosted -within a Kubernetes cluster. + within a Kubernetes cluster. - `DockerSwarmProcessProxy` - is responsible for the discovery and management of kernels hosted -within a Docker Swarm cluster. + within a Docker Swarm cluster. - `DockerProcessProxy` - is responsible for the discovery and management of kernels hosted -within Docker configuration. Note: because these kernels will always run local to the corresponding Enterprise Gateway instance, these process proxies are of limited use. + within Docker configuration. Note: because these kernels will always run local to the corresponding Enterprise Gateway instance, these process proxies are of limited use. - `ConductorClusterProcessProxy` - is responsible for the discovery and management of kernels hosted -within an IBM Spectrum Conductor cluster. -- `SparkOperatorProcessProxy` - is responsible for the discovery and management of kernels hosted -within a Kubernetes cluster but created as a `SparkApplication` instead of a Pod. 
The `SparkApplication` is a Kubernetes custom resource -defined inside the project [spark-on-k8s-operator](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator), which -makes all kinds of spark on k8s components better organized and easy to configure. + within an IBM Spectrum Conductor cluster. +- `SparkOperatorProcessProxy` - is responsible for the discovery and management of kernels hosted + within a Kubernetes cluster but created as a `SparkApplication` instead of a Pod. The `SparkApplication` is a Kubernetes custom resource + defined inside the project [spark-on-k8s-operator](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator), which + makes all kinds of spark on k8s components better organized and easy to configure. ```{note} Before you run a kernel associated with `SparkOperatorProcessProxy`, please ensure that spark operator is installed in an existing namespace of your Kubernetes cluster. ``` -You might notice that the last six process proxies do not necessarily control the *launch* of the kernel. This is -because the native jupyter framework is utilized such that the script that is invoked by the framework is what -launches the kernel against that particular resource manager. As a result, the *startup time* actions of these process -proxies is more about discovering where the kernel *landed* within the cluster in order to establish a mechanism for -determining lifetime. *Discovery* typically consists of using the resource manager's API to locate the kernel whose name includes its kernel ID +You might notice that the last six process proxies do not necessarily control the _launch_ of the kernel. This is +because the native jupyter framework is utilized such that the script that is invoked by the framework is what +launches the kernel against that particular resource manager. 
As a result, the _startup time_ actions of these process +proxies are more about discovering where the kernel _landed_ within the cluster in order to establish a mechanism for +determining lifetime. _Discovery_ typically consists of using the resource manager's API to locate the kernel whose name includes its kernel ID in some fashion. -On the other hand, the `DistributedProcessProxy` essentially wraps the kernel specification's argument vector (i.e., invocation +On the other hand, the `DistributedProcessProxy` essentially wraps the kernel specification's argument vector (i.e., invocation string) in a remote shell since the host is determined by Enterprise Gateway, eliminating the discovery step from its implementation. These class definitions can be found in the [processproxies package](https://github.com/jupyter-server/enterprise_gateway/blob/master/enterprise_gateway/services/processproxies). However, -Enterprise Gateway is architected such that additional process proxy implementations can be provided and are not -required to be located within the Enterprise Gateway hierarchy - i.e., we embrace a *bring your own process proxy* model. +Enterprise Gateway is architected such that additional process proxy implementations can be provided and are not +required to be located within the Enterprise Gateway hierarchy - i.e., we embrace a _bring your own process proxy_ model. ![Process Class Hierarchy](../images/process_proxy_hierarchy.png) @@ -146,98 +159,110 @@ The process proxy constructor looks as follows: def __init__(self, kernel_manager, proxy_config): ``` -where -* `kernel_manager` is an instance of a `RemoteKernelManager` class. -* `proxy_config` is a dictionary of configuration values present in the `kernel.json` file. These -values can be used to override or amend various global configuration values on a per-kernel basis. See -[Process Proxy Configuration](#process-proxy-configuration) for more information.
+where + +- `kernel_manager` is an instance of a `RemoteKernelManager` class. +- `proxy_config` is a dictionary of configuration values present in the `kernel.json` file. These + values can be used to override or amend various global configuration values on a per-kernel basis. See + [Process Proxy Configuration](#process-proxy-configuration) for more information. ```python @abstractmethod def launch_process(self, kernel_cmd, *kw): ``` + where -* `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is -an artifact of the kernel manager `_launch_kernel()` method. -* `**kw` is a set keyword arguments which includes an `env` dictionary element consisting of the names -and values of which environment variables to set at launch time. - -The `launch_process()` method is the primary method exposed on the Process Proxy classes. It's responsible for -performing the appropriate actions relative to the target type. The process must be in a running state prior -to returning from this method - otherwise attempts to use the connections will not be successful since the + +- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is + an artifact of the kernel manager `_launch_kernel()` method. +- `**kw` is a set of keyword arguments which includes an `env` dictionary element consisting of the names + and values of which environment variables to set at launch time. + +The `launch_process()` method is the primary method exposed on the Process Proxy classes. It's responsible for +performing the appropriate actions relative to the target type. The process must be in a running state prior +to returning from this method - otherwise attempts to use the connections will not be successful since the (remote) kernel needs to have created the sockets.
-All process proxy subclasses should ensure `BaseProcessProxyABC.launch_process()` is called - which will automatically -place a variable named `KERNEL_ID` (consisting of the kernel's unique ID) into the corresponding kernel's environment +All process proxy subclasses should ensure `BaseProcessProxyABC.launch_process()` is called - which will automatically +place a variable named `KERNEL_ID` (consisting of the kernel's unique ID) into the corresponding kernel's environment variable list since `KERNEL_ID` is a primary mechanism for associating remote applications to a specific kernel instance. ```python def poll(self): ``` -The `poll()` method is used by the Jupyter framework to determine if the process is still alive. By default, the -framework's heartbeat mechanism calls `poll()` every 3 seconds. This method returns `None` if the process is still running, `False` otherwise (per the `popen()` contract). + +The `poll()` method is used by the Jupyter framework to determine if the process is still alive. By default, the +framework's heartbeat mechanism calls `poll()` every 3 seconds. This method returns `None` if the process is still running, `False` otherwise (per the `popen()` contract). ```python def wait(self): ``` -The `wait()` method is used by the Jupyter framework when terminating a kernel. Its purpose is to block return -to the caller until the process has terminated. Since this could be awhile, it's best to return control in a + +The `wait()` method is used by the Jupyter framework when terminating a kernel. Its purpose is to block return +to the caller until the process has terminated. Since this could be awhile, it's best to return control in a reasonable amount of time since the kernel instance is destroyed anyway. This method does not return a value. ```python def send_signal(self, signum): ``` -The `send_signal()` method is used by the Jupyter framework to send a signal to the process. 
Currently, `SIGINT (2)` + +The `send_signal()` method is used by the Jupyter framework to send a signal to the process. Currently, `SIGINT (2)` (to interrupt the kernel) is the signal sent. -It should be noted that for normal processes - both local and remote - `poll()` and `kill()` functionality can +It should be noted that for normal processes - both local and remote - `poll()` and `kill()` functionality can be implemented via `send_signal` with `signum` values of `0` and `9`, respectively. -This method returns `None` if the process is still running, `False` otherwise. +This method returns `None` if the process is still running, `False` otherwise. ```python def kill(self): ``` -The `kill()` method is used by the Jupyter framework to terminate the kernel process. This method is only necessary when the request to shutdown the kernel - sent via the control port of the zero-MQ ports - does not respond in an appropriate amount of time. + +The `kill()` method is used by the Jupyter framework to terminate the kernel process. This method is only necessary when the request to shutdown the kernel - sent via the control port of the zero-MQ ports - does not respond in an appropriate amount of time. This method returns `None` if the process is killed successfully, `False` otherwise. ### RemoteProcessProxy -As noted above, `RemoteProcessProxy` is an abstract base class that derives from `BaseProcessProxyABC`. Subclasses + +As noted above, `RemoteProcessProxy` is an abstract base class that derives from `BaseProcessProxyABC`. Subclasses of `RemoteProcessProxy` must implement two methods - `confirm_remote_startup()` and `handle_timeout()`: + ```python @abstractmethod def confirm_remote_startup(self, kernel_cmd, **kw): ``` + where -* `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is an -artifact of the kernel manager `_launch_kernel()` method. -* `**kw` is a set key-word arguments. 
-`confirm_remote_startup()` is responsible for detecting that the remote kernel has been appropriately launched and is ready to receive requests. This can include gathering application status from the remote resource manager but is really a function of having received the connection information from the remote kernel launcher. (See [Kernel Launchers](#kernel-launchers))
+- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is an
+  artifact of the kernel manager `_launch_kernel()` method.
+- `**kw` is a set of keyword arguments.
+
+`confirm_remote_startup()` is responsible for detecting that the remote kernel has been appropriately launched and is ready to receive requests. This can include gathering application status from the remote resource manager but is really a function of having received the connection information from the remote kernel launcher. (See [Kernel Launchers](#kernel-launchers))

```python
@abstractmethod
def handle_timeout(self):
```

-`handle_timeout()` is responsible for detecting that the remote kernel has failed to startup in an acceptable time. It
-should be called from `confirm_remote_startup()`. If the timeout expires, `handle_timeout()` should throw HTTP
+`handle_timeout()` is responsible for detecting that the remote kernel has failed to startup in an acceptable time. It
+should be called from `confirm_remote_startup()`. If the timeout expires, `handle_timeout()` should throw HTTP
Error 500 (`Internal Server Error`).

-Kernel launch timeout expiration is expressed via the environment variable `KERNEL_LAUNCH_TIMEOUT`. If this
-value does not exist, it defaults to the Enterprise Gateway process environment variable `EG_KERNEL_LAUNCH_TIMEOUT` - which
-defaults to 30 seconds if unspecified. Since all `KERNEL_` environment variables "flow" from the Notebook server, the launch
+Kernel launch timeout expiration is expressed via the environment variable `KERNEL_LAUNCH_TIMEOUT`.
If this +value does not exist, it defaults to the Enterprise Gateway process environment variable `EG_KERNEL_LAUNCH_TIMEOUT` - which +defaults to 30 seconds if unspecified. Since all `KERNEL_` environment variables "flow" from the Notebook server, the launch timeout can be specified as a client attribute of the Notebook session. #### YarnClusterProcessProxy -As part of its base offering, Enterprise Gateway provides an implementation of a process proxy that communicates with the Hadoop YARN resource manager that has been instructed to launch a kernel on one of its worker nodes. The node on which the kernel is launched is up to the resource manager - which enables an optimized distribution of kernel resources. -Derived from `RemoteProcessProxy`, `YarnClusterProcessProxy` uses the `yarn-api-client` library to locate the kernel and monitor its lifecycle. However, once the kernel has returned its connection information, the primary kernel operations naturally take place over the ZeroMQ ports. +As part of its base offering, Enterprise Gateway provides an implementation of a process proxy that communicates with the Hadoop YARN resource manager that has been instructed to launch a kernel on one of its worker nodes. The node on which the kernel is launched is up to the resource manager - which enables an optimized distribution of kernel resources. + +Derived from `RemoteProcessProxy`, `YarnClusterProcessProxy` uses the `yarn-api-client` library to locate the kernel and monitor its lifecycle. However, once the kernel has returned its connection information, the primary kernel operations naturally take place over the ZeroMQ ports. -This process proxy is reliant on the `--EnterpriseGatewayApp.yarn_endpoint` command line option or the `EG_YARN_ENDPOINT` environment variable to determine where the YARN resource manager is located. 
To accommodate increased flexibility, the endpoint definition can be defined within the process proxy stanza of the kernel specification, enabling the ability to direct specific kernels to different YARN clusters. +This process proxy is reliant on the `--EnterpriseGatewayApp.yarn_endpoint` command line option or the `EG_YARN_ENDPOINT` environment variable to determine where the YARN resource manager is located. To accommodate increased flexibility, the endpoint definition can be defined within the process proxy stanza of the kernel specification, enabling the ability to direct specific kernels to different YARN clusters. -In cases where the YARN cluster is configured for high availability, then the `--EnterpriseGatewayApp.alt_yarn_endpoint` command line option or the `EG_ALT_YARN_ENDPOINT` environment variable should also be defined. When set, the underlying `yarn-api-client` library will choose the active Resource Manager between the two. +In cases where the YARN cluster is configured for high availability, then the `--EnterpriseGatewayApp.alt_yarn_endpoint` command line option or the `EG_ALT_YARN_ENDPOINT` environment variable should also be defined. When set, the underlying `yarn-api-client` library will choose the active Resource Manager between the two. ```{note} If Enterprise Gateway is running on an edge node of the cluster and has a valid `yarn-site.xml` file in HADOOP_CONF_DIR, neither of these values are required (default = None). In such cases, the `yarn-api-client` library will choose the active Resource Manager from the configuration files. @@ -248,19 +273,20 @@ If Enterprise Gateway is running on an edge node of the cluster and has a valid ``` #### DistributedProcessProxy + Like `YarnClusterProcessProxy`, Enterprise Gateway also provides an implementation of a basic -round-robin remoting mechanism that is part of the `DistributedProcessProxy` class. 
This class
-uses the `--EnterpriseGatewayApp.remote_hosts` command line option (or `EG_REMOTE_HOSTS`
-environment variable) to determine on which hosts a given kernel should be launched. It uses
+round-robin remoting mechanism that is part of the `DistributedProcessProxy` class. This class
+uses the `--EnterpriseGatewayApp.remote_hosts` command line option (or `EG_REMOTE_HOSTS`
+environment variable) to determine on which hosts a given kernel should be launched. It uses
a basic round-robin algorithm to index into the list of remote hosts for selecting the target
-host. It then uses ssh to launch the kernel on the target host. As a result, all kernel specification
-files must reside on the remote hosts in the same directory structure as on the Enterprise
+host. It then uses ssh to launch the kernel on the target host. As a result, all kernel specification
+files must reside on the remote hosts in the same directory structure as on the Enterprise
Gateway server.

-It should be noted that kernels launched with this process proxy run in YARN _client_ mode - so their
-resources (within the kernel process itself) are not managed by the Hadoop YARN resource manager.
+It should be noted that kernels launched with this process proxy run in YARN _client_ mode - so their
+resources (within the kernel process itself) are not managed by the Hadoop YARN resource manager.

-Like the yarn endpoint parameter the `remote_hosts` parameter can be specified within the
+Like the yarn endpoint parameter, the `remote_hosts` parameter can be specified within the
process proxy configuration to override the global value - enabling finer-grained kernel distributions.
```{seealso} @@ -268,79 +294,83 @@ process proxy configuration to override the global value - enabling finer-graine ``` #### KubernetesProcessProxy + With the popularity of Kubernetes within the enterprise, Enterprise Gateway provides an implementation -of a process proxy that communicates with the Kubernetes resource manager via the Kubernetes API. Unlike +of a process proxy that communicates with the Kubernetes resource manager via the Kubernetes API. Unlike the other offerings, in the case of Kubernetes, Enterprise Gateway is itself deployed within the Kubernetes -cluster as a *Service* and *Deployment*. The primary vehicle by which this is accomplished is via the -[enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) -file that contains the necessary metadata to define its deployment. Enterprise Gateway also provides a [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kubernetes/helm/enterprise-gateway) for those deployments utilizing [Helm](https://helm.sh/). +cluster as a _Service_ and _Deployment_. The primary vehicle by which this is accomplished is via the +[enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) +file that contains the necessary metadata to define its deployment. Enterprise Gateway also provides a [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kubernetes/helm/enterprise-gateway) for those deployments utilizing [Helm](https://helm.sh/). ```{seealso} [Kubernetes deployments](../operators/deploy-kubernetes.md) in the Operators Guide for details. ``` #### DockerSwarmProcessProxy -Enterprise Gateway provides an implementation of a process proxy that communicates with the Docker Swarm resource manager via the Docker API. When used, the kernels are launched as swarm services and can reside anywhere in the managed cluster. 
To leverage kernels configured in this manner, Enterprise Gateway can be deployed + +Enterprise Gateway provides an implementation of a process proxy that communicates with the Docker Swarm resource manager via the Docker API. When used, the kernels are launched as swarm services and can reside anywhere in the managed cluster. To leverage kernels configured in this manner, Enterprise Gateway can be deployed either as a Docker Swarm _service_ or a traditional Docker container. -A similar `DockerProcessProxy` implementation has also been provided. When used, the corresponding kernel will be launched as a traditional docker container that runs local to the launching Enterprise Gateway instance. As a result, its use has limited value. +A similar `DockerProcessProxy` implementation has also been provided. When used, the corresponding kernel will be launched as a traditional docker container that runs local to the launching Enterprise Gateway instance. As a result, its use has limited value. ```{seealso} [Docker and Docker Swarm deployments](../operators/deploy-docker.md) in the Operators Guide for details. ``` #### ConductorClusterProcessProxy -Enterprise Gateway also provides an implementation of a process proxy + +Enterprise Gateway also provides an implementation of a process proxy that communicates with an IBM Spectrum Conductor resource manager that has been instructed to launch a kernel -on one of its worker nodes. The node on which the kernel is launched is up to the resource +on one of its worker nodes. The node on which the kernel is launched is up to the resource manager - which enables an optimized distribution of kernel resources. Derived from `RemoteProcessProxy`, `ConductorClusterProcessProxy` uses Conductor's REST-ful API -to locate the kernel and monitor its life-cycle. However, once the kernel has returned its +to locate the kernel and monitor its life-cycle. 
However, once the kernel has returned its
connection information, the primary kernel operations naturally take place over the ZeroMQ ports.

-This process proxy is reliant on the `--EnterpriseGatewayApp.conductor_endpoint` command line
-option or the `EG_CONDUCTOR_ENDPOINT` environment variable to determine where the Conductor resource manager is
-located.
+This process proxy is reliant on the `--EnterpriseGatewayApp.conductor_endpoint` command line
+option or the `EG_CONDUCTOR_ENDPOINT` environment variable to determine where the Conductor resource manager is
+located.

```{seealso}
[IBM Spectrum Conductor deployments](../operators/deploy-conductor.md) in the Operators Guide for details.
```

#### CustomResourceProcessProxy
-Enterprise Gateway also provides a implementation of a process proxy derived from `KubernetesProcessProxy`
-called `CustomResourceProcessProxy`.
+
+Enterprise Gateway also provides an implementation of a process proxy derived from `KubernetesProcessProxy`
+called `CustomResourceProcessProxy`.

Instead of creating kernels based on a Kubernetes pod, `CustomResourceProcessProxy`
-manages kernels via a custom resource definition (CRD). For example, `SparkApplication` is a CRD that includes
+manages kernels via a custom resource definition (CRD). For example, `SparkApplication` is a CRD that includes
many components of a Spark-on-Kubernetes application.

If you are going to extend `CustomResourceProcessProxy`,
just follow steps below:

- override custom resource related variables(i.e. `group`, `version` and `plural`
-and `get_container_status` method, wrt [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py).
+  and `get_container_status` method, wrt [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py).
- define a jinja template like -[kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2). -As a generic design, the template file should be named as {crd_group}-{crd_version} so that you can reuse -[launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py) in the kernelspec. + [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2). + As a generic design, the template file should be named as {crd_group}-{crd_version} so that you can reuse + [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py) in the kernelspec. - define a kernel specification like [spark_python_operator/kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernelspecs/spark_python_operator/kernel.json). - ### Process Proxy Configuration -Each `kernel.json`'s `process-proxy` stanza can specify an optional `config` stanza that is converted + +Each `kernel.json`'s `process-proxy` stanza can specify an optional `config` stanza that is converted into a dictionary of name/value pairs and passed as an argument to each process-proxy constructor relative to the class identified by the `class_name` entry. How each dictionary entry is interpreted is completely a function of the constructor relative to that process-proxy -class or its superclass. For example, an alternate list of remote hosts has meaning to the `DistributedProcessProxy` but -not to its superclasses. As a result, the superclass constructors will not attempt to interpret that value. +class or its superclass. For example, an alternate list of remote hosts has meaning to the `DistributedProcessProxy` but +not to its superclasses. 
As a result, the superclass constructors will not attempt to interpret that value. In addition, certain dictionary entries can override or amend system-level configuration values set on the command-line, thereby -allowing administrators to tune behaviors down to the kernel level. For example, an administrator might want to -constrain Python kernels configured to use specific resources to an entirely different set of hosts (and ports) that other -remote kernels might be targeting in order to isolate valuable resources. Similarly, an administrator might want to +allowing administrators to tune behaviors down to the kernel level. For example, an administrator might want to +constrain Python kernels configured to use specific resources to an entirely different set of hosts (and ports) that other +remote kernels might be targeting in order to isolate valuable resources. Similarly, an administrator might want to only authorize specific users to a given kernel. In such situations, one might find the following `process-proxy` stanza: @@ -360,31 +390,33 @@ In such situations, one might find the following `process-proxy` stanza: } ``` -In this example, the kernel associated with this `kernel.json` file is relegated to the hosts `priv_host1` and `priv_host2` -where kernel ports will be restricted to a range between `40000` and `41000` and only users `bob` and `alice` can +In this example, the kernel associated with this `kernel.json` file is relegated to the hosts `priv_host1` and `priv_host2` +where kernel ports will be restricted to a range between `40000` and `41000` and only users `bob` and `alice` can launch such kernels (provided neither appear in the global set of `unauthorized_users` since denial takes precedence). For a current enumeration of which system-level configuration values can be overridden or amended on a per-kernel basis see [Per-kernel overrides](../operators/config-kernel-override.md). 
## Kernel Launchers + As noted above, a kernel is considered started once the `launch_process()` method has conveyed its connection information back to the Enterprise Gateway server process. Conveyance of connection information from a remote kernel is the responsibility of the remote kernel _launcher_. Kernel launchers provide a means of normalizing behaviors across kernels while avoiding kernel modifications. Besides providing a location where connection file creation can occur, they also provide a 'hook' for other kinds of behaviors - like establishing virtual environments or sandboxes, providing collaboration behavior, adhering to port range restrictions, etc. There are four primary tasks of a kernel launcher: + 1. Creation of the connection file and ZMQ ports on the remote (target) system along with a _gateway listener_ socket 2. Conveyance of the connection (and listener socket) information back to the Enterprise Gateway process 3. Invocation of the target kernel 4. Listen for interrupt and shutdown requests from Enterprise Gateway and carry out the action when appropriate -Kernel launchers are minimally invoked with three parameters (all of which are conveyed by the `argv` stanza of the corresponding `kernel.json` file) - the kernel's ID as created by the server and conveyed via the placeholder `{kernel_id}`, a response address consisting of the Enterprise Gateway server IP and port on which to return the connection information similarly represented by the placeholder `{response_address}`, and a public-key used by the launcher to encrypt an AES key that encrypts the kernel's connection information back to the server and represented by the placeholder `{public_key}`. 
+Kernel launchers are minimally invoked with three parameters (all of which are conveyed by the `argv` stanza of the corresponding `kernel.json` file) - the kernel's ID as created by the server and conveyed via the placeholder `{kernel_id}`, a response address consisting of the Enterprise Gateway server IP and port on which to return the connection information similarly represented by the placeholder `{response_address}`, and a public-key used by the launcher to encrypt an AES key that encrypts the kernel's connection information back to the server and represented by the placeholder `{public_key}`.

-The kernel's ID is identified by the parameter `--RemoteProcessProxy.kernel-id`. Its value (`{kernel_id}`) is essentially used to build a connection file to pass to the to-be-launched kernel, along with any other things - like log files, etc.
+The kernel's ID is identified by the parameter `--RemoteProcessProxy.kernel-id`. Its value (`{kernel_id}`) is essentially used to build a connection file to pass to the to-be-launched kernel, along with any other things - like log files, etc.

-The response address is identified by the parameter `--RemoteProcessProxy.response-address`. Its value (`{response_address}`) consists of a string of the form `<IPV4 address>:<port>` where the IPV4 address points back to the Enterprise Gateway server - which is listening for a response on the provided port. The port's default value is `8877`, but can be specified via the environment variable `EG_RESPONSE_PORT`.
+The response address is identified by the parameter `--RemoteProcessProxy.response-address`. Its value (`{response_address}`) consists of a string of the form `<IPV4 address>:<port>` where the IPV4 address points back to the Enterprise Gateway server - which is listening for a response on the provided port. The port's default value is `8877`, but can be specified via the environment variable `EG_RESPONSE_PORT`.

-The public key is identified by the parameter `--RemoteProcessProxy.public-key`.
Its value (`{public_key}`) is used to encrypt an AES key created by the launcher to encrypt the kernel's connection information. The server, upon receipt of the response, uses the corresponding private key to decrypt the AES key, which it then uses to decrypt the connection information. Both the public and private keys are ephemeral; created upon Enterprise Gateway's startup. They can be ephemeral because they are only needed during a kernel's startup and never again. +The public key is identified by the parameter `--RemoteProcessProxy.public-key`. Its value (`{public_key}`) is used to encrypt an AES key created by the launcher to encrypt the kernel's connection information. The server, upon receipt of the response, uses the corresponding private key to decrypt the AES key, which it then uses to decrypt the connection information. Both the public and private keys are ephemeral; created upon Enterprise Gateway's startup. They can be ephemeral because they are only needed during a kernel's startup and never again. Here's a [kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) file illustrating these parameters... @@ -413,24 +445,28 @@ Here's a [kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob ] } ``` -Other options supported by launchers include: -* `--RemoteProcessProxy.port-range {port_range}` - passes configured port-range to launcher where launcher applies that range to kernel ports. The port-range may be configured globally or on a per-kernel specification basis, as previously described. -* `--RemoteProcessProxy.spark-context-initialization-mode [lazy|eager|none]` - indicates the *timeframe* in which the spark context will be created. 
- - `lazy` (default) attempts to defer initialization as late as possible - although this can vary depending on the + +Other options supported by launchers include: + +- `--RemoteProcessProxy.port-range {port_range}` - passes configured port-range to launcher where launcher applies that range to kernel ports. The port-range may be configured globally or on a per-kernel specification basis, as previously described. +- `--RemoteProcessProxy.spark-context-initialization-mode [lazy|eager|none]` - indicates the _timeframe_ in which the spark context will be created. + + - `lazy` (default) attempts to defer initialization as late as possible - although this can vary depending on the underlying kernel and launcher implementation. - - `eager` attempts to create the spark context as soon as possible. - - `none` skips spark context creation altogether. - - Note that some launchers may not be able to support all modes. For example, the scala launcher uses the Apache Toree - kernel - which currently assumes a spark context will exist. As a result, a mode of `none` doesn't apply. - Similarly, the `lazy` and `eager` modes in the Python launcher are essentially the same, with the spark context - creation occurring immediately, but in the background thereby minimizing the kernel's startup time. - -Kernel.json files also include a `LAUNCH_OPTS:` section in the `env` stanza to allow for custom -parameters to be conveyed in the launcher's environment. `LAUNCH_OPTS` are then referenced in + - `eager` attempts to create the spark context as soon as possible. + - `none` skips spark context creation altogether. + + Note that some launchers may not be able to support all modes. For example, the scala launcher uses the Apache Toree + kernel - which currently assumes a spark context will exist. As a result, a mode of `none` doesn't apply. 
+ Similarly, the `lazy` and `eager` modes in the Python launcher are essentially the same, with the spark context + creation occurring immediately, but in the background thereby minimizing the kernel's startup time. + +Kernel.json files also include a `LAUNCH_OPTS:` section in the `env` stanza to allow for custom +parameters to be conveyed in the launcher's environment. `LAUNCH_OPTS` are then referenced in the [run.sh](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh) -script as the initial arguments to the launcher +script as the initial arguments to the launcher (see [launch_ipykernel.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/python/scripts/launch_ipykernel.py)) ... + ```bash eval exec \ "${SPARK_HOME}/bin/spark-submit" \ @@ -441,14 +477,16 @@ eval exec \ ``` ## Extending Enterprise Gateway + Theoretically speaking, enabling a kernel for use in other frameworks amounts to the following: + 1. Build a kernel specification file that identifies the process proxy class to be used. -2. Implement the process proxy class such that it supports the four primitive functions of -`poll()`, `wait()`, `send_signal(signum)` and `kill()` along with `launch_process()`. -3. If the process proxy corresponds to a remote process, derive the process proxy class from -`RemoteProcessProxy` and implement `confirm_remote_startup()` and `handle_timeout()`. -4. Insert invocation of a launcher (if necessary) which builds the connection file and -returns its contents on the `{response_address}` socket and following the encryption protocol set forth in the other launchers. +2. Implement the process proxy class such that it supports the four primitive functions of + `poll()`, `wait()`, `send_signal(signum)` and `kill()` along with `launch_process()`. +3. 
If the process proxy corresponds to a remote process, derive the process proxy class from + `RemoteProcessProxy` and implement `confirm_remote_startup()` and `handle_timeout()`. +4. Insert invocation of a launcher (if necessary) which builds the connection file and + returns its contents on the `{response_address}` socket and following the encryption protocol set forth in the other launchers. ```{seealso} This topic is covered in the [Developers Guide](../developers/index.rst). diff --git a/docs/source/developers/custom-images.md b/docs/source/developers/custom-images.md index ff694f71d..c03816a7d 100644 --- a/docs/source/developers/custom-images.md +++ b/docs/source/developers/custom-images.md @@ -1,9 +1,10 @@ # Custom Kernel Images -This section presents information needed for how a custom kernel image could be built for your own uses with Enterprise Gateway. This is typically necessary if one desires to extend the existing image with additional supporting libraries or an image that encapsulates a different set of functionality altogether. + +This section presents information needed for how a custom kernel image could be built for your own uses with Enterprise Gateway. This is typically necessary if one desires to extend the existing image with additional supporting libraries or an image that encapsulates a different set of functionality altogether. ## Extending Existing Kernel Images -A common form of customization occurs when the existing kernel image is serving the fundamentals but the user wishes it be extended with additional libraries to prevent the need of their imports within the Notebook interactions. Since the image already meets the [basic requirements](#requirements-for-custom-kernel-images), this is really just a matter of referencing the existing image in the `FROM` statement and installing additional libraries. Because the EG kernel images do not run as the `root` user, you may need to switch users to perform the update. 
+A common form of customization occurs when the existing kernel image is serving the fundamentals but the user wishes it be extended with additional libraries to prevent the need of their imports within the Notebook interactions. Since the image already meets the [basic requirements](#requirements-for-custom-kernel-images), this is really just a matter of referencing the existing image in the `FROM` statement and installing additional libraries. Because the EG kernel images do not run as the `root` user, you may need to switch users to perform the update. ```dockerfile FROM elyra/kernel-py:VERSION @@ -12,52 +13,59 @@ USER root # switch to root user to perform installation (if necessary) RUN pip install my-libraries -USER $NB_UID # switch back to the jovyan user +USER $NB_UID # switch back to the jovyan user ``` ## Bringing Your Own Kernel Image Users that do not wish to extend an existing kernel image must be cognizant of a couple of things. + 1. Requirements of a kernel-based image to be used by Enterprise Gateway. 2. Is the base image one from [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks)? ### Requirements for Custom Kernel Images -Custom kernel images require some support files from the Enterprise Gateway repository. These are packaged into a tar file for each release starting in `2.5.0`. This tar file (named `jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz`) is composed of a few files - one bootstrap script and a kernel launcher (one per kernel type). + +Custom kernel images require some support files from the Enterprise Gateway repository. These are packaged into a tar file for each release starting in `2.5.0`. This tar file (named `jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz`) is composed of a few files - one bootstrap script and a kernel launcher (one per kernel type). 
#### Bootstrap-kernel.sh -Enterprise Gateway provides a single [bootstrap-kernel.sh](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh) script that handles the three kernel languages supported out of the box - Python, R, and Scala. When a kernel image is started by Enterprise Gateway, parameters used within the bootstrap-kernel.sh script are conveyed via environment variables. The bootstrap script is then responsible for validating and converting those parameters to meaningful arguments to the appropriate launcher. + +Enterprise Gateway provides a single [bootstrap-kernel.sh](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh) script that handles the three kernel languages supported out of the box - Python, R, and Scala. When a kernel image is started by Enterprise Gateway, parameters used within the bootstrap-kernel.sh script are conveyed via environment variables. The bootstrap script is then responsible for validating and converting those parameters to meaningful arguments to the appropriate launcher. #### Kernel Launcher -The kernel launcher, as discussed [here](kernel-launcher.md) does a number of things. In particular, it creates the connection ports and conveys that connection information back to Enterprise Gateway via the socket identified by the response address parameter. Although not a requirement for container-based usage, it is recommended that the launcher be written in the same language as the kernel. (This is more of a requirement when used in applications like Hadoop YARN.) + +The kernel launcher, as discussed [here](kernel-launcher.md) does a number of things. In particular, it creates the connection ports and conveys that connection information back to Enterprise Gateway via the socket identified by the response address parameter. 
Although not a requirement for container-based usage, it is recommended that the launcher be written in the same language as the kernel. (This is more of a requirement when used in applications like Hadoop YARN.) ### About Jupyter Docker-stacks Images -Most of what is presented assumes the base image for your custom image is derived from the [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks) repository. As a result, it's good to cover what makes up those assumptions so you can build your own image independently of the docker-stacks repository. -All images produced from the docker-stacks repository come with a certain user configured. This user is named `jovyan` and is mapped to a user id (UID) of `1000` and a group id (GID) of `100` - named `users`. +Most of what is presented assumes the base image for your custom image is derived from the [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks) repository. As a result, it's good to cover what makes up those assumptions so you can build your own image independently of the docker-stacks repository. + +All images produced from the docker-stacks repository come with a certain user configured. This user is named `jovyan` and is mapped to a user id (UID) of `1000` and a group id (GID) of `100` - named `users`. The various startup scripts and commands typically reside in `/usr/local/bin` and we recommend trying to adhere to that policy. -The base jupyter image, upon which most all images from docker-stacks are built, also contains a `fix-permissions` script that is responsible for _gracefully_ adjusting permissions based on its given parameters. By only changing the necessary permissions, use of this script minimizes the size of the docker layer in which that command is invoked during the build of the docker image. 
+The base jupyter image, upon which most all images from docker-stacks are built, also contains a `fix-permissions` script that is responsible for _gracefully_ adjusting permissions based on its given parameters. By only changing the necessary permissions, use of this script minimizes the size of the docker layer in which that command is invoked during the build of the docker image. ### Sample Dockerfiles for Custom Kernel Images -Below we provide two working Dockerfiles that produce custom kernel images. One based on an existing image from Jupyter docker-stacks, the other from an independent base image. + +Below we provide two working Dockerfiles that produce custom kernel images. One based on an existing image from Jupyter docker-stacks, the other from an independent base image. #### Custom Kernel Image Built on Jupyter Image -Here's an example Dockerfile that installs the minimally necessary items for a Python-based kernel image built on the docker-stack image `jupyter/scipy-notebook`. Note: the string `VERSION` must be replaced with the appropriate value. + +Here's an example Dockerfile that installs the minimally necessary items for a Python-based kernel image built on the docker-stack image `jupyter/scipy-notebook`. Note: the string `VERSION` must be replaced with the appropriate value. ```dockerfile # Choose a base image. Preferrably one from https://github.com/jupyter/docker-stacks FROM jupyter/scipy-notebook:61d8aaedaeaf - + # Switch user to root since, if from docker-stacks, its probably jovyan USER root - + # Install any packages required for the kernel-wrapper. If the image # does not contain the target kernel (i.e., IPython, IRkernel, etc., # it should be installed as well. RUN pip install pycrypto -# Download and extract the enterprise gateway kernel launchers and bootstrap +# Download and extract the enterprise gateway kernel launchers and bootstrap # files and deploy to /usr/local/bin. Change permissions to NB_UID:NB_GID. 
RUN wget https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz &&\ tar -xvf jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz -C /usr/local/bin &&\ @@ -71,9 +79,10 @@ CMD /usr/local/bin/bootstrap-kernel.sh ``` #### Independent Custom Kernel Image -If your base image is not from docker-stacks, it is recommended that you NOT run the image as USER `root` and create an _image user_ that is not UID 0. For this example, we will create the `jovyan` user with UID `1000` and a primary group of `users`, GID `100`. Note that Enterprise Gateway makes no assumption relative to the user in which the kernel image is running. -Aside from configuring the image user, all other aspects of customization are the same. In this case, we'll use the tensorflow-gpu image and convert it to be usable via Enterprise Gateway as a custom kernel image. Note that because this image didn't have `wget` we used `curl` to download the supporting kernel-image files. +If your base image is not from docker-stacks, it is recommended that you NOT run the image as USER `root` and create an _image user_ that is not UID 0. For this example, we will create the `jovyan` user with UID `1000` and a primary group of `users`, GID `100`. Note that Enterprise Gateway makes no assumption relative to the user in which the kernel image is running. + +Aside from configuring the image user, all other aspects of customization are the same. In this case, we'll use the tensorflow-gpu image and convert it to be usable via Enterprise Gateway as a custom kernel image. Note that because this image didn't have `wget` we used `curl` to download the supporting kernel-image files. ```dockerfile FROM tensorflow/tensorflow:2.5.0-gpu-jupyter @@ -102,7 +111,7 @@ RUN pip install pycrypto # Download and extract the enterprise gateway kernel launchers and bootstrap # files and deploy to /usr/local/bin. Change permissions to NB_UID:NB_GID. 
RUN curl -L https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz | \ - tar -xz -C /usr/local/bin + tar -xz -C /usr/local/bin RUN adduser --system --uid 1000 --gid 100 jovyan && \ chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \ @@ -117,15 +126,18 @@ CMD /usr/local/bin/bootstrap-kernel.sh ``` ## Deploying Your Custom Kernel Image -The final step in deploying a customer kernel image is creating a corresponding kernel specifications directory that is available to Enterprise Gateway. Since Enterprise Gateway is also running in a container, its import that its kernel specifications directory either be mounted externally or a new Enterprise Gateway image is created with the appropriate directory in place. For the purposes of this discussion, we'll assume the kernel specifications directory, `/usr/local/share/jupyter/kernels`, is externally mounted. -- Find a similar kernel specification directory from which to create your custom kernel specification. The most important aspect to this is matching the language of your kernel since it will use the same [kernel launcher](#kernel-launcher). Another important question is whether your custom kernel uses Spark, because those kernel specifications will vary significantly since many of the spark options reside in the `kernel.json`'s `env` stanza. Since our examples use _vanilla_ (non-Spark) python kernels we'll use the `python_kubernetes` kernel specification as our basis. +The final step in deploying a customer kernel image is creating a corresponding kernel specifications directory that is available to Enterprise Gateway. Since Enterprise Gateway is also running in a container, its import that its kernel specifications directory either be mounted externally or a new Enterprise Gateway image is created with the appropriate directory in place. 
For the purposes of this discussion, we'll assume the kernel specifications directory, `/usr/local/share/jupyter/kernels`, is externally mounted. + +- Find a similar kernel specification directory from which to create your custom kernel specification. The most important aspect to this is matching the language of your kernel since it will use the same [kernel launcher](#kernel-launcher). Another important question is whether your custom kernel uses Spark, because those kernel specifications will vary significantly since many of the spark options reside in the `kernel.json`'s `env` stanza. Since our examples use _vanilla_ (non-Spark) python kernels we'll use the `python_kubernetes` kernel specification as our basis. + ```bash cd /usr/local/share/jupyter/kernels cp -r python_kubernetes python_myCustomKernel ``` - Edit the `kernel.json` file and change the `display_name:`, `image_name:` and path to `launch_kubernetes.py` script. + ```json { "language": "python", @@ -138,8 +150,7 @@ cp -r python_kubernetes python_myCustomKernel } } }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_myCustomKernel/scripts/launch_kubernetes.py", @@ -152,6 +163,7 @@ cp -r python_kubernetes python_myCustomKernel ] } ``` + - If using a whitelist (`EG_KERNEL_WHITELIST`), be sure to update it with the new kernel specification directory name (e.g., `python_myCustomKernel`) and restart/redeploy Enterprise Gateway. - Launch or refresh your Notebook session and confirm `My Custom Kernel` appears in the _new kernel_ drop-down. - Create a new notebook using `My Custom Kernel`. 
diff --git a/docs/source/developers/dev-process-proxy.md b/docs/source/developers/dev-process-proxy.md index 6e269e656..3d0d55159 100644 --- a/docs/source/developers/dev-process-proxy.md +++ b/docs/source/developers/dev-process-proxy.md @@ -1,7 +1,8 @@ # Implementing a process proxy -A process proxy implementation is necessary if you want to interact with a resource manager that is not currently supported or extend some existing behaviors. For example, recently, we've had [contributions](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/enterprise_gateway/services/processproxies/crd.py#L9) that interact with [Kubernetes Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions), which is an example of _extending_ the `KubernetesProcessProxy` to accomplish a slightly different task. -Examples of resource managers in which there's been some interest include [Slurm Workload Manager](https://slurm.schedmd.com/documentation.html) and [Apache Mesos](https://mesos.apache.org/), for example. In the end, it's really a matter of having access to an API and the ability to apply "tags" or "labels" in order to _discover_ where the kernel is running within the managed cluster. Once you have that information, then it becomes of matter of implementing the appropriate methods to control the kernel's lifecycle. +A process proxy implementation is necessary if you want to interact with a resource manager that is not currently supported or extend some existing behaviors. 
For example, recently, we've had [contributions](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/enterprise_gateway/services/processproxies/crd.py#L9) that interact with [Kubernetes Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions), which is an example of _extending_ the `KubernetesProcessProxy` to accomplish a slightly different task.
+
+Examples of resource managers in which there's been some interest include [Slurm Workload Manager](https://slurm.schedmd.com/documentation.html) and [Apache Mesos](https://mesos.apache.org/), for example. In the end, it's really a matter of having access to an API and the ability to apply "tags" or "labels" in order to _discover_ where the kernel is running within the managed cluster. Once you have that information, then it becomes a matter of implementing the appropriate methods to control the kernel's lifecycle.

```{admonition} Important!
:class: error
@@ -14,12 +15,13 @@ That said, if you and your organization plan to stay on Enterprise Gateway 2.x o
```

## General approach
-Please refer to the [Process Proxy section](../contributors/system-architecture.md#process-proxy) in the System Architecture pages for descriptions and structure of existing process proxies. Here is the general guideline for the process of implementing a process proxy.
-1. Identify and understand how to _decorate_ your "job" within the resource manager. In Hadoop YARN, this is done by using the kernel's ID as the _application name_ by setting the [`--name` parameter to `${KERNEL_ID}`](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernelspecs/spark_python_yarn_cluster/kernel.json#L14).
In Kubernetes, we apply the kernel's ID to the [`kernel-id` label on the POD](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2#L16). -2. Today, all invocations of kernels into resource managers use a shell or python script mechanism configured into the `argv` stanza of the kernelspec. If you take this approach, you need to apply the necessary changes to integrate with your resource manager. -3. Determine how to interact with the resource manager's API to _discover_ the kernel and determine on which host it's running. This interaction should occur immediately following Enterprise Gateway's receipt of the kernel's connection information in its response from the kernel launcher. This extra step, performed within `confirm_remote_startup()`, is necessary to get the appropriate host name as reflected in the resource manager's API. -4. Determine how to monitor the "job" using the resource manager API. This will become part of the `poll()` implementation to determine if the kernel is still running. This should be as quick as possible since it occurs every 3 seconds. If this is an expensive call, you may need to make some adjustments like skip the call every so often. +Please refer to the [Process Proxy section](../contributors/system-architecture.md#process-proxy) in the System Architecture pages for descriptions and structure of existing process proxies. Here is the general guideline for the process of implementing a process proxy. + +1. Identify and understand how to _decorate_ your "job" within the resource manager. In Hadoop YARN, this is done by using the kernel's ID as the _application name_ by setting the [`--name` parameter to `${KERNEL_ID}`](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernelspecs/spark_python_yarn_cluster/kernel.json#L14). 
In Kubernetes, we apply the kernel's ID to the [`kernel-id` label on the POD](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2#L16). +2. Today, all invocations of kernels into resource managers use a shell or python script mechanism configured into the `argv` stanza of the kernelspec. If you take this approach, you need to apply the necessary changes to integrate with your resource manager. +3. Determine how to interact with the resource manager's API to _discover_ the kernel and determine on which host it's running. This interaction should occur immediately following Enterprise Gateway's receipt of the kernel's connection information in its response from the kernel launcher. This extra step, performed within `confirm_remote_startup()`, is necessary to get the appropriate host name as reflected in the resource manager's API. +4. Determine how to monitor the "job" using the resource manager API. This will become part of the `poll()` implementation to determine if the kernel is still running. This should be as quick as possible since it occurs every 3 seconds. If this is an expensive call, you may need to make some adjustments like skip the call every so often. 5. Determine how to terminate "jobs" using the resource manager API. This will become part of the termination sequence, but probably only necessary if the message-based shutdown does not work (i.e., a last resort). ```{tip} @@ -28,10 +30,8 @@ Because kernel IDs are globally unique, they serve as ideal identifiers for disc You will likely need to provide implementations for `launch_process()`, `poll()`, `wait()`, `send_signal()`, and `kill()`, although, depending on where your process proxy resides in the class hierarchy, some implementations may be reused. 
-For example, if your process proxy is going to service remote kernels, you should consider deriving your implementation from the [`RemoteProcessProxy` class](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/enterprise_gateway/services/processproxies/processproxy.py#L981). If this is the case, then you'll need to implement `confirm_remote_startup()`. +For example, if your process proxy is going to service remote kernels, you should consider deriving your implementation from the [`RemoteProcessProxy` class](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/enterprise_gateway/services/processproxies/processproxy.py#L981). If this is the case, then you'll need to implement `confirm_remote_startup()`. -Likewise, if your process proxy is based on containers, you should consider deriving your implementation from the [`ContainerProcessProxy`](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/enterprise_gateway/services/processproxies/container.py#L34). If this is the case, then you'll need to implement `get_container_status()` and `terminate_container_resources()` rather than `confirm_remote_startup()`, etc. +Likewise, if your process proxy is based on containers, you should consider deriving your implementation from the [`ContainerProcessProxy`](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/enterprise_gateway/services/processproxies/container.py#L34). If this is the case, then you'll need to implement `get_container_status()` and `terminate_container_resources()` rather than `confirm_remote_startup()`, etc. Once the process proxy has been implemented, construct an appropriate kernel specification that references your process proxy and iterate until you are satisfied with how your remote kernels behave. 
- - diff --git a/docs/source/developers/kernel-launcher.md b/docs/source/developers/kernel-launcher.md index 4f8b54905..dae86e496 100644 --- a/docs/source/developers/kernel-launcher.md +++ b/docs/source/developers/kernel-launcher.md @@ -1,35 +1,42 @@ # Implementing a kernel launcher -A new implementation for a [_kernel launcher_](../contributors/system-architecture.md#kernel-launchers) becomes necessary when you want to introduce another kind of kernel to an existing configuration. Out of the box, Enterprise Gateway provides [kernel launchers](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kernel-launchers) that support the IPython kernel, the Apache Toree scala kernel, and the R kernel - IRKernel. There are other "language-agnostic kernel launchers" provided by Enterprise Gateway, but those are used in container environments to start the container or pod where the "kernel image" uses on the three _language-based_ launchers to start the kernel within the container. + +A new implementation for a [_kernel launcher_](../contributors/system-architecture.md#kernel-launchers) becomes necessary when you want to introduce another kind of kernel to an existing configuration. Out of the box, Enterprise Gateway provides [kernel launchers](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kernel-launchers) that support the IPython kernel, the Apache Toree scala kernel, and the R kernel - IRKernel. There are other "language-agnostic kernel launchers" provided by Enterprise Gateway, but those are used in container environments to start the container or pod where the "kernel image" uses on the three _language-based_ launchers to start the kernel within the container. Its generally recommended that the launcher be written in the language of the kernel, but that is not a requirement so long as the launcher can start and manage the kernel's lifecycle and issue interrupts (if the kernel does not support message-based interrupts itself). 
To reiterate, the four tasks of a kernel launcher are: + 1. Create the necessary connection information based on the 5 zero-mq ports, a signature key and algorithm specifier, along with a _gateway listener_ socket. 2. Conveyance of the connection (and listener socket) information back to the Enterprise Gateway process after encrypting the information using AES, then encrypting the AES key using the provided public key. 3. Invocation of the target kernel. 4. Listen for interrupt and shutdown requests from Enterprise Gateway on the communication socket and carry out the action when appropriate. ## Creating the connection information -If your target kernel exists, then there is probably support for creating ZeroMQ ports. If this proves difficult, you may be able to take a _hybrid approach_ where the connection information, encryption and listener portion of things is implemented in Python, while invocation takes place in the native language. This is how the [R kernel-launcher](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kernel-launchers/R/scripts) support is implemented. + +If your target kernel exists, then there is probably support for creating ZeroMQ ports. If this proves difficult, you may be able to take a _hybrid approach_ where the connection information, encryption and listener portion of things is implemented in Python, while invocation takes place in the native language. This is how the [R kernel-launcher](https://github.com/jupyter-server/enterprise_gateway/tree/master/etc/kernel-launchers/R/scripts) support is implemented. When creating the connection information, your kernel launcher should handle the possibility that the `--port-range` option has been specified such that each port should reside within the specified range. -The port used between Enterprise Gateway and the launcher, known as the _communication port_ should also adhere to the port range. 
It is not required that this port be ZeroMQ (and is not a ZMQ port in existing implementations).
+The port used between Enterprise Gateway and the launcher, known as the _communication port_ should also adhere to the port range. It is not required that this port be ZeroMQ (and is not a ZMQ port in existing implementations).

## Encrypting the connection information
-The next task of the kernel launcher is sending the connection information back to the Enterprise Gateway server. Prior to doing this, the connection information, including the communication port, are encrypted using AES encryption and a 16-byte key. The AES key is then encrypted using the public key specified in the `public_key` parameter. These two fields (the AES-encrypted payload and the publice-key-encrypted AES key) are then included into a JSON structure that also include the launcher's version information and base64 encoded. Here's such an example from the [Python kernel launcher](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L188-L209).
+
+The next task of the kernel launcher is sending the connection information back to the Enterprise Gateway server. Prior to doing this, the connection information, including the communication port, is encrypted using AES encryption and a 16-byte key. The AES key is then encrypted using the public key specified in the `public_key` parameter. These two fields (the AES-encrypted payload and the public-key-encrypted AES key) are then included into a JSON structure that also includes the launcher's version information and base64 encoded. Here's such an example from the [Python kernel launcher](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L188-L209).
The payload is then [sent back on a socket](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L212-L256) identified by the `--response-address` option. ## Invoking the target kernel -For the Python kernel launcher it merely [embeds](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L382) the kernel using a facility provided by the IPython kernel. For the R kernel launcher, the kernel is started using [`IRKernel::main()`](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/R/scripts/launch_IRkernel.R#L252). The scala kernel launcher works similarly in that the kernel provides an "entrypoint" to start the kernel. + +For the Python kernel launcher it merely [embeds](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L382) the kernel using a facility provided by the IPython kernel. For the R kernel launcher, the kernel is started using [`IRKernel::main()`](https://github.com/jupyter-server/enterprise_gateway/blob/54c8e31d9b17418f35454b49db691d2ce5643c22/etc/kernel-launchers/R/scripts/launch_IRkernel.R#L252). The scala kernel launcher works similarly in that the kernel provides an "entrypoint" to start the kernel. ## Listening for interrupt and shutdown requests -The last task that must be performed by a kernel launcher is to listen on the communication port for work. There are currently two requests sent on the port, a signal event and a shutdown request. -The signal event is of the form `{"signum": n}` where the string `'signum'` indicates a signal event and `'n'` is an integer specifying the signal number to send to the kernel. 
Typically, the value of 'n' is `2` representing `SIGINT` and used to interrupt any current processing. As more kernels adopt a message-based interrupt approach, this will not be as common. Enterprise Gateway also uses this event to perform its `poll()` implementation by sending `{"signum": 0}`. Raising a signal of 0 to a process is common way to determine the process is still alive. +The last task that must be performed by a kernel launcher is to listen on the communication port for work. There are currently two requests sent on the port, a signal event and a shutdown request. + +The signal event is of the form `{"signum": n}` where the string `'signum'` indicates a signal event and `'n'` is an integer specifying the signal number to send to the kernel. Typically, the value of 'n' is `2` representing `SIGINT` and used to interrupt any current processing. As more kernels adopt a message-based interrupt approach, this will not be as common. Enterprise Gateway also uses this event to perform its `poll()` implementation by sending `{"signum": 0}`. Raising a signal of 0 to a process is common way to determine the process is still alive. -The event is a shutdown request. This is sent when the process proxy has typically terminated the kernel and it's just performing its final cleanup. The form of this request is `{"shutdown": 1}`. This is what instructs the launcher to abandon listening on the communication socket and to exit. +The event is a shutdown request. This is sent when the process proxy has typically terminated the kernel and it's just performing its final cleanup. The form of this request is `{"shutdown": 1}`. This is what instructs the launcher to abandon listening on the communication socket and to exit. ## Other parameters -Besides `--port-range`, `--public-key`, and `--response-address`, the kernel launcher needs to support `--kernel-id` that indicates the kernel's ID as known to the Gateway server. 
It should also tolerate the existence of `--spark-context-initialization-mode` but, unless applicable for Spark enviornments, should only support values of `"none"` for this option. + +Besides `--port-range`, `--public-key`, and `--response-address`, the kernel launcher needs to support `--kernel-id` that indicates the kernel's ID as known to the Gateway server. It should also tolerate the existence of `--spark-context-initialization-mode` but, unless applicable for Spark enviornments, should only support values of `"none"` for this option. diff --git a/docs/source/developers/kernel-library.md b/docs/source/developers/kernel-library.md index 376e146fe..d830e5fef 100644 --- a/docs/source/developers/kernel-library.md +++ b/docs/source/developers/kernel-library.md @@ -1,6 +1,6 @@ -# Standalone Remote Kernel Execution +# Standalone Remote Kernel Execution -Remote kernels can be executed by using the `RemoteKernelManager` class directly. This enables running kernels using `ProcessProxy`s without requiring deployment of the Enterprise Gateway web application. This approach is also known as _Library Mode_. +Remote kernels can be executed by using the `RemoteKernelManager` class directly. This enables running kernels using `ProcessProxy`s without requiring deployment of the Enterprise Gateway web application. This approach is also known as _Library Mode_. This can be useful in niche situations, for example, using [nbconvert](https://nbconvert.readthedocs.io/) or [nbclient](https://nbclient.readthedocs.io/) to execute a kernel on a remote cluster. @@ -18,6 +18,6 @@ client = NotebookClient(nb=test_notebook, kernel_manager_class=RemoteKernelManag client.execute() ``` -The above code will execute the notebook on a kernel named `my_remote_kernel` using its configured `ProcessProxy`. +The above code will execute the notebook on a kernel named `my_remote_kernel` using its configured `ProcessProxy`. 
-Depending on the process proxy, the _hosting application_ (e.g., `nbclient`) will likely need to be configured to run on the same network as the remote kernel. So, for example, with Kubernetes, `nbclient` would need to be configured as a Kubernetes POD. +Depending on the process proxy, the _hosting application_ (e.g., `nbclient`) will likely need to be configured to run on the same network as the remote kernel. So, for example, with Kubernetes, `nbclient` would need to be configured as a Kubernetes POD. diff --git a/docs/source/developers/kernel-manager.md b/docs/source/developers/kernel-manager.md index 1829641aa..dd1e73e5a 100644 --- a/docs/source/developers/kernel-manager.md +++ b/docs/source/developers/kernel-manager.md @@ -1,11 +1,13 @@ # Using Jupyter Server's `GatewayKernelManager` + Another way to expose other Jupyter applications like `nbclient` or `papermill` to remote kernels is to use the [`GatewayKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L317) (and, implicitly, [`GatewayKernelClient`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L562)) classes that are embedded in Jupyter Server. -These classes essentially emulate the lower level [`KernelManager`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/manager.py#L84) and [`KernelClient`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/client.py#L75) classes but _forward_ their requests to/from a configured gateway server. Their necessary configuration for interacting with the gateway server is set on the [`GatewayClient` configurable](../users/client-config.md#gateway-client-configuration). 
+These classes essentially emulate the lower level [`KernelManager`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/manager.py#L84) and [`KernelClient`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/client.py#L75) classes but _forward_ their requests to/from a configured gateway server. Their necessary configuration for interacting with the gateway server is set on the [`GatewayClient` configurable](../users/client-config.md#gateway-client-configuration). This allows for the _hosting application_ to remain **outside** the resource-managed cluster since the kernel is actually being managed by the target gateway server. So, using the previous example, one my have... + ```python import nbformat from nbclient import NotebookClient @@ -19,9 +21,10 @@ with open("my_notebook.ipynb") as fp: gw_client = GatewayClient.instance() gw_client.url = "http://my-gateway-server.com:8888" -client = NotebookClient(nb=test_notebook, +client = NotebookClient(nb=test_notebook, kernel_manager_class=GatewayKernelManager, kernel_name='my_remote_kernel') client.execute() ``` -In this case, `my_remote_kernel`'s kernel specification file actually resides on the Gateway server. `NotebookClient` will _think_ its talking to local `KernelManager` and `KernelClient` instances, when, in actuality, they are forwarding requests to (and getting response from) the Gateway server at 'http://my-gateway-server.com:8888'. + +In this case, `my_remote_kernel`'s kernel specification file actually resides on the Gateway server. `NotebookClient` will _think_ its talking to local `KernelManager` and `KernelClient` instances, when, in actuality, they are forwarding requests to (and getting response from) the Gateway server at 'http://my-gateway-server.com:8888'. 
diff --git a/docs/source/developers/kernel-specification.md b/docs/source/developers/kernel-specification.md index 1ae52b46f..98229ae1d 100644 --- a/docs/source/developers/kernel-specification.md +++ b/docs/source/developers/kernel-specification.md @@ -1,11 +1,13 @@ # Implementing a kernel specification -If you find yourself [implementing a kernel launcher](kernel-launcher.md), you'll need a way to make that kernel and kernel launcher available to applications. This is accomplished via the _kernel specification_ or _kernelspec_. -Kernelspecs reside in well-known directories. For Enterprise Gateway, we generally recommend they reside in `/usr/local/share/jupyter/kernels` where each entry in this directory is a directory representing the name of the kernel. The kernel specification is represented by the file `kernel.json`, the contents of which essentially indicate what environment variables should be present in the kernel process (via the `env` _stanza_) and which command (and arguments) should be issued to start the kernel process (via the `argv` _stanza_). The JSON also includes a `metadata` stanza that contains the process_proxy configuration, along with which process proxy class to instantiate to help manage the kernel process's lifecycle. +If you find yourself [implementing a kernel launcher](kernel-launcher.md), you'll need a way to make that kernel and kernel launcher available to applications. This is accomplished via the _kernel specification_ or _kernelspec_. -One approach the sample Enterprise Gateway kernel specifications take is to include a shell script that actually issues the `spark-submit` request. It is this shell script (typically named `run.sh`) that is referenced in the `argv` stanza. +Kernelspecs reside in well-known directories. For Enterprise Gateway, we generally recommend they reside in `/usr/local/share/jupyter/kernels` where each entry in this directory is a directory representing the name of the kernel. 
The kernel specification is represented by the file `kernel.json`, the contents of which essentially indicate what environment variables should be present in the kernel process (via the `env` _stanza_) and which command (and arguments) should be issued to start the kernel process (via the `argv` _stanza_). The JSON also includes a `metadata` stanza that contains the process_proxy configuration, along with which process proxy class to instantiate to help manage the kernel process's lifecycle. + +One approach the sample Enterprise Gateway kernel specifications take is to include a shell script that actually issues the `spark-submit` request. It is this shell script (typically named `run.sh`) that is referenced in the `argv` stanza. Here's an example from the [`spark_python_yarn_cluster`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) kernel specification: + ```JSON { "language": "python", @@ -38,7 +40,9 @@ Here's an example from the [`spark_python_yarn_cluster`](https://github.com/jupy ] } ``` + where [`run.sh`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh) issues `spark-submit` specifying the kernel launcher as the "application": + ```bash eval exec \ "${SPARK_HOME}/bin/spark-submit" \ @@ -49,7 +53,8 @@ eval exec \ "$@" ``` -For container-based environments, the `argv` may instead reference a script that is meant to create the container pod (for Kubernetes). For these, we use a [template file](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) that operators can adjust to meet the needs of their environment. Here's how that `kernel.json` looks: +For container-based environments, the `argv` may instead reference a script that is meant to create the container pod (for Kubernetes). 
For these, we use a [template file](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) that operators can adjust to meet the needs of their environment. Here's how that `kernel.json` looks: + ```json { "language": "python", @@ -63,8 +68,7 @@ For container-based environments, the `argv` may instead reference a script that }, "debugger": true }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py", @@ -79,4 +83,5 @@ For container-based environments, the `argv` may instead reference a script that ] } ``` + As should be evident, kernel specifications are highly tuned to the runtime environment so your needs may be different, but _should_ resemble the approaches we've taken so far. diff --git a/docs/source/developers/rest-api.rst b/docs/source/developers/rest-api.rst index 05d186daa..3cec0cf8a 100644 --- a/docs/source/developers/rest-api.rst +++ b/docs/source/developers/rest-api.rst @@ -386,4 +386,3 @@ OpenAPI Specification Here's the current `OpenAPI `_ specification available from Enterprise Gateway. An interactive version is available `here `_. .. openapi:: ../../../enterprise_gateway/services/api/swagger.yaml - diff --git a/docs/source/operators/config-add-env.md b/docs/source/operators/config-add-env.md index d0ce0cbb9..2c7282603 100644 --- a/docs/source/operators/config-add-env.md +++ b/docs/source/operators/config-add-env.md @@ -1,5 +1,7 @@ # Additional environment variables + Besides those environment variables associated with configurable options, the following environment variables can also be used to influence functionality: + ```text EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME=default Kubernetes only. This value indicates the default service account name to use for @@ -92,21 +94,21 @@ Besides those environment variables associated with configurable options, the fo in an exception indicating error 403 (Forbidden). 
See also EG_PROHIBITED_GIDS. EG_RESPONSE_IP=None - Experimental. The IP address to use to formulate the response address (with - `EG_RESPONSE_PORT`). By default, the server's IP is used. However, we may find + Experimental. The IP address to use to formulate the response address (with + `EG_RESPONSE_PORT`). By default, the server's IP is used. However, we may find it necessary to use a different IP in cases where the target kernels are external - to the Enterprise Gateway server (for example). It's value may also need to be + to the Enterprise Gateway server (for example). It's value may also need to be set in cases where the computed (default) is not correct for the current topology. EG_RESPONSE_PORT=8877 - The single response port used to receive connection information + The single response port used to receive connection information from launched kernels. EG_RESPONSE_PORT_RETRIES=10 The number of retries to attempt when the original response port - (EG_RESPONSE_PORT) is found to be in-use. This value should be + (EG_RESPONSE_PORT) is found to be in-use. This value should be set to 0 (zero) if no port retries are desired. - + EG_SHARED_NAMESPACE=False Kubernetes only. This value indicates whether (True) or not (False) all kernel pods should reside in the same namespace as Enterprise Gateway. This is not a recommended @@ -130,4 +132,4 @@ Besides those environment variables associated with configurable options, the fo EG_YARN_CERT_BUNDLE= The path to a .pem or any other custom truststore used as a CA bundle in yarn-api-client. -``` \ No newline at end of file +``` diff --git a/docs/source/operators/config-cli.md b/docs/source/operators/config-cli.md index c548ef783..db049750f 100644 --- a/docs/source/operators/config-cli.md +++ b/docs/source/operators/config-cli.md @@ -1,5 +1,6 @@ # Command-line options -In some cases, it may be easier to use command line options. 
These can also be used for _static_ values that should not be the targeted for [_dynamic configurables_](config-dynamic.md/#dynamic-configurables). + +In some cases, it may be easier to use command line options. These can also be used for _static_ values that should not be the targeted for [_dynamic configurables_](config-dynamic.md/#dynamic-configurables). To see the same configuration options at the command line, run the following: @@ -7,7 +8,7 @@ To see the same configuration options at the command line, run the following: jupyter enterprisegateway --help-all ``` -A snapshot of this help appears below for ease of reference. The options for the superclass `EnterpriseGatewayConfigMixin` have been omitted. As with the `--generate-config` option, each option includes its corresponding environment variable, if applicable. +A snapshot of this help appears below for ease of reference. The options for the superclass `EnterpriseGatewayConfigMixin` have been omitted. As with the `--generate-config` option, each option includes its corresponding environment variable, if applicable. ```text Jupyter Enterprise Gateway diff --git a/docs/source/operators/config-culling.md b/docs/source/operators/config-culling.md index 8c9afd36d..2d3279be0 100644 --- a/docs/source/operators/config-culling.md +++ b/docs/source/operators/config-culling.md @@ -1,4 +1,5 @@ # Culling idle kernels + With the adoption of notebooks and interactive development for data science, a new "resource utilization" pattern has arisen, where kernel resources are locked for a given notebook, but due to interactive development processes it might be idle for a long period of time causing the cluster resources to starve. One way to workaround this problem is to enable the culling of idle kernels after a specific timeout period. Idle kernel culling is set to “off” by default. 
It’s enabled by setting `--RemoteKernelManager.cull_idle_timeout` to a positive value representing the number of seconds a kernel must remain idle to be culled (default: 0, recommended: 43200, 12 hours). diff --git a/docs/source/operators/config-dynamic.md b/docs/source/operators/config-dynamic.md index 7a1cfad8f..1e2a85763 100644 --- a/docs/source/operators/config-dynamic.md +++ b/docs/source/operators/config-dynamic.md @@ -1,17 +1,19 @@ # Dynamic configurables + Enterprise Gateway also supports the ability to update configuration variables without having to -restart Enterprise Gateway. This enables the ability to do things like enable debug logging or +restart Enterprise Gateway. This enables the ability to do things like enable debug logging or adjust the maximum number of kernels per user, all without having to restart Enterprise Gateway. To enable dynamic configurables configure `EnterpriseGatewayApp.dynamic_config_interval` to a -positive value (default is 0 or disabled). Since this is the number of seconds to poll Enterprise Gateway's configuration files, -a value greater than 60 (1 minute) is recommended. This functionality works for most configuration +positive value (default is 0 or disabled). Since this is the number of seconds to poll Enterprise Gateway's configuration files, +a value greater than 60 (1 minute) is recommended. This functionality works for most configuration values, but does have the following caveats: + 1. Any configuration variables set on the command line (CLI) or via environment variables are - NOT eligible for dynamic updates. This is because Jupyter gives those values priority over + NOT eligible for dynamic updates. This is because Jupyter gives those values priority over file-based configuration variables. 2. Any configuration variables tied to background processing may not reflect their update if - the variable is not *observed* for changes. For example, the code behind + the variable is not _observed_ for changes. 
For example, the code behind `RemoteKernelManager.cull_idle_timeout` may not reflect changes to the timeout period if that variable is not monitored (i.e., observed) for changes. 3. Only `Configurables` registered by Enterprise Gateway are eligible for dynamic updates. @@ -23,6 +25,5 @@ As a result, operators and adminstrators are encouraged to configure Enterprise Note that if `EnterpriseGatewayApp.dynamic_config_interval` is configured with a positive value via the configuration file (i.e., is eligible for updates) and is subsequently set to 0, then dynamic configuration updates will be disabled until Enterprise Gateway is restarted with a -positive value. Therefore, we recommend `EnterpriseGatewayApp.dynamic_config_interval` be +positive value. Therefore, we recommend `EnterpriseGatewayApp.dynamic_config_interval` be configured via the command line or environment. - diff --git a/docs/source/operators/config-env-debug.md b/docs/source/operators/config-env-debug.md index 287e031dc..79bae20aa 100644 --- a/docs/source/operators/config-env-debug.md +++ b/docs/source/operators/config-env-debug.md @@ -1,5 +1,7 @@ # Environment variables that assist in troubleshooting + The following environment variables may be useful for troubleshooting: + ```text EG_DOCKER_LOG_LEVEL=WARNING By default, the docker client library is too verbose for its logging. This @@ -23,7 +25,7 @@ The following environment variables may be useful for troubleshooting: should rarely be necessary. EG_POLL_INTERVAL=0.5 - The interval (in seconds) to wait before checking poll results again. + The interval (in seconds) to wait before checking poll results again. 
EG_REMOVE_CONTAINER=True Used by launch_docker.py, indicates whether the kernel's docker container should be diff --git a/docs/source/operators/config-file.md b/docs/source/operators/config-file.md index 7be736d16..4ee1c49f9 100644 --- a/docs/source/operators/config-file.md +++ b/docs/source/operators/config-file.md @@ -1,18 +1,22 @@ # Configuration file options + Placing configuration options into the configuration file `jupyter_enterprise_gateway_config.py` is recommended because this will enabled the use of the [_dynamic configurables_](config-dynamic.md/#dynamic-configurables) functionality. To generate a template configuration file, run the following: ```bash jupyter enterprisegateway --generate-config ``` -This command will produce a `jupyter_enterprise_gateway_config.py` file, typically located in the invoking user's `$HOME/.jupyter` directory. The file contains python code, including comments, relative to each available configuration option. The actual option itself will also be commented out. To enable that option, set its value and uncomment the code. + +This command will produce a `jupyter_enterprise_gateway_config.py` file, typically located in the invoking user's `$HOME/.jupyter` directory. The file contains python code, including comments, relative to each available configuration option. The actual option itself will also be commented out. To enable that option, set its value and uncomment the code. ```{Note} Some options may appear duplicated. For example, the `remote_hosts` trait appears on both `c.EnterpriseGatewayConfigMixin` and `c.EnterpriseGatewayApp`. This is due to how configurable traits appear in the class hierarchy. Since `EnterpriseGatewayApp` derives from `EnterpriseGatewayConfigMixin` and both are configurable classes, the output contains duplicated values. If both values are set, the value _closest_ to the derived class will be used (in this case, `EnterpriseGatewayApp`). ``` -Here's an example entry. 
Note that its default value, when defined, is also displayed, along with the corresponding environment variable name: + +Here's an example entry. Note that its default value, when defined, is also displayed, along with the corresponding environment variable name: + ```python ## Bracketed comma-separated list of hosts on which DistributedProcessProxy -# kernels will be launched e.g., ['host1','host2']. +# kernels will be launched e.g., ['host1','host2']. # (EG_REMOTE_HOSTS env var - non-bracketed, just comma-separated) # Default: ['localhost'] # c.EnterpriseGatewayConfigMixin.remote_hosts = ['localhost'] diff --git a/docs/source/operators/config-kernel-override.md b/docs/source/operators/config-kernel-override.md index f86774c1a..e9599d362 100644 --- a/docs/source/operators/config-kernel-override.md +++ b/docs/source/operators/config-kernel-override.md @@ -1,43 +1,45 @@ # Per-kernel overrides + As mentioned in the overview of [Process Proxy Configuration](../contributors/system-architecture.md#process-proxy-configuration) -capabilities, it's possible to override or amend specific system-level configuration values on a per-kernel basis. These capabilities can be implemented with the kernel specification's process-proxy `config` stanza or via environment variables. +capabilities, it's possible to override or amend specific system-level configuration values on a per-kernel basis. These capabilities can be implemented with the kernel specification's process-proxy `config` stanza or via environment variables. ## Per-kernel configuration overrides The following enumerates the set of per-kernel configuration overrides: -* `remote_hosts`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.remote_hosts`. - Any values specified in the config dictionary override the globally defined values. These apply to all +- `remote_hosts`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.remote_hosts`. 
+ Any values specified in the config dictionary override the globally defined values. These apply to all `DistributedProcessProxy` kernels. -* `yarn_endpoint`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.yarn_endpoint`. - Any values specified in the config dictionary override the globally defined values. These apply to all - `YarnClusterProcessProxy` kernels. Note that you'll likely be required to specify a different `HADOOP_CONF_DIR` +- `yarn_endpoint`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.yarn_endpoint`. + Any values specified in the config dictionary override the globally defined values. These apply to all + `YarnClusterProcessProxy` kernels. Note that you'll likely be required to specify a different `HADOOP_CONF_DIR` setting in the kernel.json's `env` stanza in order of the `spark-submit` command to target the appropriate YARN cluster. -* `authorized_users`: This process proxy configuration entry can be used to override - `--EnterpriseGatewayApp.authorized_users`. Any values specified in the config dictionary override the globally - defined values. These values apply to **all** process-proxy kernels, including the default `LocalProcessProxy`. Note +- `authorized_users`: This process proxy configuration entry can be used to override + `--EnterpriseGatewayApp.authorized_users`. Any values specified in the config dictionary override the globally + defined values. These values apply to **all** process-proxy kernels, including the default `LocalProcessProxy`. Note that the typical use-case for this value is to not set `--EnterpriseGatewayApp.authorized_users` at the global level, but then restrict access at the kernel level. -* `unauthorized_users`: This process proxy configuration entry can be used to **_amend_** - `--EnterpriseGatewayApp.unauthorized_users`. Any values specified in the config dictionary are **added** to the - globally defined values. 
As a result, once a user is denied access at the global level, they will _always be denied - access at the kernel level_. These values apply to **all** process-proxy kernels, including the default +- `unauthorized_users`: This process proxy configuration entry can be used to **_amend_** + `--EnterpriseGatewayApp.unauthorized_users`. Any values specified in the config dictionary are **added** to the + globally defined values. As a result, once a user is denied access at the global level, they will _always be denied + access at the kernel level_. These values apply to **all** process-proxy kernels, including the default `LocalProcessProxy`. -* `port_range`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.port_range`. - Any values specified in the config dictionary override the globally defined values. These apply to all +- `port_range`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.port_range`. + Any values specified in the config dictionary override the globally defined values. These apply to all `RemoteProcessProxy` kernels. ## Per-kernel environment overrides + In some cases, it is useful to allow specific values that exist in a kernel.json `env` stanza to be -overridden on a per-kernel basis. For example, if the kernel.json supports resource limitations you -may want to allow some requests to have access to more memory or GPUs than another. Enterprise +overridden on a per-kernel basis. For example, if the kernel.json supports resource limitations you +may want to allow some requests to have access to more memory or GPUs than another. Enterprise Gateway enables this capability by honoring environment variables provided in the json request over those same-named variables in the kernel.json `env` stanza. 
Environment variables for which this can occur are any variables prefixed with `KERNEL_` as well as any variables listed in the `EnterpriseGatewayApp.env_whitelist` configurable trait (or via -the `EG_ENV_WHITELIST` variable). Locally defined variables listed in `EG_PROCESS_ENV_WHITELIST` +the `EG_ENV_WHITELIST` variable). Locally defined variables listed in `EG_PROCESS_ENV_WHITELIST` are also available for replacement in the kernel process' environment. See [Kernel Environment Variables](../users/kernel-envs.md) in the Users documentation section for a complete set of recognized `KERNEL_` variables. diff --git a/docs/source/operators/config-security.md b/docs/source/operators/config-security.md index b5644bf57..141d29e1c 100644 --- a/docs/source/operators/config-security.md +++ b/docs/source/operators/config-security.md @@ -1,42 +1,45 @@ # Configuring Security Jupyter Enterprise Gateway does not currently perform user _authentication_ but, instead, assumes that all users -issuing requests have been previously authenticated. Recommended applications for this are -[Apache Knox](https://knox.apache.org/) or [Jupyter Hub](https://jupyterhub.readthedocs.io/en/latest/) +issuing requests have been previously authenticated. Recommended applications for this are +[Apache Knox](https://knox.apache.org/) or [Jupyter Hub](https://jupyterhub.readthedocs.io/en/latest/) (e.g., if gateway-enabled notebook servers were spawned targeting an Enterprise Gateway cluster). This section introduces some security features inherent in Enterprise Gateway (with more to come). ## KERNEL_USERNAME -In order to convey the name of the authenticated user, `KERNEL_USERNAME` should be sent in the kernel creation request -via the `env:` entry. This will occur automatically within the gateway-enabled Notebook server since it propagates all environment variables -prefixed with `KERNEL_`. 
If the request does not include a `KERNEL_USERNAME` entry, one will be added to the kernel's +In order to convey the name of the authenticated user, `KERNEL_USERNAME` should be sent in the kernel creation request +via the `env:` entry. This will occur automatically within the gateway-enabled Notebook server since it propagates all environment variables +prefixed with `KERNEL_`. If the request does not include a `KERNEL_USERNAME` entry, one will be added to the kernel's launch environment with the value of the gateway user. This value is then used within the _authorization_ and _impersonation_ functionality. ## Authorization -By default, all users are authorized to start kernels. This behavior can be adjusted when situations arise where -more control is required. Basic authorization can be expressed in two ways. + +By default, all users are authorized to start kernels. This behavior can be adjusted when situations arise where +more control is required. Basic authorization can be expressed in two ways. ### Authorized Users + The command-line or configuration file option: `EnterpriseGatewayApp.authorized_users` can be specified to contain a -list of user names indicating which users are permitted to launch kernels within the current gateway server. +list of user names indicating which users are permitted to launch kernels within the current gateway server. -On each kernel launched, the authorized users list is searched for the value of `KERNEL_USERNAME` (case-sensitive). If +On each kernel launched, the authorized users list is searched for the value of `KERNEL_USERNAME` (case-sensitive). If the user is found in the list the kernel's launch sequence continues, otherwise HTTP Error 403 (Forbidden) is raised and the request fails. 
```{warning} -Since the `authorized_users` option must be exhaustive, it should be used only in situations where a small +Since the `authorized_users` option must be exhaustive, it should be used only in situations where a small and limited set of users are allowed access and empty otherwise. ``` - + ### Unauthorized Users + The command-line or configuration file option: `EnterpriseGatewayApp.unauthorized_users` can be specified to contain a -list of user names indicating which users are **NOT** permitted to launch kernels within the current gateway server. -The `unauthorized_users` list is always checked prior to the `authorized_users` list. If the value of `KERNEL_USERNAME` +list of user names indicating which users are **NOT** permitted to launch kernels within the current gateway server. +The `unauthorized_users` list is always checked prior to the `authorized_users` list. If the value of `KERNEL_USERNAME` appears in the `unauthorized_users` list, the request is immediately failed with the same 403 (Forbidden) HTTP Error. From a system security standpoint, privileged users (e.g., `root` and any users allowed `sudo` privileges) should be @@ -44,8 +47,8 @@ added to this option. ### Authorization Failures -It should be noted that the corresponding messages logged when each of the above authorization failures occur are -slightly different. This allows the administrator to discern from which authorization list the failure was generated. +It should be noted that the corresponding messages logged when each of the above authorization failures occur are +slightly different. This allows the administrator to discern from which authorization list the failure was generated. 
Failures stemming from _inclusion_ in the `unauthorized_users` list will include text similar to the following: @@ -70,7 +73,7 @@ option is configured via two pieces of information: `EG_IMPERSONATION_ENABLED` a `EG_IMPERSONATION_ENABLED` indicates the intention that user impersonation should be performed and can also be conveyed via the command-line boolean option `EnterpriseGatewayApp.impersonation_enabled` (default = False). -`KERNEL_USERNAME` is also conveyed within the environment of the kernel launch sequence where +`KERNEL_USERNAME` is also conveyed within the environment of the kernel launch sequence where its value is used to indicate the user that should be impersonated. ### Impersonation in Hadoop YARN clusters @@ -84,7 +87,7 @@ user name. :class: warning When using kerberos in a YARN managed cluster, the gateway user (`elyra` by default) needs to be set up as a `proxyuser` superuser in hadoop configuration. Please refer to the -[Hadoop documentation](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html) +[Hadoop documentation](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html) regarding the proper configuration steps. ``` @@ -100,28 +103,28 @@ YARN APIs and can also be conveyed via the command-line boolean option `Enterpri ### Impersonation in Standalone or YARN Client Mode -Impersonation performed in standalone or YARN cluster modes tends to take the form of using `sudo` to perform the -kernel launch as the target user. This can also be configured within the +Impersonation performed in standalone or YARN cluster modes tends to take the form of using `sudo` to perform the +kernel launch as the target user. This can also be configured within the [run.sh](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernelspecs/spark_python_yarn_client/bin/run.sh) script and requires the following: 1. 
The gateway user (i.e., the user in which Enterprise Gateway is running) must be enabled to perform sudo operations -on each potential host. This enablement must also be done to prevent password prompts since Enterprise Gateway runs -in the background. Refer to your operating system documentation for details. + on each potential host. This enablement must also be done to prevent password prompts since Enterprise Gateway runs + in the background. Refer to your operating system documentation for details. 2. Each user identified by `KERNEL_USERNAME` must be associated with an actual operating system user on each host. 3. Once the gateway user is configured for `sudo` privileges it is **strongly recommended** that that user be included -in the set of `unauthorized_users`. Otherwise, kernels not configured for impersonation, or those requests that do not -include `KERNEL_USERNAME`, will run as the, now, highly privileged gateway user! + in the set of `unauthorized_users`. Otherwise, kernels not configured for impersonation, or those requests that do not + include `KERNEL_USERNAME`, will run as the, now, highly privileged gateway user! ```{warning} -Should impersonation be disabled after granting the gateway user elevated privileges, it is +Should impersonation be disabled after granting the gateway user elevated privileges, it is **strongly recommended** those privileges be revoked (on all hosts) prior to starting kernels since those kernels will run as the gateway user **regardless of the value of KERNEL_USERNAME**. ``` ## SSH Tunneling -Jupyter Enterprise Gateway is configured to perform SSH tunneling on the five ZeroMQ kernel sockets as well as the +Jupyter Enterprise Gateway is configured to perform SSH tunneling on the five ZeroMQ kernel sockets as well as the communication socket created within the launcher and used to perform remote and cross-user signalling functionality. SSH tunneling is NOT enabled by default. 
Tunneling can be enabled/disabled via the environment variable `EG_ENABLE_TUNNELING=False`. Note, there is no command-line or configuration file support for this variable. @@ -130,13 +133,13 @@ Note that SSH by default validates host keys before connecting to remote hosts a or unknown hosts. Enterprise Gateway honors this requirement, and invalid or unknown hosts will cause tunneling to fail. Please perform necessary steps to validate all hosts before enabling SSH tunneling, such as: -* SSH to each node cluster and accept the host key properly -* Configure SSH to disable `StrictHostKeyChecking` +- SSH to each node cluster and accept the host key properly +- Configure SSH to disable `StrictHostKeyChecking` -## Using Generic Security Service (Kerberos) +## Using Generic Security Service (Kerberos) Jupyter Enterprise Gateway has support for SSH connections using GSS (for example Kerberos), which enables its deployment -without the use of an ssh key. The `EG_REMOTE_GSS_SSH` environment variable can be used to control this behavior. +without the use of an ssh key. The `EG_REMOTE_GSS_SSH` environment variable can be used to control this behavior. ```{seealso} The list of [additional supported environment variables](config-add-env.md#additional-environment-variables). @@ -149,30 +152,30 @@ The list of [additional supported environment variables](config-add-env.md#addit Enterprise Gateway supports Secure Sockets Layer (SSL) communication with its clients. With SSL enabled, all the communication between the server and client are encrypted and highly secure. -1. You can start Enterprise Gateway to communicate via a secure protocol mode by setting the `certfile` and `keyfile` -options with the command: +1. 
You can start Enterprise Gateway to communicate via a secure protocol mode by setting the `certfile` and `keyfile` + options with the command: - ``` - jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --certfile=mycert.pem --keyfile=mykey.key - ``` + ``` + jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --certfile=mycert.pem --keyfile=mykey.key + ``` - As server starts up, the log should reflect the following, + As server starts up, the log should reflect the following, - ``` - [EnterpriseGatewayApp] Jupyter Enterprise Gateway at https://localhost:8888 - ``` + ``` + [EnterpriseGatewayApp] Jupyter Enterprise Gateway at https://localhost:8888 + ``` - Note: Enterprise Gateway server is started with `HTTPS` instead of `HTTP`, meaning server side SSL is enabled. + Note: Enterprise Gateway server is started with `HTTPS` instead of `HTTP`, meaning server side SSL is enabled. - TIP: - A self-signed certificate can be generated with openssl. For example, the following command will create a - certificate valid for 365 days with both the key and certificate data written to the same file: + TIP: + A self-signed certificate can be generated with openssl. For example, the following command will create a + certificate valid for 365 days with both the key and certificate data written to the same file: - ```bash - openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout mykey.key -out mycert.pem - ``` + ```bash + openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout mykey.key -out mycert.pem + ``` -2. With Enterprise Gateway server SSL enabled, now you need to configure the client side SSL, which is accomplished via the Gateway configuration options embedded in Notebook server. +2. With Enterprise Gateway server SSL enabled, now you need to configure the client side SSL, which is accomplished via the Gateway configuration options embedded in Notebook server. 
During Jupyter Notebook server startup, export the following environment variables where the gateway-enabled server has access during runtime: @@ -185,18 +188,19 @@ options with the command: ```{note} If using a self-signed certificate, you can set `JUPYTER_GATEWAY_CA_CERTS` same as `JUPYTER_GATEWAY_CLIENT_CERT`. - ``` - + ``` ### Using Enterprise Gateway configuration file + You can also utilize the [Enterprise Gateway configuration file](config-file.md#configuration-file-options) to set static configurations for the server. To enable SSL from the configuration file, modify the corresponding parameter to the appropriate value. - ``` - c.EnterpriseGatewayApp.certfile = '/absolute/path/to/your/certificate/fullchain.pem' - c.EnterpriseGatewayApp.keyfile = '/absolute/path/to/your/certificate/privatekey.key' - ``` + ``` + c.EnterpriseGatewayApp.certfile = '/absolute/path/to/your/certificate/fullchain.pem' + c.EnterpriseGatewayApp.keyfile = '/absolute/path/to/your/certificate/privatekey.key' + ``` + Using configuration file achieves the same result as starting the server with `--certfile` and `--keyfile`, this way provides better readability and maintainability. diff --git a/docs/source/operators/config-sys-env.md b/docs/source/operators/config-sys-env.md index aeb267224..10e85f41d 100644 --- a/docs/source/operators/config-sys-env.md +++ b/docs/source/operators/config-sys-env.md @@ -1,8 +1,11 @@ # System-owned environment variables + The following environment variables are managed by Enterprise Gateway and listed here for completeness. + ```{warning} Manually setting these variables could adversely affect operations. ``` + ```text EG_DOCKER_MODE Docker only. Used by launch_docker.py to determine if the kernel container @@ -16,4 +19,4 @@ Manually setting these variables could adversely affect operations. kernel's connection information should be sent. 
Enterprise Gateway is listening on that socket and will associate that connnection information with the responding kernel. -``` \ No newline at end of file +``` diff --git a/docs/source/operators/deploy-conductor.md b/docs/source/operators/deploy-conductor.md index 393191815..bd6c5f9ec 100644 --- a/docs/source/operators/deploy-conductor.md +++ b/docs/source/operators/deploy-conductor.md @@ -1,9 +1,9 @@ # IBM Spectrum Conductor deployments -This information will be added shortly. The configuration is similar to that of [Hadoop YARN deployments](deploy-yarn-cluster.md) with the `ConductorClusterProcessProxy` used in place of `YARNClusterProcessProxy`. +This information will be added shortly. The configuration is similar to that of [Hadoop YARN deployments](deploy-yarn-cluster.md) with the `ConductorClusterProcessProxy` used in place of `YARNClusterProcessProxy`. The following sample kernel specifications are currently available on IBM Spectrum Conductor: -+ spark_R_conductor_cluster -+ spark_python_conductor_cluster -+ spark_scala_conductor_cluster +- spark_R_conductor_cluster +- spark_python_conductor_cluster +- spark_scala_conductor_cluster diff --git a/docs/source/operators/deploy-distributed.md b/docs/source/operators/deploy-distributed.md index b37f1cb17..3aab47b10 100644 --- a/docs/source/operators/deploy-distributed.md +++ b/docs/source/operators/deploy-distributed.md @@ -1,21 +1,22 @@ # Distributed deployments -This section describes how to deploy Enterprise Gateway to manage kernels across a distributed set of hosts. In this case, a resource manager is not used, but, rather, SSH is used to distribute the kernels. This functionality is accomplished via the [`DistributedProcessProxy`](../contributors/system-architecture.md#distributedprocessproxy). +This section describes how to deploy Enterprise Gateway to manage kernels across a distributed set of hosts. In this case, a resource manager is not used, but, rather, SSH is used to distribute the kernels. 
This functionality is accomplished via the [`DistributedProcessProxy`](../contributors/system-architecture.md#distributedprocessproxy). Steps required to complete deployment on a distributed cluster are: + 1. [Install Enterprise Gateway](installing-eg.md) on the "primary node" of the cluster. 2. [Install the desired kernels](installing-kernels.md) 3. Install and configure the server and desired kernel specifications (see below) 4. [Launch Enterprise Gateway](launching-eg.md) -The `DistributedProcessProxy` simply uses a fixed set of host names and selects the _next_ host using a simple round-robin algorithm (see the [Roadmap](../contributors/roadmap.md) for making this pluggable). In this case, you can still experience bottlenecks on a given node that receives requests to start "large" kernels, but otherwise, you will be better off compared to when all kernels are started on a single node or as local processes, which is the default for Jupyter Notebook and JupyterLab when not configured to use Enterprise Gateway. +The `DistributedProcessProxy` simply uses a fixed set of host names and selects the _next_ host using a simple round-robin algorithm (see the [Roadmap](../contributors/roadmap.md) for making this pluggable). In this case, you can still experience bottlenecks on a given node that receives requests to start "large" kernels, but otherwise, you will be better off compared to when all kernels are started on a single node or as local processes, which is the default for Jupyter Notebook and JupyterLab when not configured to use Enterprise Gateway. The following sample kernelspecs are configured to use the `DistributedProcessProxy`: -+ python_distributed -+ spark_python_yarn_client -+ spark_scala_yarn_client -+ spark_R_yarn_client +- python_distributed +- spark_python_yarn_client +- spark_scala_yarn_client +- spark_R_yarn_client ```{admonition} Important! 
:class: warning @@ -24,8 +25,8 @@ The `DistributedProcessProxy` utilizes SSH between the Enterprise Gateway server The set of remote hosts used by the `DistributedProcessProxy` are derived from two places. -+ The configuration option `EnterpriseGatewayApp.remote_hosts`, whose default value comes from the env variable EG_REMOTE_HOSTS - which, itself, defaults to 'localhost'. -+ The config option can be [overridden on a per-kernel basis](config-kernel-override.md#per-kernel-configuration-overrides) if the process_proxy stanza contains a config stanza where there's a `remote_hosts` entry. If present, this value will be used instead. +- The configuration option `EnterpriseGatewayApp.remote_hosts`, whose default value comes from the env variable EG_REMOTE_HOSTS - which, itself, defaults to 'localhost'. +- The config option can be [overridden on a per-kernel basis](config-kernel-override.md#per-kernel-configuration-overrides) if the process_proxy stanza contains a config stanza where there's a `remote_hosts` entry. If present, this value will be used instead. ```{tip} Entries in the remote hosts configuration should be fully qualified domain names (FQDN). For example, `host1.acme.com, host2.acme.com` @@ -36,12 +37,14 @@ Entries in the remote hosts configuration should be fully qualified domain names All the kernel *specifications* configured to use the `DistributedProcessProxy` must be on all nodes to which there's a reference in the remote hosts configuration! With YARN cluster node, only the Python and R kernel _packages_ are required on each node, not the entire kernel specification. ``` -The following installs the sample `python_distributed` kernel specification relative to the 2.6.0 release on the given node. This step must be repeated for each node and each kernel specification. -``` Bash +The following installs the sample `python_distributed` kernel specification relative to the 2.6.0 release on the given node. 
This step must be repeated for each node and each kernel specification. + +```Bash wget https://github.com/jupyter-server/enterprise_gateway/releases/download/v2.5.0/jupyter_enterprise_gateway_kernelspecs-2.6.0.tar.gz KERNELS_FOLDER=/usr/local/share/jupyter/kernels tar -zxvf jupyter_enterprise_gateway_kernelspecs-2.6.0.tar.gz --strip 1 --directory $KERNELS_FOLDER/python_distributed/ python_distributed/ ``` + ```{tip} You may find it easier to install all kernel specifications on each node, then remove directories corresponding to specification you're not interested in using. ``` @@ -52,7 +55,7 @@ YARN client mode kernel specifications can be considered _distributed mode kerne YARN Client kernel specifications require the following environment variable to be set within their `env` entries: -* `SPARK_HOME` must point to the Apache Spark installation path +- `SPARK_HOME` must point to the Apache Spark installation path ``` SPARK_HOME:/usr/hdp/current/spark2-client #For HDP distribution @@ -81,7 +84,7 @@ After that, you should have a `kernel.json` that looks similar to the one below: }, "argv": [ "/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh", - "--RemoteProcessProxy.kernel-id", + "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", @@ -101,12 +104,12 @@ Each node of the cluster will typically be configured in the same manner relativ Although Enterprise Gateway does not provide sample kernelspecs for Spark standalone, here are the steps necessary to convert a `yarn_client` kernelspec to standalone. -+ Make a copy of the source `yarn_client` kernelspec into an applicable `standalone` directory. -+ Edit the `kernel.json` file: - + Update the display_name with e.g. `Spark - Python (Spark Standalone)`. - + Update the `--master` option in the SPARK_OPTS to point to the spark master node rather than indicate `--deploy-mode client`. 
- + Update `SPARK_OPTS` and remove the `spark.yarn.submit.waitAppCompletion=false`. - + Update the `argv` stanza to reference `run.sh` in the appropriate directory. +- Make a copy of the source `yarn_client` kernelspec into an applicable `standalone` directory. +- Edit the `kernel.json` file: + - Update the display_name with e.g. `Spark - Python (Spark Standalone)`. + - Update the `--master` option in the SPARK_OPTS to point to the spark master node rather than indicate `--deploy-mode client`. + - Update `SPARK_OPTS` and remove the `spark.yarn.submit.waitAppCompletion=false`. + - Update the `argv` stanza to reference `run.sh` in the appropriate directory. After that, you should have a `kernel.json` that looks similar to the one below: @@ -129,7 +132,7 @@ After that, you should have a `kernel.json` that looks similar to the one below: }, "argv": [ "/usr/local/share/jupyter/kernels/spark_python_spark_standalone/bin/run.sh", - "--RemoteProcessProxy.kernel-id", + "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", @@ -138,5 +141,3 @@ After that, you should have a `kernel.json` that looks similar to the one below: ] } ``` - - diff --git a/docs/source/operators/deploy-docker.md b/docs/source/operators/deploy-docker.md index 789778616..dc945677e 100644 --- a/docs/source/operators/deploy-docker.md +++ b/docs/source/operators/deploy-docker.md @@ -1,20 +1,20 @@ # Docker and Docker Swarm deployments -This section describes how to deploy Enterprise Gateway into an existing Docker or Docker Swarm cluster. The two deployments are nearly identical and any differences will be noted. +This section describes how to deploy Enterprise Gateway into an existing Docker or Docker Swarm cluster. The two deployments are nearly identical and any differences will be noted. 
-The base Enterprise Gateway image is [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) and can be found in the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/), along with other images. See [Docker Images](../contributors/docker.md) for image details. +The base Enterprise Gateway image is [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) and can be found in the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/), along with other images. See [Docker Images](../contributors/docker.md) for image details. The following sample kernelspecs are currently available on Docker and Docker Swarm deployments: -+ R_docker -+ python_docker -+ python_tf_docker -+ python_tf_gpu_docker -+ scala_docker +- R_docker +- python_docker +- python_tf_docker +- python_tf_gpu_docker +- scala_docker ## Docker Swarm deployment -Enterprise Gateway manifests itself as a Docker Swarm service. It is identified by the name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have a label of `app=enterprise-gateway` applied. +Enterprise Gateway manifests itself as a Docker Swarm service. It is identified by the name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have a label of `app=enterprise-gateway` applied. The current deployment uses a compose stack definition, [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml) which creates an overlay network intended for use solely by Enterprise Gateway and any kernel-based services it launches. @@ -26,14 +26,15 @@ docker stack deploy -c docker-compose.yml enterprise-gateway More information about deploying and managing stacks can be found [here](https://docs.docker.com/engine/reference/commandline/stack_deploy/). 
-Since Swarm's support for session-based affinity has not been investigated at this time, the deployment script configures a single replica. Once session affinity is available, the number of replicas can be increased. +Since Swarm's support for session-based affinity has not been investigated at this time, the deployment script configures a single replica. Once session affinity is available, the number of replicas can be increased. ```{note} Once session affinity has been figured out, we can (theoretically) configure Enterprise Gateway for high availability by increasing the replicas. However, HA support cannot be fully realized until Enterprise Gateway has finalized its persistent sessions functionality. ``` ## Docker deployment -An alternative deployment of Enterprise Gateway in docker environments is to deploy Enterprise Gateway as a traditional docker container. This can be accomplished via the [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml) file. However, keep in mind that in choosing this deployment approach, one loses leveraging swarm's monitoring & restart capabilities. That said, choosing this approach does not preclude one from leveraging swarm's scheduling capabilities for launching kernels. As noted below, kernel instances, and how they manifest as docker-based entities (i.e., a swarm service or a docker container), is purely a function of the process proxy class to which they're associated. + +An alternative deployment of Enterprise Gateway in docker environments is to deploy Enterprise Gateway as a traditional docker container. This can be accomplished via the [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml) file. However, keep in mind that in choosing this deployment approach, one loses leveraging swarm's monitoring & restart capabilities. 
That said, choosing this approach does not preclude one from leveraging swarm's scheduling capabilities for launching kernels. As noted below, kernel instances, and how they manifest as docker-based entities (i.e., a swarm service or a docker container), is purely a function of the process proxy class to which they're associated. To start the stack using compose: @@ -45,29 +46,31 @@ The documentation for managing a compose stack can be found [here](https://docs. ## Kernelspec Modifications -One of the more common areas of customization we see occur within the kernelspec files located in /usr/local/share/jupyter/kernels. To customize the kernel definitions, the kernels directory can be exposed as a mounted volume thereby making it available to all containers within the swarm cluster. +One of the more common areas of customization we see occur within the kernelspec files located in /usr/local/share/jupyter/kernels. To customize the kernel definitions, the kernels directory can be exposed as a mounted volume thereby making it available to all containers within the swarm cluster. -As an example, we have included the necessary commands to mount these volumes, both in the deployment script and in the [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/docker/scripts/launch_docker.py) file used to launch docker-based kernels. By default, these references are commented out as they require the system administrator to ensure the directories are available throughout the cluster. +As an example, we have included the necessary commands to mount these volumes, both in the deployment script and in the [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/docker/scripts/launch_docker.py) file used to launch docker-based kernels. By default, these references are commented out as they require the system administrator to ensure the directories are available throughout the cluster. 
-Note that because the kernel launch script, [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/docker/scripts/launch_docker.py), resides in the kernelspecs hierarchy, updates or modifications to docker-based kernel instances can now also take place. +Note that because the kernel launch script, [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/docker/scripts/launch_docker.py), resides in the kernelspecs hierarchy, updates or modifications to docker-based kernel instances can now also take place. ## Docker and Docker Swarm Kernel Instances -Enterprise Gateway currently supports launching of _vanilla_ (i.e., non-spark) kernels within a Docker Swarm cluster. When kernels are launched, Enterprise Gateway is responsible for creating the appropriate entity. The kind of entity created is a function of the corresponding process proxy class. -When the process proxy class is `DockerSwarmProcessProxy` the `launch_docker.py` script will create a Docker Swarm _service_. This service uses a restart policy of `none` meaning that it's configured to go away upon failures or completion. In addition, because the kernel is launched as a swarm service, the kernel can "land" on any node of the cluster. +Enterprise Gateway currently supports launching of _vanilla_ (i.e., non-spark) kernels within a Docker Swarm cluster. When kernels are launched, Enterprise Gateway is responsible for creating the appropriate entity. The kind of entity created is a function of the corresponding process proxy class. + +When the process proxy class is `DockerSwarmProcessProxy` the `launch_docker.py` script will create a Docker Swarm _service_. This service uses a restart policy of `none` meaning that it's configured to go away upon failures or completion. In addition, because the kernel is launched as a swarm service, the kernel can "land" on any node of the cluster. 
-When the process proxy class is `DockerProcessProxy` the `launch_docker.py` script will create a traditional docker _container_. As a result, the kernel will always reside on the same host as the corresponding Enterprise Gateway. +When the process proxy class is `DockerProcessProxy` the `launch_docker.py` script will create a traditional docker _container_. As a result, the kernel will always reside on the same host as the corresponding Enterprise Gateway. Items worth noting: + 1. The Swarm service or Docker container name will be composed of the launching username (`KERNEL_USERNAME`) and kernel-id. 2. The service/container will have 3 labels applied: "kernel_id=", "component=kernel", and "app=enterprise-gateway" - similar to Kubernetes. 3. The service/container will be launched within the same docker network as Enterprise Gateway. ## DockerSwarmProcessProxy -To indicate that a given kernel should be launched as a Docker Swarm service into a swarm cluster, the kernel.json file's `metadata` stanza must include a `process_proxy` stanza indicating a `class_name:` of `DockerSwarmProcessProxy`. This ensures the appropriate lifecycle management will take place relative to a Docker Swarm environment. +To indicate that a given kernel should be launched as a Docker Swarm service into a swarm cluster, the kernel.json file's `metadata` stanza must include a `process_proxy` stanza indicating a `class_name:` of `DockerSwarmProcessProxy`. This ensures the appropriate lifecycle management will take place relative to a Docker Swarm environment. -Along with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the docker image to associate with the kernel's service container. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. 
In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`. +Along with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the docker image to associate with the kernel's service container. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`. ```{note} _The use of `VERSION` in docker image tags is a placeholder for the appropriate version-related image tag. When kernelspecs are built via the Enterprise Gateway Makefile, `VERSION` is replaced with the appropriate version denoting the target release. A full list of available image tags can be found in the dockerhub repository corresponding to each image._ @@ -85,14 +88,15 @@ _The use of `VERSION` in docker image tags is a placeholder for the appropriate } } ``` -As always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. However, when launching kernels in a docker environment, what gets invoked isn't the kernel's launcher, but, instead, a python script that is responsible for using the [Docker Python API](https://docker-py.readthedocs.io/en/stable/) to create the corresponding instance. + +As always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. However, when launching kernels in a docker environment, what gets invoked isn't the kernel's launcher, but, instead, a python script that is responsible for using the [Docker Python API](https://docker-py.readthedocs.io/en/stable/) to create the corresponding instance. 
```json { "argv": [ "python", "/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py", - "--RemoteProcessProxy.kernel-id", + "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", @@ -104,7 +108,7 @@ As always, kernels are launched by virtue of the `argv:` stanza in their respect ## DockerProcessProxy -Running containers in Docker Swarm versus traditional Docker are different enough to warrant having separate process proxy implementations. As a result, the kernel.json file could reference the `DockerProcessProxy` class and, accordingly, a traditional docker container (as opposed to a swarm _service_) will be created. The rest of the kernel.json file, image name, argv stanza, etc. is identical. +Running containers in Docker Swarm versus traditional Docker are different enough to warrant having separate process proxy implementations. As a result, the kernel.json file could reference the `DockerProcessProxy` class and, accordingly, a traditional docker container (as opposed to a swarm _service_) will be created. The rest of the kernel.json file, image name, argv stanza, etc. is identical. ```json { @@ -119,7 +123,7 @@ Running containers in Docker Swarm versus traditional Docker are different enoug "argv": [ "python", "/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py", - "--RemoteProcessProxy.kernel-id", + "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", @@ -131,4 +135,4 @@ Running containers in Docker Swarm versus traditional Docker are different enoug Upon invocation, the invoked process proxy will set a "docker mode" environment variable (`EG_DOCKER_MODE`) to either `swarm` or `docker`, depending on the process proxy instance, that the `launch_docker.py` script uses to determine whether a _service_ or _container_ should be created, respectively. 
-It should be noted that each of these forms of process proxy usage does **NOT** need to match to the way in which the Enterprise Gateway instance was deployed. For example, if Enterprise Gateway was deployed as a Docker Swarm service and a `DockerProcessProxy` is used, that corresponding kernel will be launched as a traditional docker container and will reside on the same host as wherever the Enterprise Gateway (swarm) service is running. Similarly, if Enterprise Gateway was deployed using standard Docker container and a `DockerSwarmProcessProxy` is used (and assuming a swarm configuration is present), that corresponding kernel will be launched as a docker swarm service and will reside on whatever host the Docker Swarm scheduler decides is best. That is, the kernel container's lifecycle will be managed by the corresponding process proxy and the Enterprise Gateway's deployment has no bearing. +It should be noted that each of these forms of process proxy usage does **NOT** need to match to the way in which the Enterprise Gateway instance was deployed. For example, if Enterprise Gateway was deployed as a Docker Swarm service and a `DockerProcessProxy` is used, that corresponding kernel will be launched as a traditional docker container and will reside on the same host as wherever the Enterprise Gateway (swarm) service is running. Similarly, if Enterprise Gateway was deployed using standard Docker container and a `DockerSwarmProcessProxy` is used (and assuming a swarm configuration is present), that corresponding kernel will be launched as a docker swarm service and will reside on whatever host the Docker Swarm scheduler decides is best. That is, the kernel container's lifecycle will be managed by the corresponding process proxy and the Enterprise Gateway's deployment has no bearing. 
diff --git a/docs/source/operators/deploy-kubernetes.md b/docs/source/operators/deploy-kubernetes.md index 1d9c06092..491a430c4 100644 --- a/docs/source/operators/deploy-kubernetes.md +++ b/docs/source/operators/deploy-kubernetes.md @@ -2,26 +2,26 @@ This section describes how to deploy Enterprise Gateway into an existing Kubernetes cluster. -In this solution, Enterprise Gateway is, itself, provisioned as a Kubernetes _deployment_ and exposed as a Kubernetes _service_. In this way, Enterprise Gateway can leverage load balancing and high availability functionality provided by Kubernetes (although HA cannot be fully realized until Enterprise Gateway supports persistent sessions). +In this solution, Enterprise Gateway is, itself, provisioned as a Kubernetes _deployment_ and exposed as a Kubernetes _service_. In this way, Enterprise Gateway can leverage load balancing and high availability functionality provided by Kubernetes (although HA cannot be fully realized until Enterprise Gateway supports persistent sessions). The following sample kernel specifications apply to Kubernetes deployments: -+ R_kubernetes -+ python_kubernetes -+ python_tf_gpu_kubernetes -+ python_tf_kubernetes -+ scala_kubernetes -+ spark_R_kubernetes -+ spark_python_kubernetes -+ spark_scala_kubernetes +- R_kubernetes +- python_kubernetes +- python_tf_gpu_kubernetes +- python_tf_kubernetes +- scala_kubernetes +- spark_R_kubernetes +- spark_python_kubernetes +- spark_scala_kubernetes -As with all Kubernetes deployments, Enterprise Gateway is built into a docker image. The base Enterprise Gateway image is [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) and can be found in the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/), along with other kubernetes-based images. See [Docker Images](../contributors/docker.md) for image details. +As with all Kubernetes deployments, Enterprise Gateway is built into a docker image. 
The base Enterprise Gateway image is [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) and can be found in the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/), along with other kubernetes-based images. See [Docker Images](../contributors/docker.md) for image details. When deployed within a [spark-on-kubernetes](https://spark.apache.org/docs/latest/running-on-kubernetes.html) cluster, Enterprise Gateway can easily support cluster-managed kernels distributed across the cluster. Enterprise Gateway will also provide standalone (i.e., _vanilla_) kernel invocation (where spark contexts are not automatically created) which also benefits from their distribution across the cluster. ## Deploying Enterprise Gateway on Kubernetes -Once the Kubernetes cluster is configured and `kubectl` is demonstrated to be working on the primary node, it is time to deploy Enterprise Gateway. There are a couple of different deployment options - using helm charts or kubectl templates. +Once the Kubernetes cluster is configured and `kubectl` is demonstrated to be working on the primary node, it is time to deploy Enterprise Gateway. There are a couple of different deployment options - using helm charts or kubectl templates. ### Deploying with helm charts @@ -42,55 +42,58 @@ From anywhere with helm cluster access, create the service and deployment by run ```bash helm upgrade --install --namespace enterprise-gateway enterprise-gateway etc/kubernetes/helm/enterprise-gateway ``` + the helm chart tarball is also accessible as an asset on our [release](https://github.com/jupyter-server/enterprise_gateway/releases) page: + ```bash helm install --namespace enterprise-gateway enterprise-gateway https://github.com/jupyter-server/enterprise_gateway/releases/download/v2.6.0/jupyter_enterprise_gateway_helm-2.6.0.tgz ``` + #### Configuration + Here are the values that you can set when deploying the helm chart. 
You can override them with helm's `--set` or `--values` options. -| **Parameter** | **Description** | **Default** | -| ------------- | --------------- | ----------- | -| `image` | Enterprise Gateway image name and tag to use. Ensure the tag is updated to the version of Enterprise Gateway you wish to run. | `elyra/enterprise-gateway:VERSION`, where `VERSION` is the release being used | -| `imagePullPolicy` | Enterprise Gateway image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific this policy should be sufficient for them as well. | `IfNotPresent` | -| `port` | The primary port on which Enterprise Gateway is servicing requests. | `8888` | -| `response_port` | The port on which Enterprise Gateway will receive kernel connection info responses. | `8877` | -| `replicas` | Update to deploy multiple replicas of EG. | `1` | -| `logLevel` | Log output level. | `DEBUG` | -| `mirrorWorkingDirs` | Whether to mirror working directories. NOTE: This requires appropriate volume mounts to make notebook dir accessible. | `false` | -| `k8sMasterPublicIP` | Master public IP on which to expose EG. | `nil` | -| `authToken` | Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token) | `nil` | -| `kernel.clusterRole` | Kernel cluster role created by this chart. Used if no KERNEL_NAMESPACE is provided by client. | `kernel-controller` | -| `kernel.shareGatewayNamespace` | Will start kernels in the same namespace as EG if True. | `false` | -| `kernel.launchTimeout` | Timeout for kernel launching in seconds. | `60` | -| `kernel.cullIdleTimeout` | Idle timeout in seconds. Default is 1 hour. | `3600` | -| `kernel.whitelist` | List of kernel names that are available for use. 
| `{r_kubernetes,...}` (see `values.yaml`) | -| `kernel.defaultKernelName` | Default kernel name should be something from the whitelist | `python-kubernetes` | -| `kernelspecs.image` | Optional custom data image containing kernelspecs to use. Cannot be used with NFS enabled. | `nil` | -| `kernelspecs.imagePullPolicy` | Kernelspecs image pull policy. | `Always` | -| `nfs.enabled` | Whether NFS-mounted kernelspecs are enabled. Cannot be used with `kernelspecs.image` set. | `false` | -| `nfs.internalServerIPAddress` | IP address of NFS server. Required if NFS is enabled. | `nil` | -| `nfs.internalServerIPAddress` | IP address of NFS server. Required if NFS is enabled. | `nil` | -| `kernelspecsPvc.enabled` | Use a persistent volume claim to store kernelspecs in a persistent volume | `false` | -| `kernelspecsPvc.name` | PVC name. Required if want mount kernelspecs without nfs. PVC should create in the same namespace before EG deployed. | `nil` | -| `ingress.enabled` | Whether to include an EG ingress resource during deployment.| `false` | -| `ingress.traefik.enabled` | Configure the ingress using Traefik as the controller. NOTE: A traefik controller must be installed and `ingress.enabled` must be `true`. | `true` | -| `ingress.traefik.annotations` | Traefik-relative ingress annotations to be included when `ingress.traefik.enabled` is `true`. | `(traefik-ingress annotations)` | -| `ingress.traefik.path` | URL context to be used in addition to the hostname to access Enterprise Gateway when `ingress.traefik.enabled` is `true`. | `/gateway` | -| `ingress.nginx.enabled` | Configure the ingress using Nginx as the controller. NOTE: A nginx controller must be installed and `ingress.enabled` must be `true`. | `false` | -| `ingress.nginx.annotations` | Nginx-relative ingress annotations to be included when `ingress.nginx.enabled` is `true`. 
| `(nginx-ingress annotations)` | -| `ingress.nginx.path` | URL context to be used in addition to the hostname to access Enterprise Gateway when `ingress.nginx.enabled` is `true`. | `/gateway/?(.*)` | -| `ingress.hostName` | Ingress resource host | `nil` | -| `ingress.port` | The port where enterprise gateway service is running | `8888` | -| `kip.enabled` | Whether the Kernel Image Puller should be used | `true` | -| `kip.image` | Kernel Image Puller image name and tag to use. Ensure the tag is updated to the version of the Enterprise Gateway release you wish to run. | `elyra/kernel-image-puller:VERSION`, where `VERSION` is the release being used | -| `kip.imagePullPolicy` | Kernel Image Puller image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific this policy should be sufficient for them as well. | `IfNotPresent` | -| `kip.interval` | The interval (in seconds) at which the Kernel Image Puller fetches kernelspecs to pull kernel images. 
| `300` | -| `kip.pullPolicy` | Determines whether the Kernel Image Puller will pull kernel images it has previously pulled (`Always`) or only those it hasn't yet pulled (`IfNotPresent`) | `IfNotPresent` | -| `kip.criSocket` | The container runtime interface socket, use `/run/containerd/containerd.sock` for containerd installations | `/var/run/docker.sock` -| `kip.defaultContainerRegistry` | Prefix to use if a registry is not already specified on image name (e.g., quay.io/elyra/kernel-py:2.6.0) | `docker.io` - +| **Parameter** | **Description** | **Default** | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `image` | Enterprise Gateway image name and tag to use. Ensure the tag is updated to the version of Enterprise Gateway you wish to run. | `elyra/enterprise-gateway:VERSION`, where `VERSION` is the release being used | +| `imagePullPolicy` | Enterprise Gateway image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific this policy should be sufficient for them as well. | `IfNotPresent` | +| `port` | The primary port on which Enterprise Gateway is servicing requests. | `8888` | +| `response_port` | The port on which Enterprise Gateway will receive kernel connection info responses. | `8877` | +| `replicas` | Update to deploy multiple replicas of EG. | `1` | +| `logLevel` | Log output level. | `DEBUG` | +| `mirrorWorkingDirs` | Whether to mirror working directories. NOTE: This requires appropriate volume mounts to make notebook dir accessible. | `false` | +| `k8sMasterPublicIP` | Master public IP on which to expose EG. 
| `nil` | +| `authToken` | Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token) | `nil` | +| `kernel.clusterRole` | Kernel cluster role created by this chart. Used if no KERNEL_NAMESPACE is provided by client. | `kernel-controller` | +| `kernel.shareGatewayNamespace` | Will start kernels in the same namespace as EG if True. | `false` | +| `kernel.launchTimeout` | Timeout for kernel launching in seconds. | `60` | +| `kernel.cullIdleTimeout` | Idle timeout in seconds. Default is 1 hour. | `3600` | +| `kernel.whitelist` | List of kernel names that are available for use. | `{r_kubernetes,...}` (see `values.yaml`) | +| `kernel.defaultKernelName` | Default kernel name should be something from the whitelist | `python-kubernetes` | +| `kernelspecs.image` | Optional custom data image containing kernelspecs to use. Cannot be used with NFS enabled. | `nil` | +| `kernelspecs.imagePullPolicy` | Kernelspecs image pull policy. | `Always` | +| `nfs.enabled` | Whether NFS-mounted kernelspecs are enabled. Cannot be used with `kernelspecs.image` set. | `false` | +| `nfs.internalServerIPAddress` | IP address of NFS server. Required if NFS is enabled. | `nil` | +| `nfs.internalServerIPAddress` | IP address of NFS server. Required if NFS is enabled. | `nil` | +| `kernelspecsPvc.enabled` | Use a persistent volume claim to store kernelspecs in a persistent volume | `false` | +| `kernelspecsPvc.name` | PVC name. Required if want mount kernelspecs without nfs. PVC should create in the same namespace before EG deployed. | `nil` | +| `ingress.enabled` | Whether to include an EG ingress resource during deployment. | `false` | +| `ingress.traefik.enabled` | Configure the ingress using Traefik as the controller. NOTE: A traefik controller must be installed and `ingress.enabled` must be `true`. | `true` | +| `ingress.traefik.annotations` | Traefik-relative ingress annotations to be included when `ingress.traefik.enabled` is `true`. 
| `(traefik-ingress annotations)` | +| `ingress.traefik.path` | URL context to be used in addition to the hostname to access Enterprise Gateway when `ingress.traefik.enabled` is `true`. | `/gateway` | +| `ingress.nginx.enabled` | Configure the ingress using Nginx as the controller. NOTE: A nginx controller must be installed and `ingress.enabled` must be `true`. | `false` | +| `ingress.nginx.annotations` | Nginx-relative ingress annotations to be included when `ingress.nginx.enabled` is `true`. | `(nginx-ingress annotations)` | +| `ingress.nginx.path` | URL context to be used in addition to the hostname to access Enterprise Gateway when `ingress.nginx.enabled` is `true`. | `/gateway/?(.*)` | +| `ingress.hostName` | Ingress resource host | `nil` | +| `ingress.port` | The port where enterprise gateway service is running | `8888` | +| `kip.enabled` | Whether the Kernel Image Puller should be used | `true` | +| `kip.image` | Kernel Image Puller image name and tag to use. Ensure the tag is updated to the version of the Enterprise Gateway release you wish to run. | `elyra/kernel-image-puller:VERSION`, where `VERSION` is the release being used | +| `kip.imagePullPolicy` | Kernel Image Puller image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific this policy should be sufficient for them as well. | `IfNotPresent` | +| `kip.interval` | The interval (in seconds) at which the Kernel Image Puller fetches kernelspecs to pull kernel images. 
| `300` | +| `kip.pullPolicy` | Determines whether the Kernel Image Puller will pull kernel images it has previously pulled (`Always`) or only those it hasn't yet pulled (`IfNotPresent`) | `IfNotPresent` | +| `kip.criSocket` | The container runtime interface socket, use `/run/containerd/containerd.sock` for containerd installations | `/var/run/docker.sock` | +| `kip.defaultContainerRegistry` | Prefix to use if a registry is not already specified on image name (e.g., quay.io/elyra/kernel-py:2.6.0) | `docker.io` | #### Uninstalling Enterprise Gateway @@ -99,11 +102,13 @@ When using helm, you can uninstall Enterprise Gateway with the following command ```bash helm delete enterprise-gateway --namespace enterprise-gateway ``` + ### Deploying with kubectl templates Choose this deployment option if you want to deploy directly from Kubernetes template files with kubectl, rather than using a package manager like helm. #### Create the Enterprise Gateway kubernetes service and deployment + From the master node, create the service and deployment using the yaml file from a source release or the git repository: ```bash @@ -116,24 +121,30 @@ deployment "enterprise-gateway" created #### Uninstalling Enterprise Gateway To shut down Enterprise Gateway issue a delete command using the previously mentioned global label `app=enterprise-gateway` + ```bash kubectl delete all -l app=enterprise-gateway ``` + or simply delete the namespace + ```bash kubectl delete ns enterprise-gateway ``` Note that deleting the Enterprise Gateway namespace will not delete cluster-scoped resources like the cluster roles `enterprise-gateway-controller` and `kernel-controller` or the cluster role binding `enterprise-gateway-controller`. 
The following commands can be used to delete these: + ```bash kubectl delete clusterrole -l app=enterprise-gateway kubectl delete clusterrolebinding -l app=enterprise-gateway ``` ## Enterprise Gateway Deployment Details -Enterprise Gateway manifests itself as a Kubernetes deployment, exposed externally by a Kubernetes service. By default, it is identified by the name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have the kubernetes label of `app=enterprise-gateway` applied. -The service is currently configured as type `NodePort` but is intended for type `LoadBalancer` when appropriate network plugins are available. Because kernels are stateful, the service is also configured with a `sessionAffinity` of `ClientIP`. As a result, kernel creation requests will be routed to different deployment instances (see deployment) thereby diminishing the need for a `LoadBalancer` type. Here's the service yaml entry from [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) (for helm, see [service.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/service.yaml)): +Enterprise Gateway manifests itself as a Kubernetes deployment, exposed externally by a Kubernetes service. By default, it is identified by the name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have the kubernetes label of `app=enterprise-gateway` applied. + +The service is currently configured as type `NodePort` but is intended for type `LoadBalancer` when appropriate network plugins are available. Because kernels are stateful, the service is also configured with a `sessionAffinity` of `ClientIP`. 
As a result, kernel creation requests will be routed to different deployment instances (see deployment) thereby diminishing the need for a `LoadBalancer` type. Here's the service yaml entry from [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) (for helm, see [service.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/service.yaml)): + ```yaml apiVersion: v1 kind: Service @@ -144,18 +155,20 @@ metadata: namespace: enterprise-gateway spec: ports: - - name: http - port: 8888 - targetPort: 8888 - - name: response - port: 8877 - targetPort: 8877 + - name: http + port: 8888 + targetPort: 8888 + - name: response + port: 8877 + targetPort: 8877 selector: gateway-selector: enterprise-gateway sessionAffinity: ClientIP type: NodePort ``` -The deployment yaml essentially houses the pod description. By increasing the number of `replicas` a configuration can experience instant benefits of distributing Enterprise Gateway instances across the cluster. This implies that once session persistence is finalized, we should be able to provide highly available (HA) kernels. Here's the yaml portion from [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) that defines the Kubernetes deployment and pod (for helm, see [deployement.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml): + +The deployment yaml essentially houses the pod description. By increasing the number of `replicas` a configuration can experience instant benefits of distributing Enterprise Gateway instances across the cluster. This implies that once session persistence is finalized, we should be able to provide highly available (HA) kernels. 
Here's the yaml portion from [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) that defines the Kubernetes deployment and pod (for helm, see [deployment.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml)): + ```yaml apiVersion: apps/v1beta2 kind: Deployment @@ -167,8 +180,8 @@ metadata: app: enterprise-gateway component: enterprise-gateway spec: -# Uncomment/Update to deploy multiple replicas of EG -# replicas: 1 + # Uncomment/Update to deploy multiple replicas of EG + # replicas: 1 selector: matchLabels: gateway-selector: enterprise-gateway @@ -182,66 +195,73 @@ spec: # Created above. serviceAccountName: enterprise-gateway-sa containers: - - env: - - name: EG_PORT - value: "8888" - - - name: EG_RESPONSE_PORT - value: "8877" - - # Created above. - - name: EG_NAMESPACE - value: "enterprise-gateway" - - # Created above. Used if no KERNEL_NAMESPACE is provided by client. - - name: EG_KERNEL_CLUSTER_ROLE - value: "kernel-controller" - - # All kernels reside in the EG namespace if True, otherwise KERNEL_NAMESPACE - # must be provided or one will be created for each kernel. - - name: EG_SHARED_NAMESPACE - value: "False" - - # NOTE: This requires appropriate volume mounts to make notebook dir accessible - - name: EG_MIRROR_WORKING_DIRS - value: "False" - - # Current idle timeout is 1 hour. 
- - name: EG_CULL_IDLE_TIMEOUT - value: "3600" - - - name: EG_LOG_LEVEL - value: "DEBUG" - - - name: EG_KERNEL_LAUNCH_TIMEOUT - value: "60" - - - name: EG_KERNEL_WHITELIST - value: "'r_kubernetes','python_kubernetes','python_tf_kubernetes','python_tf_gpu_kubernetes','scala_kubernetes','spark_r_kubernetes','spark_python_kubernetes','spark_scala_kubernetes','spark_python_operator'" - - - name: EG_DEFAULT_KERNEL_NAME - value: "python_kubernetes" - - # Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token) - #- name: EG_AUTH_TOKEN - # value: - - # Ensure the following VERSION tag is updated to the version of Enterprise Gateway you wish to run - image: elyra/enterprise-gateway:VERSION - # k8s will only pull :latest all the time. - # the following line will make sure that :VERSION is always pulled - # You should remove this if you want to pin EG to a release tag - imagePullPolicy: Always - name: enterprise-gateway - args: ["--gateway"] - ports: - - containerPort: 8888 - - containerPort: 8877 + - env: + - name: EG_PORT + value: "8888" + + - name: EG_RESPONSE_PORT + value: + "8877" + + # Created above. + - name: EG_NAMESPACE + value: + "enterprise-gateway" + + # Created above. Used if no KERNEL_NAMESPACE is provided by client. + - name: EG_KERNEL_CLUSTER_ROLE + value: + "kernel-controller" + + # All kernels reside in the EG namespace if True, otherwise KERNEL_NAMESPACE + # must be provided or one will be created for each kernel. + - name: EG_SHARED_NAMESPACE + value: + "False" + + # NOTE: This requires appropriate volume mounts to make notebook dir accessible + - name: EG_MIRROR_WORKING_DIRS + value: + "False" + + # Current idle timeout is 1 hour. 
+ - name: EG_CULL_IDLE_TIMEOUT + value: "3600" + + - name: EG_LOG_LEVEL + value: "DEBUG" + + - name: EG_KERNEL_LAUNCH_TIMEOUT + value: "60" + + - name: EG_KERNEL_WHITELIST + value: "'r_kubernetes','python_kubernetes','python_tf_kubernetes','python_tf_gpu_kubernetes','scala_kubernetes','spark_r_kubernetes','spark_python_kubernetes','spark_scala_kubernetes','spark_python_operator'" + + - name: EG_DEFAULT_KERNEL_NAME + value: "python_kubernetes" + + # Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token) + #- name: EG_AUTH_TOKEN + # value: + + # Ensure the following VERSION tag is updated to the version of Enterprise Gateway you wish to run + image: elyra/enterprise-gateway:VERSION + # k8s will only pull :latest all the time. + # the following line will make sure that :VERSION is always pulled + # You should remove this if you want to pin EG to a release tag + imagePullPolicy: Always + name: enterprise-gateway + args: ["--gateway"] + ports: + - containerPort: 8888 + - containerPort: 8877 ``` + ### Namespaces -A best practice for Kubernetes applications running in an enterprise is to isolate applications via namespaces. Since Enterprise Gateway also requires isolation at the kernel level, it makes sense to use a namespace for each kernel, by default. -The initial namespace is created in the `enterprise-gateway.yaml` file using a default name of `enterprise-gateway`. This name is communicated to Enterprise Gateway via the env variable `EG_NAMESPACE`. All Enterprise Gateway components reside in this namespace. +A best practice for Kubernetes applications running in an enterprise is to isolate applications via namespaces. Since Enterprise Gateway also requires isolation at the kernel level, it makes sense to use a namespace for each kernel, by default. + +The initial namespace is created in the `enterprise-gateway.yaml` file using a default name of `enterprise-gateway`. 
This name is communicated to Enterprise Gateway via the env variable `EG_NAMESPACE`. All Enterprise Gateway components reside in this namespace. ```yaml apiVersion: apps/v1beta2 @@ -251,9 +271,9 @@ metadata: namespace: enterprise-gateway ``` -By default, kernel namespaces are created when the respective kernel is launched. At that time, the kernel namespace name is computed from the kernel username (`KERNEL_USERNAME`) and its kernel ID (`KERNEL_ID`) just like the kernel pod name. Upon a kernel's termination, this namespace - provided it was created by Enterprise Gateway - will be deleted. +By default, kernel namespaces are created when the respective kernel is launched. At that time, the kernel namespace name is computed from the kernel username (`KERNEL_USERNAME`) and its kernel ID (`KERNEL_ID`) just like the kernel pod name. Upon a kernel's termination, this namespace - provided it was created by Enterprise Gateway - will be deleted. -Installations wishing to pre-create the kernel namespace can do so by conveying the name of the kernel namespace via `KERNEL_NAMESPACE` in the `env` portion of the kernel creation request. (They must also provide the namespace's service account name via `KERNEL_SERVICE_ACCOUNT_NAME` - see next section.) When `KERNEL_NAMESPACE` is set, Enterprise Gateway will not attempt to create a kernel-specific namespace, nor will it attempt its deletion. As a result, kernel namespace lifecycle management is the user's responsibility. +Installations wishing to pre-create the kernel namespace can do so by conveying the name of the kernel namespace via `KERNEL_NAMESPACE` in the `env` portion of the kernel creation request. (They must also provide the namespace's service account name via `KERNEL_SERVICE_ACCOUNT_NAME` - see next section.) When `KERNEL_NAMESPACE` is set, Enterprise Gateway will not attempt to create a kernel-specific namespace, nor will it attempt its deletion. 
As a result, kernel namespace lifecycle management is the user's responsibility. ```{tip} If you need to associate resources to users, one suggestion is to create a namespace per user and set `KERNEL_NAMESPACE = KERNEL_USERNAME` on the client (see [Kernel Environment Variables](../users/kernel-envs.md)). @@ -262,9 +282,11 @@ If you need to associate resources to users, one suggestion is to create a names Although **not recommended**, installations requiring everything in the same namespace - Enterprise Gateway and all its kernels - can do so by setting env `EG_SHARED_NAMESPACE` to `True` (or by setting the helm chart value `kernel.shareGatewayNamespace` to `true`). When set, all kernels will run in the Enterprise Gateway namespace, essentially eliminating all aspects of isolation between kernel instances (and resources). ### Role-Based Access Control (RBAC) -Another best practice of Kubernetes applications is to define the minimally viable set of permissions for the application. Enterprise Gateway does this by defining role-based access control (RBAC) objects for both Enterprise Gateway and kernels. -Because the Enterprise Gateway pod must create kernel namespaces, pods, services (for Spark support) and role bindings, a cluster-scoped role binding is required. The cluster role binding `enterprise-gateway-controller` also references the subject, `enterprise-gateway-sa`, which is the service account associated with the Enterprise Gateway namespace and also created by the yaml file or [`clusterrolebinding.yaml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/clusterrolebinding.yaml)) helm chart. +Another best practice of Kubernetes applications is to define the minimally viable set of permissions for the application. Enterprise Gateway does this by defining role-based access control (RBAC) objects for both Enterprise Gateway and kernels. 
+ +Because the Enterprise Gateway pod must create kernel namespaces, pods, services (for Spark support) and role bindings, a cluster-scoped role binding is required. The cluster role binding `enterprise-gateway-controller` also references the subject, `enterprise-gateway-sa`, which is the service account associated with the Enterprise Gateway namespace and also created by the yaml file or [`clusterrolebinding.yaml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/clusterrolebinding.yaml) helm chart. + ```yaml apiVersion: v1 kind: ServiceAccount @@ -284,7 +306,16 @@ metadata: component: enterprise-gateway rules: - apiGroups: [""] - resources: ["pods", "namespaces", "services", "configmaps", "secrets", "persistentvolumnes", "persistentvolumeclaims"] + resources: + [ + "pods", + "namespaces", + "services", + "configmaps", + "secrets", + "persistentvolumes", + "persistentvolumeclaims", + ] verbs: ["get", "watch", "list", "create", "delete"] - apiGroups: ["rbac.authorization.k8s.io"] resources: ["rolebindings"] @@ -307,7 +338,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io ``` -The `enterprise-gateway.yaml` file and [`clusterrole.yaml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/clusterrole.yaml) helm chart also define the minimally viable roles for a kernel pod - most of which are required for Spark support. Since kernels, by default, reside within their own namespace created upon their launch, a cluster role is used within a namespace-scoped role binding created when the kernel's namespace is created. The name of the kernel cluster role is `kernel-controller` and, when Enterprise Gateway creates the namespace and role binding, is also the name of the role binding instance. 
+The `enterprise-gateway.yaml` file and [`clusterrole.yaml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/helm/enterprise-gateway/templates/clusterrole.yaml) helm chart also define the minimally viable roles for a kernel pod - most of which are required for Spark support. Since kernels, by default, reside within their own namespace created upon their launch, a cluster role is used within a namespace-scoped role binding created when the kernel's namespace is created. The name of the kernel cluster role is `kernel-controller` and, when Enterprise Gateway creates the namespace and role binding, is also the name of the role binding instance. ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -324,10 +355,13 @@ rules: ``` #### Kernel Service Account Name -As noted above, installations wishing to pre-create their own kernel namespaces should provide the name of the service account associated with the namespace via `KERNEL_SERVICE_ACCOUNT_NAME` in the `env` portion of the kernel creation request (along with `KERNEL_NAMESPACE`). If not provided, the built-in namespace service account, `default`, will be referenced. In such circumstances, Enterprise Gateway will **not** create a role binding on the name for the service account, so it is the user's responsibility to ensure that the service account has the capability to perform equivalent operations as defined by the `kernel-controller` role. + +As noted above, installations wishing to pre-create their own kernel namespaces should provide the name of the service account associated with the namespace via `KERNEL_SERVICE_ACCOUNT_NAME` in the `env` portion of the kernel creation request (along with `KERNEL_NAMESPACE`). If not provided, the built-in namespace service account, `default`, will be referenced. 
In such circumstances, Enterprise Gateway will **not** create a role binding on the name for the service account, so it is the user's responsibility to ensure that the service account has the capability to perform equivalent operations as defined by the `kernel-controller` role. #### Example Custom Namespace + Here's an example of the creation of a custom namespace (`kernel-ns`) with its own service account (`kernel-sa`) and role binding (`kernel-controller`) that references the cluster-scoped role (`kernel-controller`) and includes appropriate labels to help with administration and analysis: + ```yaml apiVersion: v1 kind: Namespace @@ -363,20 +397,25 @@ roleRef: name: kernel-controller apiGroup: rbac.authorization.k8s.io ``` + ### Kernel Image Puller -Because kernels now reside within containers, and it's typical for the first reference of a container to trigger its pull from a container repository (e.g., docker.io or quay.io), kernel startup requests can easily time out whenever the kernel image is first accessed on any given node. To mitigate this issue, Enterprise Gateway deployment includes a DaemonSet object named `kernel-image-puller` or KIP. This object is responsible for polling Enterprise Gateway for the current set of configured kernelspecs, picking out any configured image name references, and pulling those images to the node on which KIP is running. Because it's a daemon set, this will also address the case when new nodes are added to a configuration (although spinning up new nodes on a kernel start request will likely time out anyway). + +Because kernels now reside within containers, and it's typical for the first reference of a container to trigger its pull from a container repository (e.g., docker.io or quay.io), kernel startup requests can easily time out whenever the kernel image is first accessed on any given node. To mitigate this issue, Enterprise Gateway deployment includes a DaemonSet object named `kernel-image-puller` or KIP. 
This object is responsible for polling Enterprise Gateway for the current set of configured kernelspecs, picking out any configured image name references, and pulling those images to the node on which KIP is running. Because it's a daemon set, this will also address the case when new nodes are added to a configuration (although spinning up new nodes on a kernel start request will likely time out anyway). #### KIP Configuration + The Kernel Image Puller can be configured for the interval at which it checks for new kernelspecs (`KIP_INTERVAL`), the number of puller threads it will utilize per node (`KIP_NUM_PULLERS`), the number of retries it will attempt for a given image (`KIP_NUM_RETRIES`), and the pull policy (`KIP_PULL_POLICY`) - which essentially dictates whether it will attempt to pull images that its already encountered (`Always`) vs. only pulling the image if it hasn't seen it yet (`IfNotPresent`). If the Enterprise Gateway defines an authentication token (`EG_AUTH_TOKEN`) then that same token should be configured here as (`KIP_AUTH_TOKEN`) so that the puller can correctly authenticate its requests. #### KIP Container Runtime -The Kernel Image Puller also supports multiple container runtimes since Docker is no longer configured by default in Kubernetes. KIP currently supports Docker and Containerd runtimes. If another runtime is encountered, KIP will try to proceed using the Containerd client `crictl` against the configured socket. As a result, it is import that the `criSocket` value be appropriately configured relative to the container runtime. If the runtime is something other than Docker or Containerd and `crictl` isn't able to pull images, it may be necessary to manually pre-seed images or incur kernel start timeouts the first time a given node is asked to start a kernel associated with a non-resident image. 
-KIP also supports the notion of a _default container registry_ whereby image names that do not specify a registry (e.g., `docker.io` or `quay.io`) KIP will apply the configured default. Ideally, the image name should be fully qualified. +The Kernel Image Puller also supports multiple container runtimes since Docker is no longer configured by default in Kubernetes. KIP currently supports Docker and Containerd runtimes. If another runtime is encountered, KIP will try to proceed using the Containerd client `crictl` against the configured socket. As a result, it is important that the `criSocket` value be appropriately configured relative to the container runtime. If the runtime is something other than Docker or Containerd and `crictl` isn't able to pull images, it may be necessary to manually pre-seed images or incur kernel start timeouts the first time a given node is asked to start a kernel associated with a non-resident image. + +KIP also supports the notion of a _default container registry_ whereby, for image names that do not specify a registry (e.g., `docker.io` or `quay.io`), KIP will apply the configured default. Ideally, the image name should be fully qualified. Here's what the Kernel Image Puller portion of the `enterprise-gateway.yaml` template looks like... 
+ ```yaml apiVersion: apps/v1 kind: DaemonSet @@ -386,67 +425,68 @@ metadata: spec: selector: matchLabels: - name: kernel-image-puller + name: kernel-image-puller template: metadata: labels: - name: kernel-image-puller + name: kernel-image-puller app: enterprise-gateway component: kernel-image-puller spec: containers: - - name: kernel-image-puller - image: elyra/kernel-image-puller:dev - env: - - name: KIP_GATEWAY_HOST - value: "http://enterprise-gateway.enterprise-gateway:8888" - - name: KIP_INTERVAL - value: "300" - - name: KIP_PULL_POLICY - value: "IfNotPresent" - - name: KIP_CRI_ENDPOINT - value: "unix:///var/run/docker.sock" - # value: "unix:///run/containerd/containerd.sock" - - name: KIP_DEFAULT_CONTAINER_REGISTRY - value: "docker.io" - # value: "quay.io" - volumeMounts: - - name: cri-socket - mountPath: "/var/run/docker.sock" - # mountPath: "/run/containerd/containerd.sock" for containerd installations + - name: kernel-image-puller + image: elyra/kernel-image-puller:dev + env: + - name: KIP_GATEWAY_HOST + value: "http://enterprise-gateway.enterprise-gateway:8888" + - name: KIP_INTERVAL + value: "300" + - name: KIP_PULL_POLICY + value: "IfNotPresent" + - name: KIP_CRI_ENDPOINT + value: "unix:///var/run/docker.sock" + # value: "unix:///run/containerd/containerd.sock" + - name: KIP_DEFAULT_CONTAINER_REGISTRY + value: "docker.io" + # value: "quay.io" + volumeMounts: + - name: cri-socket + mountPath: "/var/run/docker.sock" + # mountPath: "/run/containerd/containerd.sock" for containerd installations volumes: - - name: cri-socket - hostPath: - path: /var/run/docker.sock - # path: "/run/containerd/containerd.sock" for containerd installations + - name: cri-socket + hostPath: + path: /var/run/docker.sock + # path: "/run/containerd/containerd.sock" for containerd installations ``` ### Kernelspec Modifications -One of the more common areas of customization we see occurs within the kernelspec files located in `/usr/local/share/jupyter/kernels`. 
To accommodate the ability to customize the kernel definitions, you have two different options: NFS mounts, or custom container images. The two options are mutually exclusive, because they mount kernelspecs into the same location in the Enterprise Gateway pod. +One of the more common areas of customization we see occurs within the kernelspec files located in `/usr/local/share/jupyter/kernels`. To accommodate the ability to customize the kernel definitions, you have two different options: NFS mounts, or custom container images. The two options are mutually exclusive, because they mount kernelspecs into the same location in the Enterprise Gateway pod. #### Via NFS The kernels directory can be mounted as an NFS volume into the Enterprise Gateway pod, thereby making the kernelspecs available to all EG pods within the Kubernetes cluster (provided the NFS mounts exist on all applicable nodes). -As an example, we have included the necessary entries for mounting an existing NFS mount point into the Enterprise Gateway pod. By default, these references are commented out as they require the operator to configure the appropriate NFS mounts and server IP. If you are deploying Enterprise Gateway via the helm chart, you can enable NFS directly via helm values. +As an example, we have included the necessary entries for mounting an existing NFS mount point into the Enterprise Gateway pod. By default, these references are commented out as they require the operator to configure the appropriate NFS mounts and server IP. If you are deploying Enterprise Gateway via the helm chart, you can enable NFS directly via helm values. 
Here you can see how `enterprise-gateway.yaml` references use of the volume (ia `volumeMounts` for the container specification and `volumes` in the pod specification (non-applicable entries have been omitted): + ```yaml - spec: - containers: -# Uncomment to enable NFS-mounted kernelspecs - volumeMounts: - - name: kernelspecs - mountPath: "/usr/local/share/jupyter/kernels" - volumes: +spec: + containers: + # Uncomment to enable NFS-mounted kernelspecs + volumeMounts: - name: kernelspecs - nfs: - server: - path: "/usr/local/share/jupyter/kernels" - + mountPath: "/usr/local/share/jupyter/kernels" + volumes: + - name: kernelspecs + nfs: + server: + path: "/usr/local/share/jupyter/kernels" ``` + ```{tip} Because the kernel pod definition file, [kernel-pod.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2), resides in the kernelspecs hierarchy, customizations to the deployments of future kernel instances can now also take place. In addition, these same entries can be added to the kernel-pod.yaml definitions if access to the same or other NFS mount points are desired within kernel pods. (We'll be looking at ways to make modifications to per-kernel configurations more manageable.) ``` @@ -455,7 +495,7 @@ Use of more formal persistent volume types must include the [Persistent Volume]( #### Via Custom Container Image -If you are deploying Enterprise Gateway via the helm chart, then instead of using NFS, you can build your custom kernelspecs into a container image that Enterprise Gateway consumes. Here's an example Dockerfile for such a container: +If you are deploying Enterprise Gateway via the helm chart, then instead of using NFS, you can build your custom kernelspecs into a container image that Enterprise Gateway consumes. 
Here's an example Dockerfile for such a container: ``` FROM alpine:3.9 @@ -465,22 +505,24 @@ COPY kernels /kernels This assumes that your source contains a `kernels/` directory with all of the kernelspecs you'd like to end up in the image, e.g. `kernels/python_kubernetes/kernel.json` and any associated files. -Once you build your custom kernelspecs image and push it to a container registry, you can refer to it from your helm deployment. For instance: +Once you build your custom kernelspecs image and push it to a container registry, you can refer to it from your helm deployment. For instance: ```bash helm upgrade --install --atomic --namespace enterprise-gateway enterprise-gateway etc/kubernetes/helm --set kernelspecs.image=your-custom-image:latest ``` -...where `your-custom-image:latest` is the image name and tag of your kernelspecs image. Once deployed, the helm chart copies the data from the `/kernels` directory of your container into the `/usr/local/share/jupyter/kernels` directory of the Enterprise Gateway pod. Note that when this happens, the built-in kernelspecs are no longer available. So include all kernelspecs that you want to be available in your container image. +...where `your-custom-image:latest` is the image name and tag of your kernelspecs image. Once deployed, the helm chart copies the data from the `/kernels` directory of your container into the `/usr/local/share/jupyter/kernels` directory of the Enterprise Gateway pod. Note that when this happens, the built-in kernelspecs are no longer available. So include all kernelspecs that you want to be available in your container image. Also, you should update the helm chart `kernel.whitelist` value with the name(s) of your custom kernelspecs. ## Kubernetes Kernel Instances + There are essentially two kinds of kernels (independent of language) launched within an Enterprise Gateway Kubernetes cluster - _vanilla_ and _spark-on-kubernetes_ (if available). 
-When _vanilla_ kernels are launched, Enterprise Gateway is responsible for creating the corresponding pod. On the other hand, _spark-on-kubernetes_ kernels are launched via `spark-submit` with a specific `master` URI - which then creates the corresponding pod(s) (including executor pods). Images can be launched using both forms provided they have the appropriate support for Spark installed. +When _vanilla_ kernels are launched, Enterprise Gateway is responsible for creating the corresponding pod. On the other hand, _spark-on-kubernetes_ kernels are launched via `spark-submit` with a specific `master` URI - which then creates the corresponding pod(s) (including executor pods). Images can be launched using both forms provided they have the appropriate support for Spark installed. + +Here's the yaml configuration used when _vanilla_ kernels are launched. As noted in the `KubernetesProcessProxy` section below, this file ([kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2)) serves as a template where each of the tags surrounded with `{{` and `}}` represent variables that are substituted at the time of the kernel's launch. All `{{ kernel_xxx }}` parameters correspond to `KERNEL_XXX` environment variables that can be specified from the client in the kernel creation request's json body. -Here's the yaml configuration used when _vanilla_ kernels are launched. As noted in the `KubernetesProcessProxy` section below, this file ([kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2)) serves as a template where each of the tags surrounded with `{{` and `}}` represent variables that are substituted at the time of the kernel's launch. All `{{ kernel_xxx }}` parameters correspond to `KERNEL_XXX` environment variables that can be specified from the client in the kernel creation request's json body. 
```yaml+jinja apiVersion: v1 kind: Pod @@ -525,20 +567,25 @@ spec: image: "{{ kernel_image }}" name: "{{ kernel_pod_name }}" ``` + There are a number of items worth noting: + 1. Kernel pods can be identified in three ways using `kubectl`: - 1. By the global label `app=enterprise-gateway` - useful when needing to identify all related objects (e.g., `kubectl get all -l app=enterprise-gateway`) - 1. By the *kernel_id* label `kernel_id=` - useful when only needing specifics about a given kernel. This label is used internally by enterprise-gateway when performing its discovery and lifecycle management operations. - 1. By the *component* label `component=kernel` - useful when needing to identity only kernels and not other enterprise-gateway components. (Note, the latter can be isolated via `component=enterprise-gateway`.) - - Note that since kernels run in isolated namespaces by default, it's often helpful to include the clause `--all-namespaces` on commands that will span namespaces. To isolate commands to a given namespace, you'll need to add the namespace clause `--namespace `. -1. Each kernel pod is named by the invoking user (via the `KERNEL_USERNAME` env) and its kernel_id (env `KERNEL_ID`). This identifier also applies to those kernels launched within `spark-on-kubernetes`. -1. Kernel pods use the specified `securityContext`. If env `KERNEL_UID` is not specified in the kernel creation request a default value of `1000` (the jovyan user) will be used. Similarly, for `KERNEL_GID`, whose default is `100` (the users group). In addition, Enterprise Gateway enforces a list of prohibited UID and GID values. By default, this list is initialized to the 0 (root) UID and GID. Administrators can configure the `EG_PROHIBITED_UIDS` and `EG_PROHIBITED_GIDS` environment variables via the `enterprise-gateway.yaml` file with comma-separated values to alter the set of user and group ids to be prevented. -1. 
As noted above, if `KERNEL_NAMESPACE` is not provided in the request, Enterprise Gateway will create a namespace using the same naming algorithm for the pod. In addition, the `kernel-controller` cluster role will be bound to a namespace-scoped role binding of the same name using the namespace's default service account as its subject. Users wishing to use their own kernel namespaces must provide **both** `KERNEL_NAMESPACE` and `KERNEL_SERVICE_ACCOUNT_NAME` as these are both used in the `kernel-pod.yaml.j2` as `{{ kernel_namespace }}` and `{{ kernel_service_account_name }}`, respectively. -1. Kernel pods have restart policies of `Never`. This is because the Jupyter framework already has built-in logic for auto-restarting failed kernels and any other restart policy would likely interfere with the built-in behaviors. + + 1. By the global label `app=enterprise-gateway` - useful when needing to identify all related objects (e.g., `kubectl get all -l app=enterprise-gateway`) + 1. By the _kernel_id_ label `kernel_id=` - useful when only needing specifics about a given kernel. This label is used internally by enterprise-gateway when performing its discovery and lifecycle management operations. + 1. By the _component_ label `component=kernel` - useful when needing to identify only kernels and not other enterprise-gateway components. (Note, the latter can be isolated via `component=enterprise-gateway`.) + + Note that since kernels run in isolated namespaces by default, it's often helpful to include the clause `--all-namespaces` on commands that will span namespaces. To isolate commands to a given namespace, you'll need to add the namespace clause `--namespace `. + +1. Each kernel pod is named by the invoking user (via the `KERNEL_USERNAME` env) and its kernel_id (env `KERNEL_ID`). This identifier also applies to those kernels launched within `spark-on-kubernetes`. +1. Kernel pods use the specified `securityContext`. 
If env `KERNEL_UID` is not specified in the kernel creation request a default value of `1000` (the jovyan user) will be used. Similarly, for `KERNEL_GID`, whose default is `100` (the users group). In addition, Enterprise Gateway enforces a list of prohibited UID and GID values. By default, this list is initialized to the 0 (root) UID and GID. Administrators can configure the `EG_PROHIBITED_UIDS` and `EG_PROHIBITED_GIDS` environment variables via the `enterprise-gateway.yaml` file with comma-separated values to alter the set of user and group ids to be prevented. +1. As noted above, if `KERNEL_NAMESPACE` is not provided in the request, Enterprise Gateway will create a namespace using the same naming algorithm for the pod. In addition, the `kernel-controller` cluster role will be bound to a namespace-scoped role binding of the same name using the namespace's default service account as its subject. Users wishing to use their own kernel namespaces must provide **both** `KERNEL_NAMESPACE` and `KERNEL_SERVICE_ACCOUNT_NAME` as these are both used in the `kernel-pod.yaml.j2` as `{{ kernel_namespace }}` and `{{ kernel_service_account_name }}`, respectively. +1. Kernel pods have restart policies of `Never`. This is because the Jupyter framework already has built-in logic for auto-restarting failed kernels and any other restart policy would likely interfere with the built-in behaviors. 1. The parameters to the launcher that is built into the image are communicated via environment variables as noted in the `env:` section above. ## Unconditional Volume Mounts + Unconditional volume mounts can be added in the `kernel-pod.yaml.j2` template. An example of these unconditional volume mounts can be found when extending docker shared memory. For some I/O jobs the pod will need more than the default `64mb` of shared memory on the `/dev/shm` path. 
```yaml+jinja @@ -584,7 +631,7 @@ emptyDir: ## Kubernetes Resource Quotas -When deploying kernels on a Kubernetes cluster a best practice is to define request and limit quotas for CPUs, GPUs, and Memory. These quotas can be defined from the client via KERNEL_-prefixed environment variables which are passed through to the kernel at startup. +When deploying kernels on a Kubernetes cluster a best practice is to define request and limit quotas for CPUs, GPUs, and Memory. These quotas can be defined from the client via KERNEL\_-prefixed environment variables which are passed through to the kernel at startup. - `KERNEL_CPUS` - CPU Request by Kernel - `KERNEL_MEMORY` - MEMORY Request by Kernel @@ -593,7 +640,7 @@ When deploying kernels on a Kubernetes cluster a best practice is to define requ - `KERNEL_MEMORY_LIMIT` - MEMORY Limit - `KERNEL_GPUS_LIMIT` - GPUS Limit -Memory and CPU units are based on the [Kubernetes Official Documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) while GPU is using the NVIDIA `nvidia.com/gpu` parameter. The desired units should be included in the variable's value. +Memory and CPU units are based on the [Kubernetes Official Documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) while GPU is using the NVIDIA `nvidia.com/gpu` parameter. The desired units should be included in the variable's value. When defined, these variables are then substituted into the appropriate location of the corresponding kernel-pod.yaml.j2 template. @@ -628,11 +675,13 @@ When defined, these variables are then substituted into the appropriate location ``` ## KubernetesProcessProxy + To indicate that a given kernel should be launched into a Kubernetes configuration, the kernel.json file's `metadata` stanza must include a `process_proxy` stanza indicating a `class_name:` of `KubernetesProcessProxy`. 
This ensures the appropriate lifecycle management will take place relative to a Kubernetes environment. -Along with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the container image to associate with the kernel's pod. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`. +Along with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the container image to associate with the kernel's pod. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`. + +_(Please note that the use of `VERSION` in docker image tags is a placeholder for the appropriate version-related image tag. When kernelspecs are built via the Enterprise Gateway Makefile, `VERSION` is replaced with the appropriate version denoting the target release. A full list of available image tags can be found in the dockerhub repository corresponding to each image.)_ -_(Please note that the use of `VERSION` in docker image tags is a placeholder for the appropriate version-related image tag. When kernelspecs are built via the Enterprise Gateway Makefile, `VERSION` is replaced with the appropriate version denoting the target release. 
A full list of available image tags can be found in the dockerhub repository corresponding to each image.)_ ```json { "metadata": { @@ -645,13 +694,15 @@ _(Please note that the use of `VERSION` in docker image tags is a placeholder fo } } ``` -As always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. However, when launching _vanilla_ kernels in a kubernetes environment, what gets invoked isn't the kernel's launcher, but, instead, a python script that is responsible for using the [Kubernetes Python API](https://github.com/kubernetes-client/python) to create the corresponding pod instance. The pod is _configured_ by applying the values to each of the substitution parameters into the [kernel-pod.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file previously displayed. This file resides in the same `scripts` directory as the kubernetes launch script - `launch_kubernetes.py` - which is referenced by the kernel.json's `argv:` stanza: + +As always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. However, when launching _vanilla_ kernels in a kubernetes environment, what gets invoked isn't the kernel's launcher, but, instead, a python script that is responsible for using the [Kubernetes Python API](https://github.com/kubernetes-client/python) to create the corresponding pod instance. The pod is _configured_ by applying the values to each of the substitution parameters into the [kernel-pod.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file previously displayed. 
This file resides in the same `scripts` directory as the kubernetes launch script - `launch_kubernetes.py` - which is referenced by the kernel.json's `argv:` stanza: + ```json { "argv": [ "python", "/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py", - "--RemoteProcessProxy.kernel-id", + "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", @@ -660,20 +711,24 @@ As always, kernels are launched by virtue of the `argv:` stanza in their respect ] } ``` + By default, _vanilla_ kernels use a value of `none` for the spark context initialization mode so no context will be created automatically. -When the kernel is intended to target _Spark-on-kubernetes_, its launch is very much like kernels launched in YARN _cluster mode_, albeit with a completely different set of parameters. Here's an example `SPARK_OPTS` string value which best conveys the idea: +When the kernel is intended to target _Spark-on-kubernetes_, its launch is very much like kernels launched in YARN _cluster mode_, albeit with a completely different set of parameters. Here's an example `SPARK_OPTS` string value which best conveys the idea: + ``` "SPARK_OPTS": "--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.docker.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor-py:v2.5.0-kubernetes-0.5.0 --conf spark.kubernetes.submission.waitAppCompletion=false", ``` + Note that each of the labels previously discussed are also applied to the _driver_ and _executor_ pods. 
For these invocations, the `argv:` is nearly identical to non-kubernetes configurations, invoking a `run.sh` script which essentially holds the `spark-submit` invocation that takes the aforementioned `SPARK_OPTS` as its primary parameter: + ```json { "argv": [ "/usr/local/share/jupyter/kernels/spark_python_kubernetes/bin/run.sh", - "--RemoteProcessProxy.kernel-id", + "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", @@ -686,6 +741,7 @@ For these invocations, the `argv:` is nearly identical to non-kubernetes configu ``` ### Confirming deployment and the service port mapping + ```bash kubectl get all --all-namespaces -l app=enterprise-gateway @@ -701,7 +757,8 @@ po/enterprise-gateway-74c46cb7fc-jrkl7 1/1 Running 0 2h NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE svc/enterprise-gateway NodePort 10.110.253.220 8888:32422/TCP 2h ``` -Of particular importance is the mapping to port `8888` (e.g.,`32422`). If you are performing this on the same host as where the notebook will run, then you will need to note the cluster-ip entry (e.g.,`10.110.253.220`). + +Of particular importance is the mapping to port `8888` (e.g.,`32422`). If you are performing this on the same host as where the notebook will run, then you will need to note the cluster-ip entry (e.g.,`10.110.253.220`). (Note: if the number of replicas is > 1, then you will see two pods listed with different five-character suffixes.) @@ -715,7 +772,7 @@ Of particular importance is the mapping to port `8888` (e.g.,`32422`). If you a # - 9.30.118.200 ``` -The value of the `JUPYTER_GATEWAY_URL` used by the gateway-enabled Notebook server will vary depending on whether you choose to define an external IP or not. 
If and external IP is defined, you'll set `JUPYTER_GATEWAY_URL=:8888` else you'll set `JUPYTER_GATEWAY_URL=:32422` **but also need to restart clients each time Enterprise Gateway is started.** As a result, use of the `externalIPs:` value is highly recommended. +The value of the `JUPYTER_GATEWAY_URL` used by the gateway-enabled Notebook server will vary depending on whether you choose to define an external IP or not. If an external IP is defined, you'll set `JUPYTER_GATEWAY_URL=:8888` else you'll set `JUPYTER_GATEWAY_URL=:32422` **but also need to restart clients each time Enterprise Gateway is started.** As a result, use of the `externalIPs:` value is highly recommended. ## Setting up a Kubernetes Ingress for use with Enterprise Gateway @@ -744,6 +801,7 @@ can be found at [here](https://github.com/jupyter-server/enterprise_gateway/tree for Enterprise Gateway. Example - Enable ingress and edit etc/kubernetes/helm/values.yaml to the desired configurations and install EG as normal via helm. + ```bash ingress: enabled: true # Ingress is disabled by default @@ -757,6 +815,7 @@ ingress: ``` A quick look at our ingress resource after deploying EG with helm : + ```bash $ kubectl describe ingress enterprise-gateway-ingress -n enterprise-gateway Name: enterprise-gateway-ingress @@ -783,17 +842,20 @@ Events: ``` This will expose the Enterprise Gateway service at + ```bash http://KUBERNETES_HOSTNAME:PORT/gateway ``` + where `PORT` is the ingress controller's http `NodePort` we referenced earlier. **NOTE:** `PORT` may be optional depending on how your environment/infrastructure is configured. - ## Kubernetes Tips + The following items illustrate some useful commands for navigating Enterprise Gateway within a kubernetes environment. -- All objects created on behalf of Enterprise Gateway can be located using the label `app=enterprise-gateway`. You'll probably see duplicated entries for the deployments(deploy) and replication sets (rs) - we didn't include the duplicates here. 
+- All objects created on behalf of Enterprise Gateway can be located using the label `app=enterprise-gateway`. You'll probably see duplicated entries for the deployments(deploy) and replication sets (rs) - we didn't include the duplicates here. + ```bash kubectl get all -l app=enterprise-gateway --all-namespaces @@ -807,38 +869,47 @@ NAME READY STATUS RESTARTS pod/alice-5e755458-a114-4215-96b7-bcb016fc7b62 1/1 Running 0 8s pod/enterprise-gateway-74c46cb7fc-jrkl7 1/1 Running 0 3h ``` + - All objects related to a given kernel can be located using the label `kernel_id=` + ```bash kubectl get all -l kernel_id=5e755458-a114-4215-96b7-bcb016fc7b62 --all-namespaces NAME READY STATUS RESTARTS AGE pod/alice-5e755458-a114-4215-96b7-bcb016fc7b62 1/1 Running 0 28s ``` + Note: because kernels are, by default, isolated to their own namespace, you could also find all objects of a given kernel using only the `--namespace ` clause. - To enter into a given pod (i.e., container) in order to get a better idea of what might be happening within the container, use the exec command with the pod name + ```bash kubectl exec -it enterprise-gateway-74c46cb7fc-jrkl7 /bin/bash ``` - Logs can be accessed against the pods or deployment (requires the object type prefix (e.g., `pod/`)) + ```bash kubectl logs -f pod/alice-5e755458-a114-4215-96b7-bcb016fc7b62 ``` + Note that if using multiple replicas, commands against each pod are required. -- The Kubernetes dashboard is useful as well. It's located at port `30000` of the master node +- The Kubernetes dashboard is useful as well. It's located at port `30000` of the master node + ```bash https://elyra-kube1.foo.bar.com:30000/dashboard/#!/overview?namespace=default ``` + From there, logs can be accessed by selecting the `Pods` option in the left-hand pane followed by the _lined_ icon on the far right. 
- User \"system:serviceaccount:default:default\" cannot list pods in the namespace \"default\" -On a recent deployment, Enterprise Gateway was not able to create or list kernel pods. Found -the following command was necessary. (Kubernetes security relative to Enterprise Gateway is still under construction.) +On a recent deployment, Enterprise Gateway was not able to create or list kernel pods. Found +the following command was necessary. (Kubernetes security relative to Enterprise Gateway is still under construction.) + ```bash kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=default:default ``` diff --git a/docs/source/operators/deploy-single.md b/docs/source/operators/deploy-single.md index c7e24338e..df611ffba 100644 --- a/docs/source/operators/deploy-single.md +++ b/docs/source/operators/deploy-single.md @@ -3,6 +3,7 @@ Single-server deployment can be useful for development and is not meant to be run in production environments as it subjects the gateway server to resource exhaustion. Steps to deploy a single server are: + 1. [Install Enterprise Gateway](installing-eg.md) 2. [Install the desired kernels](installing-kernels.md) 3. 
Install and configure the server and desired kernel specifications (see below) @@ -13,19 +14,13 @@ If you just want to try Enterprise Gateway in a single-server setup, you can use ```json { "display_name": "Python 3 Local", - "language": "python", + "language": "python", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy" } }, - "argv": [ - "python", - "-m", - "ipykernel_launcher", - "-f", - "{connection_file}" - ] + "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"] } ``` diff --git a/docs/source/operators/deploy-yarn-cluster.md b/docs/source/operators/deploy-yarn-cluster.md index a1e2618f0..2164fd26e 100644 --- a/docs/source/operators/deploy-yarn-cluster.md +++ b/docs/source/operators/deploy-yarn-cluster.md @@ -4,26 +4,31 @@ To leverage the full distributed capabilities of Jupyter Enterprise Gateway, the The following sample kernelspecs are currently available on YARN cluster: -+ spark_R_yarn_cluster -+ spark_python_yarn_cluster -+ spark_scala_yarn_cluster +- spark_R_yarn_cluster +- spark_python_yarn_cluster +- spark_scala_yarn_cluster Steps required to complete deployment on a Hadoop YARN cluster are: -1. [Install Enterprise Gateway](installing-eg.md) on the primary node of the Hadoop YARN cluster. Note, this location is not a hard-requirement, but recommended. If installed remotely, some extra configuration will be necessary relative to the Hadoop configuration. + +1. [Install Enterprise Gateway](installing-eg.md) on the primary node of the Hadoop YARN cluster. Note, this location is not a hard-requirement, but recommended. If installed remotely, some extra configuration will be necessary relative to the Hadoop configuration. 2. [Install the desired kernels](installing-kernels.md) 3. Install and configure the server and desired kernel specifications (see below) 4. 
[Launch Enterprise Gateway](launching-eg.md) The distributed capabilities are currently based on an Apache Spark cluster utilizing Hadoop YARN as the resource manager and thus require the following environment variables to be set to facilitate the integration between Apache Spark and Hadoop YARN components: -* `SPARK_HOME` must point to the Apache Spark installation path +- `SPARK_HOME` must point to the Apache Spark installation path + ``` SPARK_HOME:/usr/hdp/current/spark2-client # For HDP distribution ``` -* EG_YARN_ENDPOINT: Must point to the YARN resource manager endpoint if remote from YARN cluster + +- EG_YARN_ENDPOINT: Must point to the YARN resource manager endpoint if remote from YARN cluster + ``` -EG_YARN_ENDPOINT=http://${YARN_RESOURCE_MANAGER_FQDN}:8088/ws/v1/cluster +EG_YARN_ENDPOINT=http://${YARN_RESOURCE_MANAGER_FQDN}:8088/ws/v1/cluster ``` + ```{note} If Enterprise Gateway is using an applicable `HADOOP_CONF_DIR` that contains a valid `yarn-site.xml` file, then this config value can remain unset (default = None) and the YARN client library will locate the appropriate resource manager from the configuration. This is also true in cases where the YARN cluster is configured for high availability. ``` @@ -36,7 +41,7 @@ EG_ALT_YARN_ENDPOINT=http://${ALT_YARN_RESOURCE_MANAGER_FQDN}:8088/ws/v1/cluster ## Configuring Kernels for YARN Cluster mode -For each supported kernel (IPyKernel for Python, Apache Toree for Scala, and IRKernel for R), we have provided sample kernel configurations and launchers as assets associated with each [Enterprise Gateway release](https://github.com/jupyter-server/enterprise_gateway/releases). For Hadoop YARN configurations, you can access those specific kernel specifications within the `jupyter_enterprise_gateway_kernelspecs_yarn-VERSION.tar.gz` file. (Replace `VERSION` with the desired release number.) 
+For each supported kernel (IPyKernel for Python, Apache Toree for Scala, and IRKernel for R), we have provided sample kernel configurations and launchers as assets associated with each [Enterprise Gateway release](https://github.com/jupyter-server/enterprise_gateway/releases). For Hadoop YARN configurations, you can access those specific kernel specifications within the `jupyter_enterprise_gateway_kernelspecs_yarn-VERSION.tar.gz` file. (Replace `VERSION` with the desired release number.) ```{note} The sample kernels specifications in `jupyter_enterprise_gateway_kernelspecs_yarn-VERSION.tar.gz` also contain specification for YARN client mode (in addition to cluster mode). Both are usable in this situation. @@ -45,6 +50,7 @@ The sample kernels specifications in `jupyter_enterprise_gateway_kernelspecs_yar ```{tip} We recommend installing kernel specifications into a shared folder like `/usr/local/share/jupyter/kernels`. This is the location in which they reside within container images and where many of the document references assume they'll be located. ``` + ### Python Kernel (IPython kernel) Considering we would like to enable the IPython kernel to run on YARN Cluster and Client mode we would have to copy the sample configuration folder **spark_python_yarn_cluster** to where the Jupyter kernels are installed (e.g. jupyter kernelspec list) @@ -73,7 +79,7 @@ For more information about the Scala kernel, please visit the [Apache Toree](htt Considering we would like to enable the IRkernel to run on YARN Cluster and Client mode we would have to copy the sample configuration folder **spark_R_yarn_cluster** to where the Jupyter kernels are installed (e.g. 
jupyter kernelspec list) -``` Bash +```Bash wget https://github.com/jupyter-server/enterprise_gateway/releases/download/v2.6.0/jupyter_enterprise_gateway_kernelspecs-2.6.0.tar.gz KERNELS_FOLDER=/usr/local/share/jupyter/kernels tar -zxvf jupyter_enterprise_gateway_kernelspecs-2.6.0.tar.gz --strip 1 --directory $KERNELS_FOLDER/spark_R_yarn_cluster/ spark_R_yarn_cluster/ @@ -82,6 +88,7 @@ tar -zxvf jupyter_enterprise_gateway_kernelspecs-2.6.0.tar.gz --strip 1 --direct For more information about the iR kernel, please visit the [IRkernel](https://irkernel.github.io/) page. ### Adjusting the kernel specifications + After installing the kernel specifications, you should have a `kernel.json` that resembles the following (this one is relative to the Python kernel): ```json @@ -103,7 +110,7 @@ After installing the kernel specifications, you should have a `kernel.json` that }, "argv": [ "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh", - "--RemoteProcessProxy.kernel-id", + "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", @@ -112,8 +119,9 @@ After installing the kernel specifications, you should have a `kernel.json` that ] } ``` -The `metadata` and `argv` entries for each kernel specification should be nearly identical and not require changes. You will need to adjust the `env` entries to apply to your specific configuration. + +The `metadata` and `argv` entries for each kernel specification should be nearly identical and not require changes. You will need to adjust the `env` entries to apply to your specific configuration. You should also check the same kinds of environment and path settings in the corresponding `bin/run.sh` file - although changes are not typically necessary. 
-After making any necessary adjustments such as updating `SPARK_HOME` or other environment specific configuration and paths, you now should have a new kernel available to execute your notebook cell code distributed on a Hadoop YARN Spark Cluster. +After making any necessary adjustments such as updating `SPARK_HOME` or other environment specific configuration and paths, you now should have a new kernel available to execute your notebook cell code distributed on a Hadoop YARN Spark Cluster. diff --git a/docs/source/operators/installing-eg.md b/docs/source/operators/installing-eg.md index 608b5a685..80c8afd49 100644 --- a/docs/source/operators/installing-eg.md +++ b/docs/source/operators/installing-eg.md @@ -6,12 +6,12 @@ packages for scientific computing and data science. Use the following installation steps: -* Download [Anaconda](https://www.anaconda.com/download). We recommend downloading Anaconda’s +- Download [Anaconda](https://www.anaconda.com/download). We recommend downloading Anaconda’s latest Python version (currently Python 3.9). -* Install the version of Anaconda which you downloaded, following the instructions on the download page. +- Install the version of Anaconda which you downloaded, following the instructions on the download page. -* Install the latest version of Jupyter Enterprise Gateway from [PyPI](https://pypi.python.org/pypi/jupyter_enterprise_gateway/) +- Install the latest version of Jupyter Enterprise Gateway from [PyPI](https://pypi.python.org/pypi/jupyter_enterprise_gateway/) or [conda forge](https://conda-forge.org/) along with its dependencies. ```{warning} @@ -31,6 +31,7 @@ conda install -c conda-forge jupyter_enterprise_gateway At this point, the Jupyter Enterprise Gateway deployment provides local kernel support which is fully compatible with Jupyter Kernel Gateway. To uninstall Jupyter Enterprise Gateway... 
+ ```bash #uninstall using pip pip uninstall jupyter_enterprise_gateway diff --git a/docs/source/operators/installing-kernels.md b/docs/source/operators/installing-kernels.md index 0e7ff2b54..653e54f25 100644 --- a/docs/source/operators/installing-kernels.md +++ b/docs/source/operators/installing-kernels.md @@ -1,10 +1,12 @@ # Installing supported kernels (common) + Enterprise Gateway includes kernel specifications that support the following kernels: + - IPython kernel (Python) - Apache Toree (Scala) - IRKernel (R) -Refer to the following for instructions on installing the respective kernels. For cluster-based environments, these steps should be performed on each applicable node of the cluster, unless noted otherwise. +Refer to the following for instructions on installing the respective kernels. For cluster-based environments, these steps should be performed on each applicable node of the cluster, unless noted otherwise. ## Python Kernel (IPython kernel) @@ -19,13 +21,13 @@ This step is also required for the IRkernel (see below). However, it is **not** ## Scala Kernel (Apache Toree) -We have tested the latest version of [Apache Toree](https://toree.apache.org/) with Scala 2.11 support. Please note that the Apache Toree kernel is now bundled in the kernelspecs tar file for each of the Scala kernelspecs provided by Enterprise Gateway. +We have tested the latest version of [Apache Toree](https://toree.apache.org/) with Scala 2.11 support. Please note that the Apache Toree kernel is now bundled in the kernelspecs tar file for each of the Scala kernelspecs provided by Enterprise Gateway. -The sample kernel specifications included in Enterprise Gateway include the necessary Apach Toree libraries so its installation is not necessary. In addition, because Apache Toree targets Spark installations, its distribution can be achieved via `spark-submit` and its installation is not necessary on worker nodes - except for [distributed deployments](deploy-distributed.md). 
+The sample kernel specifications included in Enterprise Gateway include the necessary Apache Toree libraries so its installation is not necessary. In addition, because Apache Toree targets Spark installations, its distribution can be achieved via `spark-submit` and its installation is not necessary on worker nodes - except for [distributed deployments](deploy-distributed.md). ## R Kernel (IRkernel) -Perform the following steps on Jupyter Enterprise Gateway hosting system as well as all worker nodes. Please refer to the [IRKernel documentation](https://irkernel.github.io/) for further details. +Perform the following steps on Jupyter Enterprise Gateway hosting system as well as all worker nodes. Please refer to the [IRKernel documentation](https://irkernel.github.io/) for further details. ```Bash conda install --yes --quiet -c r r-essentials r-irkernel r-argparse diff --git a/docs/source/operators/launching-eg.md b/docs/source/operators/launching-eg.md index ad1f44fc6..f6f8a7e7d 100644 --- a/docs/source/operators/launching-eg.md +++ b/docs/source/operators/launching-eg.md @@ -1,6 +1,6 @@ # Launching Enterprise Gateway (common) -Very few arguments are necessary to minimally start Enterprise Gateway. The following command could be considered a minimal command: +Very few arguments are necessary to minimally start Enterprise Gateway. The following command could be considered a minimal command: ```bash jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 @@ -12,9 +12,9 @@ where `--ip=0.0.0.0` exposes Enterprise Gateway on the public network and `--por The ability to target resource-managed clusters (and use remote kernels) will require additional configuration settings depending on the resource manager. For additional information see the appropriate server-based deployment topic of our Operators Guide. ``` -We recommend starting Enterprise Gateway as a background task. 
As a result, you might find it best to create a start script to maintain options, file redirection, etc. +We recommend starting Enterprise Gateway as a background task. As a result, you might find it best to create a start script to maintain options, file redirection, etc. -The following script starts Enterprise Gateway with `DEBUG` tracing enabled (default is `INFO`) and idle kernel culling for any kernels idle for 12 hours with idle check intervals occurring every 60 seconds. The Enterprise Gateway log can then be monitored via `tail -F enterprise_gateway.log` and it can be stopped via `kill $(cat enterprise_gateway.pid)` +The following script starts Enterprise Gateway with `DEBUG` tracing enabled (default is `INFO`) and idle kernel culling for any kernels idle for 12 hours with idle check intervals occurring every 60 seconds. The Enterprise Gateway log can then be monitored via `tail -F enterprise_gateway.log` and it can be stopped via `kill $(cat enterprise_gateway.pid)` ```bash #!/bin/bash @@ -31,5 +31,5 @@ fi ``` ```{tip} -Remember that any options set via the command-line will not be available for [dynamic configuration funtionality](config-dynamic.md#dynamic-configurables). +Remember that any options set via the command-line will not be available for [dynamic configuration functionality](config-dynamic.md#dynamic-configurables). ``` diff --git a/docs/source/other/related-resources.md b/docs/source/other/related-resources.md index 32aa2fb63..abd85dd78 100644 --- a/docs/source/other/related-resources.md +++ b/docs/source/other/related-resources.md @@ -1,12 +1,12 @@ # Related Resources -Here are some resources related to the Jupyter Enterprise Gateway project. +Here are some resources related to the Jupyter Enterprise Gateway project. 
-* [Jupyter.org](https://jupyter.org) -* [Jupyter Server Team Compass](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass) -* [Jupyter Calendar - Community Meetings](https://docs.jupyter.org/en/latest/community/content-community.html#jupyter-community-meetings) -* [Jupyter Community Discourse Forum](https://discourse.jupyter.org/) -* [Jupyter Kernel Gateway Github Repo](https://github.com/jupyter-server/kernel_gateway) - the source code for Kernel Gateway - which supports local kernels and notebook-hosted end-points. -* [Jupyter Server Github Repo](https://github.com/jupyter-server/jupyter_server) - the source code for the Jupyter Server. Many of the Enterprise Gateway's handlers and kernel management classes either _are_ or are derived from the Jupyter Server classes. -* [Jupyter Notebook Github Repo](https://github.com/jupyter/notebook) - the source code for the classic Notebook from which the gateways and Jupyter Server were derived. -* [Jupyter Client Github Repo](https://github.com/jupyter/jupyter_client) - the source code for the base kernel lifecycle management and message classes. Enterprise Gateway extends the `KernelManager` classes of `jupyter_client`. +- [Jupyter.org](https://jupyter.org) +- [Jupyter Server Team Compass](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass) +- [Jupyter Calendar - Community Meetings](https://docs.jupyter.org/en/latest/community/content-community.html#jupyter-community-meetings) +- [Jupyter Community Discourse Forum](https://discourse.jupyter.org/) +- [Jupyter Kernel Gateway Github Repo](https://github.com/jupyter-server/kernel_gateway) - the source code for Kernel Gateway - which supports local kernels and notebook-hosted end-points. +- [Jupyter Server Github Repo](https://github.com/jupyter-server/jupyter_server) - the source code for the Jupyter Server. 
Many of the Enterprise Gateway's handlers and kernel management classes either _are_ or are derived from the Jupyter Server classes. +- [Jupyter Notebook Github Repo](https://github.com/jupyter/notebook) - the source code for the classic Notebook from which the gateways and Jupyter Server were derived. +- [Jupyter Client Github Repo](https://github.com/jupyter/jupyter_client) - the source code for the base kernel lifecycle management and message classes. Enterprise Gateway extends the `KernelManager` classes of `jupyter_client`. diff --git a/docs/source/other/troubleshooting.md b/docs/source/other/troubleshooting.md index cd2d98c6f..25489f0dd 100644 --- a/docs/source/other/troubleshooting.md +++ b/docs/source/other/troubleshooting.md @@ -1,103 +1,93 @@ # Troubleshooting Guide -This page identifies scenarios we've encountered when running Enterprise Gateway. We also provide +This page identifies scenarios we've encountered when running Enterprise Gateway. We also provide instructions for setting up a debug environment on our [Debugging Jupyter Enterprise Gateway](../contributors/debug.md) page. - -## Fresh Install +## Fresh Install + Scenario: **I just installed Enterprise Gateway but nothing happens, how do I proceed?** - + Because Enterprise Gateway is one element of a networked application, there are various _touch points_ that should -be validated independently. The following items can be used as a checklist to confirm general operability. -1. Confirm that Enterprise Gateway is servicing general requests. This can be accomplished using the following -`curl` command, which should produce the json corresponding to the configured kernelspecs: - ```bash - curl http://:/api/kernelspecs - ``` -2. Independently validate any resource manager you're running against. Various resource managers usually provide -examples for how to go about validating their configuration. -3. 
Confirm that the Enterprise Gateway arguments for contacting the configured resource manager are in place. These -should be covered in the deployment section of our Operators Guide. +be validated independently. The following items can be used as a checklist to confirm general operability. + +1. Confirm that Enterprise Gateway is servicing general requests. This can be accomplished using the following + `curl` command, which should produce the json corresponding to the configured kernelspecs: + `bash curl http://:/api/kernelspecs ` +2. Independently validate any resource manager you're running against. Various resource managers usually provide + examples for how to go about validating their configuration. +3. Confirm that the Enterprise Gateway arguments for contacting the configured resource manager are in place. These + should be covered in the deployment section of our Operators Guide. 4. If using a Notebook server as your front-end, ensure that the Gateway configuration options or NB2KG extension settings are properly configured. -Once the notebook has started, a refresh on the tree view should issue the same `kernelspecs` request in step 1 and -the drop-down menu items for available kernels should reflect an entry for each kernelspec returned. -5. **Always** consult your Enterprise Gateway log file. If you have not redirected `stdout` and `stderr` to a -file you are highly encouraged to do so. In addition, you should enable `DEBUG` logging at least until your -configuration is stable. Please note, however, that you may be asked to produce an Enterprise Gateway log with -`DEBUG` enabled when reporting issues. An example of output redirection and `DEBUG` logging is also provided in our + Once the notebook has started, a refresh on the tree view should issue the same `kernelspecs` request in step 1 and + the drop-down menu items for available kernels should reflect an entry for each kernelspec returned. +5. **Always** consult your Enterprise Gateway log file. 
If you have not redirected `stdout` and `stderr` to a + file you are highly encouraged to do so. In addition, you should enable `DEBUG` logging at least until your + configuration is stable. Please note, however, that you may be asked to produce an Enterprise Gateway log with + `DEBUG` enabled when reporting issues. An example of output redirection and `DEBUG` logging is also provided in our [Operators Guide](../operators/launching-eg.md#launching-enterprise-gateway-common). - + ## Hadoop YARN Cluster Mode -Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Cluster Mode, but it failed with + +Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Cluster Mode, but it failed with a "Kernel error" and State: 'FAILED'.** -1. Check the output from Enterprise Gateway for an error message. If an applicationId was -generated, make a note of it. For example, you can locate the applicationId -`application_1506552273380_0011` from the following snippet of message: - ``` - [D 2017-09-28 17:13:22.675 EnterpriseGatewayApp] 13: State: 'ACCEPTED', Host: 'burna2.yourcompany.com', KernelID: '28a5e827-4676-4415-bbfc-ac30a0dcc4c3', ApplicationID: 'application_1506552273380_0011' - 17/09/28 17:13:22 INFO YarnClientImpl: Submitted application application_1506552273380_0011 - 17/09/28 17:13:22 INFO Client: Application report for application_1506552273380_0011 (state: ACCEPTED) - 17/09/28 17:13:22 INFO Client: - client token: N/A - diagnostics: AM container is launched, waiting for AM container to Register with RM - ApplicationMaster host: N/A - ApplicationMaster RPC port: -1 - queue: default - start time: 1506644002471 - final status: UNDEFINED - tracking URL: http://burna1.yourcompany.com:8088/proxy/application_1506552273380_0011/ - ``` +1. Check the output from Enterprise Gateway for an error message. If an applicationId was + generated, make a note of it. 
For example, you can locate the applicationId + `application_1506552273380_0011` from the following snippet of message: + `[D 2017-09-28 17:13:22.675 EnterpriseGatewayApp] 13: State: 'ACCEPTED', Host: 'burna2.yourcompany.com', KernelID: '28a5e827-4676-4415-bbfc-ac30a0dcc4c3', ApplicationID: 'application_1506552273380_0011' 17/09/28 17:13:22 INFO YarnClientImpl: Submitted application application_1506552273380_0011 17/09/28 17:13:22 INFO Client: Application report for application_1506552273380_0011 (state: ACCEPTED) 17/09/28 17:13:22 INFO Client: client token: N/A diagnostics: AM container is launched, waiting for AM container to Register with RM ApplicationMaster host: N/A ApplicationMaster RPC port: -1 queue: default start time: 1506644002471 final status: UNDEFINED tracking URL: http://burna1.yourcompany.com:8088/proxy/application_1506552273380_0011/` 2. Lookup the YARN log for that applicationId in the YARN ResourceManager UI: ![YARN ResourceManager UI](../images/yarnui.jpg) 3. Drill down from the applicationId to find logs for the failed attempts and take appropriate - actions. For example, for the error below, - ``` - Traceback (most recent call last): - File "launch_ipykernel.py", line 7, in - from ipython_genutils.py3compat import str_to_bytes - ImportError: No module named ipython_genutils.py3compat - ``` - Simply running "pip install ipython_genutils" should fix the problem. If Anaconda is - installed, make sure the environment variable for Python, i.e. `PYSPARK_PYTHON`, is - properly configured in the kernelspec and matches the actual Anaconda installation - directory. - + actions. For example, for the error below, + ``` + Traceback (most recent call last): + File "launch_ipykernel.py", line 7, in + from ipython_genutils.py3compat import str_to_bytes + ImportError: No module named ipython_genutils.py3compat + ``` + Simply running "pip install ipython_genutils" should fix the problem. 
If Anaconda is + installed, make sure the environment variable for Python, i.e. `PYSPARK_PYTHON`, is + properly configured in the kernelspec and matches the actual Anaconda installation + directory. ## SSH Permissions -Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Client Mode, but it failed with + +Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Client Mode, but it failed with a "Kernel error" and an `AuthenticationException`.** + ``` -[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'AuthenticationException' occurred -when creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra', +[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'AuthenticationException' occurred +when creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra', message='Authentication failed.'. ``` -This error indicates that the password-less ssh may not be properly configured. Password-less -ssh needs to be configured on the node that the Enterprise Gateway is running on to all other +This error indicates that the password-less ssh may not be properly configured. Password-less +ssh needs to be configured on the node that the Enterprise Gateway is running on to all other worker nodes. You might also see an `SSHException` indicating a similar issue. + ``` -[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'SSHException' occurred -when creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra', +[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'SSHException' occurred +when creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra', message='No authentication methods available.'. ``` -In general, you can look for more information in the kernel log for YARN Client -kernels. The default location is /tmp with a filename of `kernel-.log`. The location -can be configured using the environment variable `EG_KERNEL_LOG_DIR` during Enterprise Gateway start up. 
+In general, you can look for more information in the kernel log for YARN Client +kernels. The default location is /tmp with a filename of `kernel-.log`. The location +can be configured using the environment variable `EG_KERNEL_LOG_DIR` during Enterprise Gateway start up. ```{seealso} -[Launching Enterprise Gateway](../operators/launching-eg.md#launching-enterprise-gateway-common) for an -example of starting the Enterprise Gateway from a script and the -[Operators Guide](../operators/config-add-env.md#additional-environment-variables) +[Launching Enterprise Gateway](../operators/launching-eg.md#launching-enterprise-gateway-common) for an +example of starting the Enterprise Gateway from a script and the +[Operators Guide](../operators/config-add-env.md#additional-environment-variables) for a list of configurable environment variables. ``` - ## SSH Tunneling + Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Client Mode with SSH tunneling enabled, but it failed with a "Kernel error" and a SSHException.** + ``` [E 2017-10-26 11:48:20.922 EnterpriseGatewayApp] The following exception occurred waiting for connection file response for KernelId 'da3d0dde-9de1-44b1-b1b4-e6f3cf52dfb9' on host @@ -130,6 +120,7 @@ Repeat the aforementioned step as `jdoe` on `node1` for each of the hosts listed `EG_REMOTE_HOSTS` and restart Enterprise Gateway. ## Kernel Encounters `TypeError` + Scenario: **I'm trying to launch a (Python/Scala/R) kernel, but it failed with `TypeError: Incorrect padding`.** ``` @@ -152,7 +143,7 @@ TypeError: Incorrect padding ``` To address this issue, first ensure that the launchers used for each kernel are derived -from the same release as the Enterprise Gateway server. Next ensure that `pycryptodomex 3.9.7` +from the same release as the Enterprise Gateway server. 
Next ensure that `pycryptodomex 3.9.7` or later is installed on all hosts using either `pip install` or `conda install` as shown below: ``` @@ -170,6 +161,7 @@ This should be done on the host running Enterprise Gateway as well as all the re on which the kernel is launched. ## Port Range + Scenario: **I'm trying to launch a (Python/Scala/R) kernel with port range, but it failed with `RuntimeError: Invalid port range `.** ``` @@ -190,13 +182,14 @@ RuntimeError: Invalid port range '1000..2000' specified. Range for valid port nu ``` To address this issue, make sure that the specified port range does not overlap with TCP's well-known - port range of (0, 1024]. +port range of (0, 1024]. ## Hadoop YARN Timeout + Scenario: **I'm trying to launch a (Python/Scala/R) kernel, but it times out and the YARN application status remain `ACCEPTED`.** Enterprise Gateway log from server will look like the one below, and will complain that there are no resources: - `launch timeout due to: YARN resources unavailable` +`launch timeout due to: YARN resources unavailable` ```bash State: 'ACCEPTED', Host: '', KernelID: '3181db50-8bb5-4f91-8556-988895f63efa', ApplicationID: 'application_1537119233094_0001' @@ -206,76 +199,81 @@ Enterprise Gateway log from server will look like the one below, and will compla SIGKILL signal sent to pid: 19690 YarnClusterProcessProxy.kill, application ID: application_1537119233094_0001, kernel ID: 3181db50-8bb5-4f91-8556-988895f63efa, state: ACCEPTED KernelID: '3181db50-8bb5-4f91-8556-988895f63efa' launch timeout due to: YARN resources unavailable after 61.0 seconds for app application_1537119233094_0001, launch timeout: 60.0! Check YARN configuration. - ``` +``` The most common cause for this is that YARN Resource Managers are failing to start and the cluster see no resources available. - Make sure YARN Resource Managerss are running ok. 
We have also noticed that, in Kerborized environments, sometimes there are - issues with directory access rights that cause the YARN Resource Managers to fail to start and this can be corrected by validating - the existence of `/hadoop/yarn` and that it's owned by `yarn: hadoop`. +Make sure YARN Resource Managers are running ok. We have also noticed that, in Kerberized environments, sometimes there are +issues with directory access rights that cause the YARN Resource Managers to fail to start and this can be corrected by validating +the existence of `/hadoop/yarn` and that it's owned by `yarn: hadoop`. ## Kernel Resources + Scenario: **My kernel keeps dying when processing jobs that require large amount of resources (e.g. large files)** - This is usually seen when you are trying to use more resources then what is available for your kernel. - To address this issue, increase the amount of memory available for your Hadoop YARN application or another - resource manager managing the kernel. For example, on Kubernetes, this may be a time when the kernel specification's [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file should be extended with resource quotas. +This is usually seen when you are trying to use more resources than what is available for your kernel. +To address this issue, increase the amount of memory available for your Hadoop YARN application or another +resource manager managing the kernel. For example, on Kubernetes, this may be a time when the kernel specification's [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file should be extended with resource quotas. ## Spark and Python Versions + Scenario: **PySpark 2.4.x fails on Python 3.8** - PySpark 2.4.x fails on Python 3.8 as described in [SPARK-29536](https://issues.apache.org/jira/browse/SPARK-29536). 
- Use Python 3.7.x as the issue only seems to have been resolved on Spark 3.0. +PySpark 2.4.x fails on Python 3.8 as described in [SPARK-29536](https://issues.apache.org/jira/browse/SPARK-29536). +Use Python 3.7.x as the issue only seems to have been resolved on Spark 3.0. ## Kerberos + Scenario: **I'm trying to use a notebook with user impersonation on a Kerberos enabled cluster, but it fails to authenticate.** When using user impersonation in a YARN cluster with Kerberos authentication, if Kerberos is not - setup properly you will usually see the following warning in your Enterprise Gateway log that will keep a notebook from connecting: +setup properly you will usually see the following warning in your Enterprise Gateway log that will keep a notebook from connecting: ```bash WARN Client: Exception encountered while connecting to the server : javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)] - ``` +``` The most common cause for this WARN is when the user that started Enterprise Gateway is not authenticated - with Kerberos. This can happen when the user has either not run `kinit` or their previous ticket has expired. +with Kerberos. This can happen when the user has either not run `kinit` or their previous ticket has expired. ## Openshift Kubernetes + Scenario: **Running Jupyter Enterprise Gateway on OpenShift Kubernetes Environment fails trying to create /home/jovyan/.local** As described [in the OpenShift Admin Guide](https://docs.openshift.com/container-platform/4.10/openshift_images/create-images.html) - there is a need to issue the following command to enable running with `USER` in Dockerfile. - +there is a need to issue the following command to enable running with `USER` in Dockerfile. 
+ ```bash oc adm policy add-scc-to-group anyuid system:authenticated ``` ## Opening an issue + Scenario: **None of the scenarios on this page match or resolve my issue, what do I do next?** If you are unable to resolve your issue, take a look at our [open issues list](https://github.com/jupyter-server/enterprise_gateway/issues) to see if there is an applicable scenario -already reported. If found, please add a comment to the issue so that we can get a sense of urgency (although all -issues are important to us). If not found, please provide the following information if possible in a **new issue**. - -1. Describe the issue in as much detail as possible. This should include configuration information about your environment. -2. Gather and _attach_ the following files to the issue. If possible, please archive the files first. - 1. The **complete** Enterprise Gateway log file. If possible, please enable `DEBUG` logging that encompasses - the issue. You can refer to this section of our [Operators Guide](../operators/launching-eg.md#launching-enterprise-gateway-common) - for redirection and `DEBUG` enablement. - 2. The log file(s) produced from the corresponding kernel. This is primarily a function of the underlying resource - manager. - - For containerized installations like Kubernetes or Docker Swarm, kernel log output can be captured by - running the appropriate `logs` command against the pod or container, respectively. The names of the - corresponding pod/container can be found in the Enterprise Gateway log. - - For `Hadoop YARN` environments, - you'll need to navigate to the appropriate log directory relative the application ID associated with the kernel. - The application ID can be located in the Enterprise Gateway log. If you have access to an administrative console, - you can usually navigate to the application logs more easily. - 3. Although unlikely, the notebook log may also be helpful. 
If we find that the issue is more client-side - related, we may ask for `DEBUG` logging there as well. +already reported. If found, please add a comment to the issue so that we can get a sense of urgency (although all +issues are important to us). If not found, please provide the following information if possible in a **new issue**. + +1. Describe the issue in as much detail as possible. This should include configuration information about your environment. +2. Gather and _attach_ the following files to the issue. If possible, please archive the files first. + 1. The **complete** Enterprise Gateway log file. If possible, please enable `DEBUG` logging that encompasses + the issue. You can refer to this section of our [Operators Guide](../operators/launching-eg.md#launching-enterprise-gateway-common) + for redirection and `DEBUG` enablement. + 2. The log file(s) produced from the corresponding kernel. This is primarily a function of the underlying resource + manager. + - For containerized installations like Kubernetes or Docker Swarm, kernel log output can be captured by + running the appropriate `logs` command against the pod or container, respectively. The names of the + corresponding pod/container can be found in the Enterprise Gateway log. + - For `Hadoop YARN` environments, + you'll need to navigate to the appropriate log directory relative to the application ID associated with the kernel. + The application ID can be located in the Enterprise Gateway log. If you have access to an administrative console, + you can usually navigate to the application logs more easily. + 3. Although unlikely, the notebook log may also be helpful. If we find that the issue is more client-side + related, we may ask for `DEBUG` logging there as well. 3. If you have altered or created new kernel specifications, the files corresponding to the failing kernels would be - helpful. 
These files could also be added to the attached archive or attached separately. -Please know that we understand that some information cannot be provided due to its sensitivity. In such cases, just +Please know that we understand that some information cannot be provided due to its sensitivity. In such cases, just let us know and we'll be happy to approach the resolution of your issue from a different angle. diff --git a/docs/source/users/client-config.md b/docs/source/users/client-config.md index a5d3b4b82..d61020943 100644 --- a/docs/source/users/client-config.md +++ b/docs/source/users/client-config.md @@ -1,13 +1,19 @@ # Gateway Client Configuration -The set of Gateway Client configuration options include the following. To get the current set of supported options, run the following: + +The set of Gateway Client configuration options include the following. To get the current set of supported options, run the following: + ```bash jupyter server --help-all ``` + or + ```bash jupyter server --generate-config ``` -The following is produced from the `--help-all` option. To determine the corresponding configuration file option, replace `--` with `c.`. + +The following is produced from the `--help-all` option. To determine the corresponding configuration file option, replace `--` with `c.`. + ``` --GatewayClient.auth_scheme= The auth scheme, added as a prefix to the authorization token used in the HTTP headers. diff --git a/docs/source/users/connecting-to-eg.md b/docs/source/users/connecting-to-eg.md index 78346b319..cc8a31ef9 100644 --- a/docs/source/users/connecting-to-eg.md +++ b/docs/source/users/connecting-to-eg.md @@ -1,8 +1,9 @@ # Connecting the server to Enterprise Gateway -To leverage the benefits of Enterprise Gateway, it's helpful to redirect a Jupyter server's kernel management to the Gateway server. This allows better separation of the user's notebooks from the managed computer cluster (Kubernetes, Hadoop YARN, Docker Swarm, etc.) 
on which Enterprise Gateway resides. A Jupyter server can be configured to relay kernel requests to an Enterprise Gateway server in several ways. +To leverage the benefits of Enterprise Gateway, it's helpful to redirect a Jupyter server's kernel management to the Gateway server. This allows better separation of the user's notebooks from the managed computer cluster (Kubernetes, Hadoop YARN, Docker Swarm, etc.) on which Enterprise Gateway resides. A Jupyter server can be configured to relay kernel requests to an Enterprise Gateway server in several ways. ## Command line + To instruct the server to connect to an Enterprise Gateway instance running on host `` on port ``, the following command line options can be used: ```bash @@ -10,7 +11,8 @@ jupyter lab --gateway-url=http://: --GatewayClient.http_use ``` ## Configuration file -If command line options are not appropriate for your environment, the Jupyter server configuration can be used to express Enterprise Gateway options. Note however, that command line options always override configuration file options: + +If command line options are not appropriate for your environment, the Jupyter server configuration can be used to express Enterprise Gateway options. Note however, that command line options always override configuration file options: In your `jupyter_server_config.py` file add the following for the equivalent options: @@ -21,7 +23,9 @@ c.GatewayClient.http_pwd = "guest-password" ``` ## Docker image + All GatewayClient options have corresponding environment variable support, so if you have Jupyter Lab or Notebook already in a docker image, a corresponding docker invocation would look something like this: + ```bash docker run -t --rm \ -e JUPYTER_GATEWAY_URL='http://:' \ @@ -34,11 +38,12 @@ docker run -t --rm \ my-image ``` -Notebook files residing in `${HOME}/notebooks` can then be accessed via `http://localhost:8888`. 
+Notebook files residing in `${HOME}/notebooks` can then be accessed via `http://localhost:8888`. ## Connection Timeouts -Sometimes, depending on the kind of cluster Enterprise Gateway is servicing, connection establishment and kernel startup can take a while (sometimes upwards of minutes). This is particularly true for managed clusters that perform scheduling like Hadoop YARN or Kubernetes. In these configurations it is important to configure both the connection and request timeout values. -These options are handled by the `GatewayClient.connect_timeout` (env: `JUPYTER_GATEWAY_CONNECT_TIMEOUT`) and `GatewayClient.request_timeout` (env: `JUPYTER_GATEWAY_REQUEST_TIMEOUT`) options and default to 40 seconds. +Sometimes, depending on the kind of cluster Enterprise Gateway is servicing, connection establishment and kernel startup can take a while (sometimes upwards of minutes). This is particularly true for managed clusters that perform scheduling like Hadoop YARN or Kubernetes. In these configurations it is important to configure both the connection and request timeout values. + +These options are handled by the `GatewayClient.connect_timeout` (env: `JUPYTER_GATEWAY_CONNECT_TIMEOUT`) and `GatewayClient.request_timeout` (env: `JUPYTER_GATEWAY_REQUEST_TIMEOUT`) options and default to 40 seconds. -The `KERNEL_LAUNCH_TIMEOUT` environment variable will be set from these values or vice versa (whichever is greater). This value is used by EG to determine when it should give up on waiting for the kernel's startup to complete, while the other timeouts are used by Lab or Notebook when establishing the connection to EG. \ No newline at end of file +The `KERNEL_LAUNCH_TIMEOUT` environment variable will be set from these values or vice versa (whichever is greater). This value is used by EG to determine when it should give up on waiting for the kernel's startup to complete, while the other timeouts are used by Lab or Notebook when establishing the connection to EG. 
diff --git a/docs/source/users/index.rst b/docs/source/users/index.rst index 2ec672e5b..394adc212 100644 --- a/docs/source/users/index.rst +++ b/docs/source/users/index.rst @@ -24,4 +24,3 @@ The following assumes an Enterprise Gateway server has been configured and deplo kernel-envs .. other clients (nbclient, papermill) - diff --git a/docs/source/users/installation.md b/docs/source/users/installation.md index 916ac93aa..da19c12b4 100644 --- a/docs/source/users/installation.md +++ b/docs/source/users/installation.md @@ -1,22 +1,29 @@ # Installing the client -In terms of Enterprise Gateway, the client application is typically Jupyter Server (hosting JupyterLab) or Jupyter Notebook. These applications are then configured to connect to Enterprise Gateway. + +In terms of Enterprise Gateway, the client application is typically Jupyter Server (hosting JupyterLab) or Jupyter Notebook. These applications are then configured to connect to Enterprise Gateway. To install Jupyter Server via `pip`: + ```bash pip install jupyter_server ``` + or via `conda`: + ```bash conda install -c conda-forge jupyter_server ``` Likewise, for Jupyter Notebook via `pip`: + ```bash pip install notebook ``` + or via `conda`: + ```bash conda install -c conda-forge notebook ``` -For additional information regarding the installation of [Jupyter Server](https://jupyter-server.readthedocs.io/en/latest/index.html) or [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/latest/), please refer to their respective documentation (see embedded links). \ No newline at end of file +For additional information regarding the installation of [Jupyter Server](https://jupyter-server.readthedocs.io/en/latest/index.html) or [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/latest/), please refer to their respective documentation (see embedded links). 
diff --git a/docs/source/users/kernel-envs.md b/docs/source/users/kernel-envs.md index c33314cc2..fa9686c45 100644 --- a/docs/source/users/kernel-envs.md +++ b/docs/source/users/kernel-envs.md @@ -1,7 +1,9 @@ # Kernel Environment Variables -The Enterprise Gateway client software will also include _any_ environment variables prefixed with `KERNEL_` in the start kernel request sent to the Enterprise Gateway Server. This enables the ability to _statically parameterize_ aspects of kernel start requests relative to other clients using the same Enterprise Gateway instance. -There are several supported `KERNEL_` variables that the Enterprise Gateway server looks for and uses, but others can be sent to customize behaviors. The following kernel-specific environment variables are used by Enterprise Gateway. As mentioned above, all `KERNEL_` variables submitted in the kernel startup request's json body will be available to the kernel for its launch. +The Enterprise Gateway client software will also include _any_ environment variables prefixed with `KERNEL_` in the start kernel request sent to the Enterprise Gateway Server. This enables the ability to _statically parameterize_ aspects of kernel start requests relative to other clients using the same Enterprise Gateway instance. + +There are several supported `KERNEL_` variables that the Enterprise Gateway server looks for and uses, but others can be sent to customize behaviors. The following kernel-specific environment variables are used by Enterprise Gateway. As mentioned above, all `KERNEL_` variables submitted in the kernel startup request's json body will be available to the kernel for its launch. + ```text KERNEL_GID= or 100 Containers only. This value represents the group id in which the container will run. 
diff --git a/enterprise_gateway/__init__.py b/enterprise_gateway/__init__.py index 76586385c..e74da0c80 100644 --- a/enterprise_gateway/__init__.py +++ b/enterprise_gateway/__init__.py @@ -3,6 +3,9 @@ from ._version import __version__ """Lazy-loading entrypoint for the enterprise gateway package.""" + + def launch_instance(*args, **kwargs): from enterprise_gateway.enterprisegatewayapp import launch_instance + launch_instance(*args, **kwargs) diff --git a/enterprise_gateway/__main__.py b/enterprise_gateway/__main__.py index 9c41038f1..194488363 100644 --- a/enterprise_gateway/__main__.py +++ b/enterprise_gateway/__main__.py @@ -1,8 +1,8 @@ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """CLI entrypoint for the enterprise gateway package.""" -from __future__ import absolute_import -if __name__ == '__main__': +if __name__ == "__main__": import enterprise_gateway.enterprisegatewayapp as app + app.launch_instance() diff --git a/enterprise_gateway/_version.py b/enterprise_gateway/_version.py index d54eb6682..02b90e088 100644 --- a/enterprise_gateway/_version.py +++ b/enterprise_gateway/_version.py @@ -3,4 +3,4 @@ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
-__version__ = '3.0.0.dev0' +__version__ = "3.0.0.dev0" diff --git a/enterprise_gateway/base/handlers.py b/enterprise_gateway/base/handlers.py index f011df4e6..ec80d6295 100644 --- a/enterprise_gateway/base/handlers.py +++ b/enterprise_gateway/base/handlers.py @@ -3,26 +3,28 @@ """Tornado handlers for the base of the API.""" import json + import jupyter_server._version from jupyter_server.base.handlers import APIHandler from tornado import web -from ..mixins import TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin + from .._version import __version__ +from ..mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin -class APIVersionHandler(TokenAuthorizationMixin, - CORSMixin, - JSONErrorsMixin, - APIHandler): - """" +class APIVersionHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler): + """ " Extends the jupyter_server base API handler with token auth, CORS, and JSON errors to produce version information for jupyter_server and gateway. """ + def get(self): # not authenticated, so give as few info as possible # to be backwards compatibile, use only 'version' for the jupyter_server version # and be more specific for gateway_version - self.finish(json.dumps({"version": jupyter_server.__version__, "gateway_version": __version__})) + self.finish( + json.dumps({"version": jupyter_server.__version__, "gateway_version": __version__}) + ) class NotFoundHandler(JSONErrorsMixin, web.RequestHandler): @@ -36,11 +38,9 @@ class NotFoundHandler(JSONErrorsMixin, web.RequestHandler): tornado.web.HTTPError Always 404 Not Found """ + def prepare(self): raise web.HTTPError(404) -default_handlers = [ - (r'/api', APIVersionHandler), - (r'/(.*)', NotFoundHandler) -] +default_handlers = [(r"/api", APIVersionHandler), (r"/(.*)", NotFoundHandler)] diff --git a/enterprise_gateway/client/gateway_client.py b/enterprise_gateway/client/gateway_client.py index c29478a44..b70bcb458 100644 --- a/enterprise_gateway/client/gateway_client.py +++ 
b/enterprise_gateway/client/gateway_client.py @@ -1,12 +1,12 @@ - -import os import logging -import requests +import os import time -from uuid import uuid4 -from tornado.escape import json_encode, json_decode, utf8 from threading import Thread +from uuid import uuid4 + +import requests import websocket +from tornado.escape import json_decode, json_encode, utf8 try: # prefer python 3, fallback to 2 import queue as queue @@ -16,10 +16,10 @@ REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT", 120)) log_level = os.getenv("LOG_LEVEL", "INFO") -logging.basicConfig(format='[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s') +logging.basicConfig(format="[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s") -class GatewayClient(object): +class GatewayClient: """ *** E X P E R I M E N T A L *** *** E X P E R I M E N T A L *** @@ -27,70 +27,87 @@ class GatewayClient(object): integration tests and can be leveraged for micro service type of connections. """ - DEFAULT_USERNAME = os.getenv('KERNEL_USERNAME', 'bob') - DEFAULT_GATEWAY_HOST = os.getenv('GATEWAY_HOST', 'localhost:8888') - KERNEL_LAUNCH_TIMEOUT = os.getenv('KERNEL_LAUNCH_TIMEOUT', '40') + + DEFAULT_USERNAME = os.getenv("KERNEL_USERNAME", "bob") + DEFAULT_GATEWAY_HOST = os.getenv("GATEWAY_HOST", "localhost:8888") + KERNEL_LAUNCH_TIMEOUT = os.getenv("KERNEL_LAUNCH_TIMEOUT", "40") def __init__(self, host=DEFAULT_GATEWAY_HOST): - self.http_api_endpoint = 'http://{}/api/kernels'.format(host) - self.ws_api_endpoint = 'ws://{}/api/kernels'.format(host) - self.log = logging.getLogger('GatewayClient') + self.http_api_endpoint = f"http://{host}/api/kernels" + self.ws_api_endpoint = f"ws://{host}/api/kernels" + self.log = logging.getLogger("GatewayClient") self.log.setLevel(log_level) def start_kernel(self, kernelspec_name, username=DEFAULT_USERNAME, timeout=REQUEST_TIMEOUT): - self.log.info('Starting a {} kernel ....'.format(kernelspec_name)) + self.log.info(f"Starting a {kernelspec_name} kernel 
....") - json_data = {'name': kernelspec_name, - 'env': {'KERNEL_USERNAME': username, - 'KERNEL_LAUNCH_TIMEOUT': GatewayClient.KERNEL_LAUNCH_TIMEOUT}} + json_data = { + "name": kernelspec_name, + "env": { + "KERNEL_USERNAME": username, + "KERNEL_LAUNCH_TIMEOUT": GatewayClient.KERNEL_LAUNCH_TIMEOUT, + }, + } response = requests.post(self.http_api_endpoint, data=json_encode(json_data)) if response.status_code == 201: json_data = response.json() kernel_id = json_data.get("id") - self.log.info('Started kernel with id {}'.format(kernel_id)) + self.log.info(f"Started kernel with id {kernel_id}") else: - raise RuntimeError('Error starting kernel : {} response code \n {}'. - format(response.status_code, response.content)) - - return KernelClient(self.http_api_endpoint, self.ws_api_endpoint, kernel_id, timeout=timeout, logger=self.log) + raise RuntimeError( + "Error starting kernel : {} response code \n {}".format( + response.status_code, response.content + ) + ) + + return KernelClient( + self.http_api_endpoint, + self.ws_api_endpoint, + kernel_id, + timeout=timeout, + logger=self.log, + ) def shutdown_kernel(self, kernel): - self.log.info("Shutting down kernel : {} ....".format(kernel.kernel_id)) + self.log.info(f"Shutting down kernel : {kernel.kernel_id} ....") if not kernel: return False kernel.shutdown() - url = "{}/{}".format(self.http_api_endpoint, kernel.kernel_id) + url = f"{self.http_api_endpoint}/{kernel.kernel_id}" response = requests.delete(url) if response.status_code == 204: - self.log.debug('Kernel {} shutdown'.format(kernel.kernel_id)) + self.log.debug(f"Kernel {kernel.kernel_id} shutdown") return True else: - raise RuntimeError('Error shutting down kernel {}: {}'.format(kernel.kernel_id, response.content)) + raise RuntimeError(f"Error shutting down kernel {kernel.kernel_id}: {response.content}") -class KernelClient(object): +class KernelClient: - DEAD_MSG_ID = 'deadbeefdeadbeefdeadbeefdeadbeef' + DEAD_MSG_ID = "deadbeefdeadbeefdeadbeefdeadbeef" 
POST_IDLE_TIMEOUT = 0.5 DEFAULT_INTERRUPT_WAIT = 1 - def __init__(self, http_api_endpoint, ws_api_endpoint, kernel_id, timeout=REQUEST_TIMEOUT, logger=None): + def __init__( + self, http_api_endpoint, ws_api_endpoint, kernel_id, timeout=REQUEST_TIMEOUT, logger=None + ): self.shutting_down = False self.restarting = False self.http_api_endpoint = http_api_endpoint - self.kernel_http_api_endpoint = '{}/{}'.format(http_api_endpoint, kernel_id) + self.kernel_http_api_endpoint = f"{http_api_endpoint}/{kernel_id}" self.ws_api_endpoint = ws_api_endpoint - self.kernel_ws_api_endpoint = '{}/{}/channels'.format(ws_api_endpoint, kernel_id) + self.kernel_ws_api_endpoint = f"{ws_api_endpoint}/{kernel_id}/channels" self.kernel_id = kernel_id self.log = logger - self.log.debug('Initializing kernel client ({}) to {}'.format(kernel_id, self.kernel_ws_api_endpoint)) + self.log.debug(f"Initializing kernel client ({kernel_id}) to {self.kernel_ws_api_endpoint}") - self.kernel_socket = \ - websocket.create_connection(self.kernel_ws_api_endpoint, timeout=timeout, enable_multithread=True) + self.kernel_socket = websocket.create_connection( + self.kernel_ws_api_endpoint, timeout=timeout, enable_multithread=True + ) self.response_queues = {} @@ -129,52 +146,79 @@ def execute(self, code, timeout=REQUEST_TIMEOUT): while True: response_message = self._get_response(msg_id, timeout, post_idle) if response_message: - response_message_type = response_message['msg_type'] - - if response_message_type == 'error' or \ - (response_message_type == 'execute_reply' and - response_message['content']['status'] == 'error'): - response.append('{}:{}:{}'.format(response_message['content']['ename'], - response_message['content']['evalue'], - response_message['content']['traceback'])) - elif response_message_type == 'stream': - response.append(KernelClient._convert_raw_response(response_message['content']['text'])) - - elif response_message_type == 'execute_result' or response_message_type == 'display_data': - if 
'text/plain' in response_message['content']['data']: + response_message_type = response_message["msg_type"] + + if response_message_type == "error" or ( + response_message_type == "execute_reply" + and response_message["content"]["status"] == "error" + ): + response.append( + "{}:{}:{}".format( + response_message["content"]["ename"], + response_message["content"]["evalue"], + response_message["content"]["traceback"], + ) + ) + elif response_message_type == "stream": + response.append( + KernelClient._convert_raw_response(response_message["content"]["text"]) + ) + + elif ( + response_message_type == "execute_result" + or response_message_type == "display_data" + ): + if "text/plain" in response_message["content"]["data"]: response.append( - KernelClient._convert_raw_response(response_message['content']['data']['text/plain'])) - elif 'text/html' in response_message['content']['data']: + KernelClient._convert_raw_response( + response_message["content"]["data"]["text/plain"] + ) + ) + elif "text/html" in response_message["content"]["data"]: response.append( - KernelClient._convert_raw_response(response_message['content']['data']['text/html'])) - elif response_message_type == 'status': - if response_message['content']['execution_state'] == 'idle': + KernelClient._convert_raw_response( + response_message["content"]["data"]["text/html"] + ) + ) + elif response_message_type == "status": + if response_message["content"]["execution_state"] == "idle": post_idle = True # indicate we're at the logical end and timeout poll for next message continue else: - self.log.debug("Unhandled response for msg_id: {} of msg_type: {}". - format(msg_id, response_message_type)) - - if response_message is None: # We timed out. If post idle, its ok, else make mention of it + self.log.debug( + "Unhandled response for msg_id: {} of msg_type: {}".format( + msg_id, response_message_type + ) + ) + + if ( + response_message is None + ): # We timed out. 
If post idle, its ok, else make mention of it if not post_idle: - self.log.warning("Unexpected timeout occurred for msg_id: {} - no 'idle' status received!". - format(msg_id)) + self.log.warning( + "Unexpected timeout occurred for msg_id: {} - no 'idle' status received!".format( + msg_id + ) + ) break except BaseException as b: self.log.debug(b) - return ''.join(response) + return "".join(response) def interrupt(self): url = "{}/{}".format(self.kernel_http_api_endpoint, "interrupt") response = requests.post(url) if response.status_code == 204: - self.log.debug('Kernel {} interrupted'.format(self.kernel_id)) + self.log.debug(f"Kernel {self.kernel_id} interrupted") return True else: - raise RuntimeError('Unexpected response interrupting kernel {}: {}'. - format(self.kernel_id, response.content)) + raise RuntimeError( + "Unexpected response interrupting kernel {}: {}".format( + self.kernel_id, response.content + ) + ) def restart(self, timeout=REQUEST_TIMEOUT): self.restarting = True @@ -183,25 +227,33 @@ def restart(self, timeout=REQUEST_TIMEOUT): url = "{}/{}".format(self.kernel_http_api_endpoint, "restart") response = requests.post(url) if response.status_code == 200: - self.log.debug('Kernel {} restarted'.format(self.kernel_id)) - self.kernel_socket = \ - websocket.create_connection(self.kernel_ws_api_endpoint, timeout=timeout, enable_multithread=True) + self.log.debug(f"Kernel {self.kernel_id} restarted") + self.kernel_socket = websocket.create_connection( + self.kernel_ws_api_endpoint, timeout=timeout, enable_multithread=True + ) self.restarting = False return True else: self.restarting = False - raise RuntimeError('Unexpected response restarting kernel {}: {}'.format(self.kernel_id, response.content)) + raise RuntimeError( + "Unexpected response restarting kernel {}: {}".format( + self.kernel_id, response.content + ) + ) def get_state(self): - url = "{}".format(self.kernel_http_api_endpoint) + url = f"{self.kernel_http_api_endpoint}" response = 
requests.get(url) if response.status_code == 200: json = response.json() - self.log.debug('Kernel {} state: {}'.format(self.kernel_id, json)) - return json['execution_state'] + self.log.debug(f"Kernel {self.kernel_id} state: {json}") + return json["execution_state"] else: - raise RuntimeError('Unexpected response retrieving state for kernel {}: {}'. - format(self.kernel_id, response.content)) + raise RuntimeError( + "Unexpected response retrieving state for kernel {}: {}".format( + self.kernel_id, response.content + ) + ) def start_interrupt_thread(self, wait_time=DEFAULT_INTERRUPT_WAIT): self.interrupt_thread = Thread(target=self.perform_interrupt, args=(wait_time,)) @@ -241,14 +293,19 @@ def _get_response(self, msg_id, timeout, post_idle): """ if post_idle and timeout > KernelClient.POST_IDLE_TIMEOUT: - timeout = KernelClient.POST_IDLE_TIMEOUT # overwrite timeout to small value following idle messages. + timeout = ( + KernelClient.POST_IDLE_TIMEOUT + ) # overwrite timeout to small value following idle messages. msg_queue = self.response_queues.get(msg_id) try: - self.log.debug("Getting response for msg_id: {} with timeout: {}".format(msg_id, timeout)) + self.log.debug(f"Getting response for msg_id: {msg_id} with timeout: {timeout}") response = msg_queue.get(timeout=timeout) - self.log.debug("Got response for msg_id: {}, msg_type: {}". - format(msg_id, response['msg_type'] if response else 'null')) + self.log.debug( + "Got response for msg_id: {}, msg_type: {}".format( + msg_id, response["msg_type"] if response else "null" + ) + ) except queue.Empty: response = None @@ -275,15 +332,20 @@ def _read_responses(self): self.response_queues[msg_id] = queue.Queue() # insert into queue - self.log.debug("Inserting response for msg_id: {}, msg_type: {}". 
- format(msg_id, response_message['msg_type'])) + self.log.debug( + "Inserting response for msg_id: {}, msg_type: {}".format( + msg_id, response_message["msg_type"] + ) + ) self.response_queues.get(msg_id).put_nowait(response_message) except BaseException as be1: - if self.restarting: # If restarting, wait until restart has completed - which includes new socket + if ( + self.restarting + ): # If restarting, wait until restart has completed - which includes new socket i = 1 while self.restarting: if i >= 10 and i % 2 == 0: - self.log.debug("Still restarting after {} secs...".format(i)) + self.log.debug(f"Still restarting after {i} secs...") time.sleep(1) i += 1 continue @@ -294,22 +356,22 @@ def _read_responses(self): except BaseException as be2: if not self.shutting_down: - self.log.warning('Unexpected exception encountered ({})'.format(be2)) + self.log.warning(f"Unexpected exception encountered ({be2})") - self.log.debug('Response reader thread exiting...') + self.log.debug("Response reader thread exiting...") @staticmethod def _get_msg_id(message, logger): msg_id = KernelClient.DEAD_MSG_ID if message: - if 'msg_id' in message['parent_header'] and message['parent_header']['msg_id']: - msg_id = message['parent_header']['msg_id'] - elif 'msg_id' in message: + if "msg_id" in message["parent_header"] and message["parent_header"]["msg_id"]: + msg_id = message["parent_header"]["msg_id"] + elif "msg_id" in message: # msg_id may not be in the parent_header, see if present in response # IPython kernel appears to do this after restarts with a 'starting' status - msg_id = message['msg_id'] + msg_id = message["msg_id"] else: # Dump the "dead" message... 
- logger.debug("+++++ Dumping dead message: {}".format(message)) + logger.debug(f"+++++ Dumping dead message: {message}") return msg_id @staticmethod @@ -323,23 +385,25 @@ def _convert_raw_response(raw_response_message): @staticmethod def __create_execute_request(msg_id, code): - return json_encode({ - 'header': { - 'username': '', - 'version': '5.0', - 'session': '', - 'msg_id': msg_id, - 'msg_type': 'execute_request' - }, - 'parent_header': {}, - 'channel': 'shell', - 'content': { - 'code': "".join(code), - 'silent': False, - 'store_history': False, - 'user_expressions': {}, - 'allow_stdin': False - }, - 'metadata': {}, - 'buffers': {} - }) + return json_encode( + { + "header": { + "username": "", + "version": "5.0", + "session": "", + "msg_id": msg_id, + "msg_type": "execute_request", + }, + "parent_header": {}, + "channel": "shell", + "content": { + "code": "".join(code), + "silent": False, + "store_history": False, + "user_expressions": {}, + "allow_stdin": False, + }, + "metadata": {}, + "buffers": {}, + } + ) diff --git a/enterprise_gateway/enterprisegatewayapp.py b/enterprise_gateway/enterprisegatewayapp.py index 715742f9c..c2f0568ad 100644 --- a/enterprise_gateway/enterprisegatewayapp.py +++ b/enterprise_gateway/enterprisegatewayapp.py @@ -15,44 +15,42 @@ import weakref from typing import Optional -from zmq.eventloop import ioloop -from tornado import httpserver -from tornado import web -from tornado.log import enable_pretty_logging - -from traitlets.config import Configurable -from jupyter_core.application import JupyterApp, base_aliases from jupyter_client.kernelspec import KernelSpecManager +from jupyter_core.application import JupyterApp, base_aliases from jupyter_server.serverapp import random_ports from jupyter_server.utils import url_path_join +from tornado import httpserver, web +from tornado.log import enable_pretty_logging +from traitlets.config import Configurable +from zmq.eventloop import ioloop from ._version import __version__ - from 
.base.handlers import default_handlers as default_base_handlers +from .mixins import EnterpriseGatewayConfigMixin from .services.api.handlers import default_handlers as default_api_handlers from .services.kernels.handlers import default_handlers as default_kernel_handlers -from .services.kernelspecs.handlers import default_handlers as default_kernelspec_handlers +from .services.kernels.remotemanager import RemoteMappingKernelManager +from .services.kernelspecs import KernelSpecCache +from .services.kernelspecs.handlers import ( + default_handlers as default_kernelspec_handlers, +) from .services.sessions.handlers import default_handlers as default_session_handlers - from .services.sessions.kernelsessionmanager import FileKernelSessionManager from .services.sessions.sessionmanager import SessionManager -from .services.kernels.remotemanager import RemoteMappingKernelManager -from .services.kernelspecs import KernelSpecCache - -from .mixins import EnterpriseGatewayConfigMixin - # Add additional command line aliases aliases = dict(base_aliases) -aliases.update({ - 'ip': 'EnterpriseGatewayApp.ip', - 'port': 'EnterpriseGatewayApp.port', - 'port_retries': 'EnterpriseGatewayApp.port_retries', - 'keyfile': 'EnterpriseGatewayApp.keyfile', - 'certfile': 'EnterpriseGatewayApp.certfile', - 'client-ca': 'EnterpriseGatewayApp.client_ca', - 'ssl_version': 'EnterpriseGatewayApp.ssl_version' -}) +aliases.update( + { + "ip": "EnterpriseGatewayApp.ip", + "port": "EnterpriseGatewayApp.port", + "port_retries": "EnterpriseGatewayApp.port_retries", + "keyfile": "EnterpriseGatewayApp.keyfile", + "certfile": "EnterpriseGatewayApp.certfile", + "client-ca": "EnterpriseGatewayApp.client_ca", + "ssl_version": "EnterpriseGatewayApp.ssl_version", + } +) class EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp): @@ -65,7 +63,8 @@ class EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp): - creates a Tornado HTTP server - starts the Tornado event loop """ - name = 
'jupyter-enterprise-gateway' + + name = "jupyter-enterprise-gateway" version = __version__ description = """ Jupyter Enterprise Gateway @@ -88,7 +87,7 @@ def initialize(self, argv=None): argv Command line arguments """ - super(EnterpriseGatewayApp, self).initialize(argv) + super().initialize(argv) self.init_configurables() self.init_webapp() self.init_http_server() @@ -103,16 +102,14 @@ def init_configurables(self): # adopt whatever default the kernel manager wants to use. kwargs = {} if self.default_kernel_name: - kwargs['default_kernel_name'] = self.default_kernel_name + kwargs["default_kernel_name"] = self.default_kernel_name self.kernel_spec_manager = self.kernel_spec_manager_class( parent=self, ) self.kernel_spec_cache = self.kernel_spec_cache_class( - parent=self, - kernel_spec_manager=self.kernel_spec_manager, - **kwargs + parent=self, kernel_spec_manager=self.kernel_spec_manager, **kwargs ) self.kernel_manager = self.kernel_manager_class( @@ -120,20 +117,17 @@ def init_configurables(self): log=self.log, connection_dir=self.runtime_dir, kernel_spec_manager=self.kernel_spec_manager, - **kwargs + **kwargs, ) - self.session_manager = SessionManager( - log=self.log, - kernel_manager=self.kernel_manager - ) + self.session_manager = SessionManager(log=self.log, kernel_manager=self.kernel_manager) self.kernel_session_manager = self.kernel_session_manager_class( parent=self, log=self.log, kernel_manager=self.kernel_manager, config=self.config, # required to get command-line options visible - **kwargs + **kwargs, ) # Attempt to start persisted sessions @@ -152,14 +146,14 @@ def _create_request_handlers(self): # append tuples for the standard kernel gateway endpoints for handler in ( - default_api_handlers + - default_kernel_handlers + - default_kernelspec_handlers + - default_session_handlers + - default_base_handlers + default_api_handlers + + default_kernel_handlers + + default_kernelspec_handlers + + default_session_handlers + + default_base_handlers ): # Create a 
new handler pattern rooted at the base_url - pattern = url_path_join('/', self.base_url, handler[0]) + pattern = url_path_join("/", self.base_url, handler[0]) # Some handlers take args, so retain those in addition to the # handler class ref new_handler = tuple([pattern] + list(handler[1:])) @@ -178,7 +172,7 @@ def wrapped_prepare(self): try: ssl.match_hostname(ssl_cert, authorized_hostname) except ssl.SSLCertVerificationError: - raise web.HTTPError(403, 'Forbidden') + raise web.HTTPError(403, "Forbidden") base_prepare(self) handler[1].prepare = wrapped_prepare @@ -227,12 +221,11 @@ def init_webapp(self): allow_remote_access=True, # setting ws_ping_interval value that can allow it to be modified for the purpose of toggling ping mechanism # for zmq web-sockets or increasing/decreasing web socket ping interval/timeouts. - ws_ping_interval=self.ws_ping_interval * 1000 + ws_ping_interval=self.ws_ping_interval * 1000, ) def _build_ssl_options(self) -> Optional[ssl.SSLContext]: - """Build an SSLContext for the tornado HTTP server. - """ + """Build an SSLContext for the tornado HTTP server.""" if not any((self.certfile, self.keyfile, self.client_ca)): # None indicates no SSL config return None @@ -254,18 +247,18 @@ def init_http_server(self): the same logic as the Jupyer Notebook server. """ ssl_options = self._build_ssl_options() - self.http_server = httpserver.HTTPServer(self.web_app, - xheaders=self.trust_xheaders, - ssl_options=ssl_options) + self.http_server = httpserver.HTTPServer( + self.web_app, xheaders=self.trust_xheaders, ssl_options=ssl_options + ) for port in random_ports(self.port, self.port_retries + 1): try: self.http_server.listen(port, self.ip) - except socket.error as e: + except OSError as e: if e.errno == errno.EADDRINUSE: - self.log.info('The port %i is already in use, trying another port.' % port) + self.log.info("The port %i is already in use, trying another port." 
% port) continue - elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)): + elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)): self.log.warning("Permission to listen on port %i denied" % port) continue else: @@ -274,29 +267,36 @@ def init_http_server(self): self.port = port break else: - self.log.critical('ERROR: the gateway server could not be started because ' - 'no available port could be found.') + self.log.critical( + "ERROR: the gateway server could not be started because " + "no available port could be found." + ) self.exit(1) def start(self): - """Starts an IO loop for the application. """ + """Starts an IO loop for the application.""" - super(EnterpriseGatewayApp, self).start() + super().start() - self.log.info('Jupyter Enterprise Gateway {} is available at http{}://{}:{}'.format( - EnterpriseGatewayApp.version, 's' if self.keyfile else '', self.ip, self.port - )) + self.log.info( + "Jupyter Enterprise Gateway {} is available at http{}://{}:{}".format( + EnterpriseGatewayApp.version, "s" if self.keyfile else "", self.ip, self.port + ) + ) # If impersonation is enabled, issue a warning message if the gateway user is not in unauthorized_users. if self.impersonation_enabled: gateway_user = getpass.getuser() if gateway_user.lower() not in self.unauthorized_users: - self.log.warning("Impersonation is enabled and gateway user '{}' is NOT specified in the set of " - "unauthorized users! Kernels may execute as that user with elevated privileges.". - format(gateway_user)) + self.log.warning( + "Impersonation is enabled and gateway user '{}' is NOT specified in the set of " + "unauthorized users! 
Kernels may execute as that user with elevated privileges.".format( + gateway_user + ) + ) self.io_loop = ioloop.IOLoop.current() - if sys.platform != 'win32': + if sys.platform != "win32": signal.signal(signal.SIGHUP, signal.SIG_IGN) signal.signal(signal.SIGTERM, self._signal_stop) @@ -314,7 +314,9 @@ def shutdown(self): """Shuts down all running kernels.""" kids = self.kernel_manager.list_kernel_ids() for kid in kids: - asyncio.get_event_loop().run_until_complete(self.kernel_manager.shutdown_kernel(kid, now=True)) + asyncio.get_event_loop().run_until_complete( + self.kernel_manager.shutdown_kernel(kid, now=True) + ) def stop(self): """ @@ -346,7 +348,7 @@ def update_dynamic_configurables(self): for file in self.loaded_config_files: mod_time = int(os.path.getmtime(file)) if mod_time > self._last_config_update: - self.log.debug("Config file was updated: {}!".format(file)) + self.log.debug(f"Config file was updated: {file}!") self._last_config_update = mod_time updated = True @@ -364,8 +366,10 @@ def update_dynamic_configurables(self): configurable.update_config(self.config) configs.append(config_name) - self.log.info("Configuration file changes detected. Instances for the following " - "configurables have been updated: {}".format(configs)) + self.log.info( + "Configuration file changes detected. 
Instances for the following " + "configurables have been updated: {}".format(configs) + ) return updated def add_dynamic_configurable(self, config_name, configurable): @@ -376,7 +380,7 @@ def add_dynamic_configurable(self, config_name, configurable): :param configurable: the configurable instance corresponding to that config """ if not isinstance(configurable, Configurable): - raise RuntimeError("'{}' is not a subclass of Configurable!".format(configurable)) + raise RuntimeError(f"'{configurable}' is not a subclass of Configurable!") self._dynamic_configurables[config_name] = weakref.proxy(configurable) @@ -388,23 +392,29 @@ def init_dynamic_configs(self): :return: """ if self.dynamic_config_interval > 0: - self.add_dynamic_configurable('EnterpriseGatewayApp', self) - self.add_dynamic_configurable('MappingKernelManager', self.kernel_manager) - self.add_dynamic_configurable('KernelSpecManager', self.kernel_spec_manager) - self.add_dynamic_configurable('KernelSessionManager', self.kernel_session_manager) - - self.log.info("Dynamic updates have been configured. Checking every {} seconds.". - format(self.dynamic_config_interval)) - - self.log.info("The following configuration options will not be subject to dynamic updates " - "(configured via CLI):") + self.add_dynamic_configurable("EnterpriseGatewayApp", self) + self.add_dynamic_configurable("MappingKernelManager", self.kernel_manager) + self.add_dynamic_configurable("KernelSpecManager", self.kernel_spec_manager) + self.add_dynamic_configurable("KernelSessionManager", self.kernel_session_manager) + + self.log.info( + "Dynamic updates have been configured. 
Checking every {} seconds.".format( + self.dynamic_config_interval + ) + ) + + self.log.info( + "The following configuration options will not be subject to dynamic updates " + "(configured via CLI):" + ) for config, options in self.cli_config.items(): for option, value in options.items(): - self.log.info(" '{}.{}': '{}'".format(config, option, value)) + self.log.info(f" '{config}.{option}': '{value}'") if self.dynamic_config_poller is None: - self.dynamic_config_poller = ioloop.PeriodicCallback(self.update_dynamic_configurables, - self.dynamic_config_interval * 1000) + self.dynamic_config_poller = ioloop.PeriodicCallback( + self.update_dynamic_configurables, self.dynamic_config_interval * 1000 + ) self.dynamic_config_poller.start() diff --git a/enterprise_gateway/itests/__init__.py b/enterprise_gateway/itests/__init__.py index 45de8cc1d..015dfb82c 100644 --- a/enterprise_gateway/itests/__init__.py +++ b/enterprise_gateway/itests/__init__.py @@ -2,6 +2,7 @@ # Distributed under the terms of the Modified BSD License. from tornado import ioloop + def teardown(): """The test fixture appears to leak something on certain platforms that endlessly tries an async socket connect and fails after the tests end. 
diff --git a/enterprise_gateway/itests/kernels/authorization_test/kernel.json b/enterprise_gateway/itests/kernels/authorization_test/kernel.json index e8f025d06..27af6b680 100644 --- a/enterprise_gateway/itests/kernels/authorization_test/kernel.json +++ b/enterprise_gateway/itests/kernels/authorization_test/kernel.json @@ -10,13 +10,6 @@ } } }, - "env": { - }, - "argv": [ - "python", - "-m", - "ipykernel_launcher", - "-f", - "{connection_file}" - ] + "env": {}, + "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"] } diff --git a/enterprise_gateway/itests/test_authorization.py b/enterprise_gateway/itests/test_authorization.py index dc4506384..aa302c692 100644 --- a/enterprise_gateway/itests/test_authorization.py +++ b/enterprise_gateway/itests/test_authorization.py @@ -1,5 +1,6 @@ -import unittest import os +import unittest + from enterprise_gateway.client.gateway_client import GatewayClient @@ -8,8 +9,8 @@ class TestAuthorization(unittest.TestCase): @classmethod def setUpClass(cls): - super(TestAuthorization, cls).setUpClass() - print('>>>') + super().setUpClass() + print(">>>") # initialize environment cls.gateway_client = GatewayClient() @@ -23,7 +24,7 @@ def tearDown(self): def test_authorized_users(self): kernel = None try: - kernel = self.gateway_client.start_kernel(TestAuthorization.KERNELSPEC, username='bob') + kernel = self.gateway_client.start_kernel(TestAuthorization.KERNELSPEC, username="bob") result = kernel.execute("print('The cow jumped over the moon.')") self.assertEqual(result, "The cow jumped over the moon.\n") finally: @@ -33,14 +34,16 @@ def test_authorized_users(self): def test_unauthorized_users(self): kernel = None try: - kernel = self.gateway_client.start_kernel(TestAuthorization.KERNELSPEC, username='bad_guy') + kernel = self.gateway_client.start_kernel( + TestAuthorization.KERNELSPEC, username="bad_guy" + ) self.assertTrue(False, msg="Unauthorization exception expected!") except Exception as be: - 
self.assertRegexpMatches(be.args[0], "403") + self.assertRegex(be.args[0], "403") finally: if kernel: self.gateway_client.shutdown_kernel(kernel) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/enterprise_gateway/itests/test_base.py b/enterprise_gateway/itests/test_base.py index b6808d581..5f5e25b67 100644 --- a/enterprise_gateway/itests/test_base.py +++ b/enterprise_gateway/itests/test_base.py @@ -1,14 +1,15 @@ import os expected_hostname = os.getenv("ITEST_HOSTNAME_PREFIX", "") + "*" # use ${KERNEL_USERNAME} on k8s -expected_application_id = os.getenv("EXPECTED_APPLICATION_ID", "application_*") # use 'spark-application-*' on k8s +expected_application_id = os.getenv( + "EXPECTED_APPLICATION_ID", "application_*" +) # use 'spark-application-*' on k8s expected_spark_version = os.getenv("EXPECTED_SPARK_VERSION", "2.4.*") # use '2.4.*' on k8s expected_spark_master = os.getenv("EXPECTED_SPARK_MASTER", "yarn") # use 'k8s:*' on k8s expected_deploy_mode = os.getenv("EXPECTED_DEPLOY_MODE", "(cluster|client)") # use 'client' on k8s -class TestBase(object): - +class TestBase: def get_expected_application_id(self): return expected_application_id diff --git a/enterprise_gateway/itests/test_python_kernel.py b/enterprise_gateway/itests/test_python_kernel.py index 1e814c486..6e7c103a0 100644 --- a/enterprise_gateway/itests/test_python_kernel.py +++ b/enterprise_gateway/itests/test_python_kernel.py @@ -1,8 +1,10 @@ -import unittest import os -from .test_base import TestBase +import unittest + from enterprise_gateway.client.gateway_client import GatewayClient +from .test_base import TestBase + class PythonKernelBaseTestCase(TestBase): """ @@ -15,7 +17,7 @@ def test_get_hostname(self): def test_hello_world(self): result = self.kernel.execute("print('Hello World')") - self.assertRegex(result, 'Hello World') + self.assertRegex(result, "Hello World") def test_restart(self): @@ -30,7 +32,7 @@ def test_restart(self): 
self.assertTrue(self.kernel.restart()) error_result = self.kernel.execute("y = x + 1") - self.assertRegex(error_result, 'NameError') + self.assertRegex(error_result, "NameError") def test_interrupt(self): @@ -58,7 +60,7 @@ def test_interrupt(self): interrupted_result = self.kernel.execute(interrupted_code) # Ensure the result indicates an interrupt occurred - self.assertRegex(interrupted_result, 'KeyboardInterrupt') + self.assertRegex(interrupted_result, "KeyboardInterrupt") # Wait for thread to terminate - should be terminated already self.kernel.terminate_interrupt_thread() @@ -115,9 +117,9 @@ def test_run_pi_example(self): pi_code.append(" y = random() * 2 - 1\n") pi_code.append(" return 1 if x ** 2 + y ** 2 <= 1 else 0\n") pi_code.append("count = sc.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\n") - pi_code.append("print(\"Pi is roughly %f\" % (4.0 * count / n))\n") + pi_code.append('print("Pi is roughly %f" % (4.0 * count / n))\n') result = self.kernel.execute(pi_code) - self.assertRegex(result, 'Pi is roughly 3.14*') + self.assertRegex(result, "Pi is roughly 3.14*") class TestPythonKernelLocal(unittest.TestCase, PythonKernelBaseTestCase): @@ -125,8 +127,8 @@ class TestPythonKernelLocal(unittest.TestCase, PythonKernelBaseTestCase): @classmethod def setUpClass(cls): - super(TestPythonKernelLocal, cls).setUpClass() - print('\nStarting Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -134,20 +136,22 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestPythonKernelLocal, cls).tearDownClass() - print('\nShutting down Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class 
TestPythonKernelDistributed(unittest.TestCase, PythonKernelBaseTestCase): - KERNELSPEC = os.getenv("PYTHON_KERNEL_DISTRIBUTED_NAME", "python_distributed") # python_kubernetes for k8s + KERNELSPEC = os.getenv( + "PYTHON_KERNEL_DISTRIBUTED_NAME", "python_distributed" + ) # python_kubernetes for k8s @classmethod def setUpClass(cls): - super(TestPythonKernelDistributed, cls).setUpClass() - print('\nStarting Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -155,20 +159,22 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestPythonKernelDistributed, cls).tearDownClass() - print('\nShutting down Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestPythonKernelClient(unittest.TestCase, PythonKernelBaseSparkTestCase): - KERNELSPEC = os.getenv("PYTHON_KERNEL_CLIENT_NAME", "spark_python_yarn_client") # spark_python_kubernetes for k8s + KERNELSPEC = os.getenv( + "PYTHON_KERNEL_CLIENT_NAME", "spark_python_yarn_client" + ) # spark_python_kubernetes for k8s @classmethod def setUpClass(cls): - super(TestPythonKernelClient, cls).setUpClass() - print('\nStarting Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -176,20 +182,22 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestPythonKernelClient, cls).tearDownClass() - print('\nShutting down Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment 
cls.gatewayClient.shutdown_kernel(cls.kernel) class TestPythonKernelCluster(unittest.TestCase, PythonKernelBaseSparkTestCase): - KERNELSPEC = os.getenv("PYTHON_KERNEL_CLUSTER_NAME", "spark_python_yarn_cluster") # spark_python_kubernetes for k8s + KERNELSPEC = os.getenv( + "PYTHON_KERNEL_CLUSTER_NAME", "spark_python_yarn_cluster" + ) # spark_python_kubernetes for k8s @classmethod def setUpClass(cls): - super(TestPythonKernelCluster, cls).setUpClass() - print('\nStarting Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -197,12 +205,12 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestPythonKernelCluster, cls).tearDownClass() - print('\nShutting down Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/enterprise_gateway/itests/test_r_kernel.py b/enterprise_gateway/itests/test_r_kernel.py index 6da47db61..04822231e 100644 --- a/enterprise_gateway/itests/test_r_kernel.py +++ b/enterprise_gateway/itests/test_r_kernel.py @@ -1,8 +1,10 @@ -import unittest import os -from .test_base import TestBase +import unittest + from enterprise_gateway.client.gateway_client import GatewayClient +from .test_base import TestBase + class RKernelBaseTestCase(TestBase): """ @@ -15,7 +17,7 @@ def test_get_hostname(self): def test_hello_world(self): result = self.kernel.execute('print("Hello World", quote = FALSE)') - self.assertRegex(result, 'Hello World') + self.assertRegex(result, "Hello World") def test_restart(self): @@ -24,13 +26,15 @@ def test_restart(self): # 3. 
Attempt to increment the variable, verify an error was received (due to undefined variable) self.kernel.execute("x = 123") - original_value = int(self.kernel.execute("write(x,stdout())")) # This will only return the value. + original_value = int( + self.kernel.execute("write(x,stdout())") + ) # This will only return the value. self.assertEqual(original_value, 123) self.assertTrue(self.kernel.restart()) error_result = self.kernel.execute("y = x + 1") - self.assertRegex(error_result, 'Error in eval') + self.assertRegex(error_result, "Error in eval") def test_interrupt(self): @@ -41,7 +45,9 @@ def test_interrupt(self): # 5. Attempt to increment the variable, verify expected result. self.kernel.execute("x = 123") - original_value = int(self.kernel.execute("write(x,stdout())")) # This will only return the value. + original_value = int( + self.kernel.execute("write(x,stdout())") + ) # This will only return the value. self.assertEqual(original_value, 123) # Start a thread that performs the interrupt. This thread must wait long enough to issue @@ -56,14 +62,16 @@ def test_interrupt(self): interrupted_result = self.kernel.execute(interrupted_code) # Ensure the result indicates an interrupt occurred - self.assertEqual(interrupted_result.strip(), 'begin') + self.assertEqual(interrupted_result.strip(), "begin") # Wait for thread to terminate - should be terminated already self.kernel.terminate_interrupt_thread() # Increment the pre-interrupt variable and ensure its value is correct self.kernel.execute("y = x + 1") - interrupted_value = int(self.kernel.execute("write(y,stdout())")) # This will only return the value. + interrupted_value = int( + self.kernel.execute("write(y,stdout())") + ) # This will only return the value. 
self.assertEqual(interrupted_value, 124) @@ -73,7 +81,9 @@ class RKernelBaseSparkTestCase(RKernelBaseTestCase): """ def test_get_application_id(self): - result = self.kernel.execute('SparkR:::callJMethod(SparkR:::callJMethod(sc, "sc"), "applicationId")') + result = self.kernel.execute( + 'SparkR:::callJMethod(SparkR:::callJMethod(sc, "sc"), "applicationId")' + ) self.assertRegex(result, self.get_expected_application_id()) def test_get_spark_version(self): @@ -94,8 +104,8 @@ class TestRKernelLocal(unittest.TestCase, RKernelBaseTestCase): @classmethod def setUpClass(cls): - super(TestRKernelLocal, cls).setUpClass() - print('\nStarting R kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting R kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -103,19 +113,21 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestRKernelLocal, cls).tearDownClass() - print('\nShutting down R kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down R kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestRKernelClient(unittest.TestCase, RKernelBaseSparkTestCase): - KERNELSPEC = os.getenv("R_KERNEL_CLIENT_NAME", "spark_R_yarn_client") # spark_R_kubernetes for k8s + KERNELSPEC = os.getenv( + "R_KERNEL_CLIENT_NAME", "spark_R_yarn_client" + ) # spark_R_kubernetes for k8s @classmethod def setUpClass(cls): - print('\nStarting R kernel using {} kernelspec'.format(cls.KERNELSPEC)) + print(f"\nStarting R kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -123,20 +135,22 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestRKernelClient, cls).tearDownClass() - print('\nShutting down R kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down R kernel 
using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestRKernelCluster(unittest.TestCase, RKernelBaseSparkTestCase): - KERNELSPEC = os.getenv("R_KERNEL_CLUSTER_NAME", "spark_R_yarn_cluster") # spark_R_kubernetes for k8s + KERNELSPEC = os.getenv( + "R_KERNEL_CLUSTER_NAME", "spark_R_yarn_cluster" + ) # spark_R_kubernetes for k8s @classmethod def setUpClass(cls): - super(TestRKernelCluster, cls).setUpClass() - print('\nStarting R kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting R kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -144,12 +158,12 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestRKernelCluster, cls).tearDownClass() - print('\nShutting down Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/enterprise_gateway/itests/test_scala_kernel.py b/enterprise_gateway/itests/test_scala_kernel.py index 022b6bc8d..795ba4a8e 100644 --- a/enterprise_gateway/itests/test_scala_kernel.py +++ b/enterprise_gateway/itests/test_scala_kernel.py @@ -1,8 +1,10 @@ -import unittest import os -from .test_base import TestBase +import unittest + from enterprise_gateway.client.gateway_client import GatewayClient +from .test_base import TestBase + class ScalaKernelBaseTestCase(TestBase): """ @@ -10,14 +12,16 @@ class ScalaKernelBaseTestCase(TestBase): """ def test_get_hostname(self): - result = self.kernel.execute('import java.net._; \ + result = self.kernel.execute( + "import java.net._; \ val localhost: InetAddress = InetAddress.getLocalHost; \ - val localIpAddress: String = localhost.getHostName') + val localIpAddress: String = 
localhost.getHostName" + ) self.assertRegex(result, self.get_expected_hostname()) def test_hello_world(self): result = self.kernel.execute('println("Hello World")') - self.assertRegex(result, 'Hello World') + self.assertRegex(result, "Hello World") def test_restart(self): @@ -32,7 +36,7 @@ def test_restart(self): self.assertTrue(self.kernel.restart()) error_result = self.kernel.execute("var y = x + 1") - self.assertRegex(error_result, 'Compile Error') + self.assertRegex(error_result, "Compile Error") def test_interrupt(self): @@ -58,7 +62,7 @@ def test_interrupt(self): interrupted_result = self.kernel.execute(interrupted_code) # Ensure the result indicates an interrupt occurred - self.assertRegex(interrupted_result, 'java.lang.InterruptedException') + self.assertRegex(interrupted_result, "java.lang.InterruptedException") # Wait for thread to terminate - should be terminated already self.kernel.terminate_interrupt_thread() @@ -75,7 +79,7 @@ class ScalaKernelBaseSparkTestCase(ScalaKernelBaseTestCase): """ def test_get_application_id(self): - result = self.kernel.execute('sc.applicationId') + result = self.kernel.execute("sc.applicationId") self.assertRegex(result, self.get_expected_application_id()) def test_get_spark_version(self): @@ -93,13 +97,15 @@ def test_get_deploy_mode(self): class TestScalaKernelLocal(unittest.TestCase, ScalaKernelBaseTestCase): SPARK_VERSION = os.getenv("SPARK_VERSION") - DEFAULT_KERNELSPEC = "spark_{}_scala".format(SPARK_VERSION) - KERNELSPEC = os.getenv("SCALA_KERNEL_LOCAL_NAME", DEFAULT_KERNELSPEC) # scala_kubernetes for k8s + DEFAULT_KERNELSPEC = f"spark_{SPARK_VERSION}_scala" + KERNELSPEC = os.getenv( + "SCALA_KERNEL_LOCAL_NAME", DEFAULT_KERNELSPEC + ) # scala_kubernetes for k8s @classmethod def setUpClass(cls): - super(TestScalaKernelLocal, cls).setUpClass() - print('\nStarting Scala kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec") # 
initialize environment cls.gatewayClient = GatewayClient() @@ -107,20 +113,22 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestScalaKernelLocal, cls).tearDownClass() - print('\nShutting down Scala kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Scala kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestScalaKernelClient(unittest.TestCase, ScalaKernelBaseSparkTestCase): - KERNELSPEC = os.getenv("SCALA_KERNEL_CLIENT_NAME", "spark_scala_yarn_client") # spark_scala_kubernetes for k8s + KERNELSPEC = os.getenv( + "SCALA_KERNEL_CLIENT_NAME", "spark_scala_yarn_client" + ) # spark_scala_kubernetes for k8s @classmethod def setUpClass(cls): - super(TestScalaKernelClient, cls).setUpClass() - print('\nStarting Scala kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -128,20 +136,22 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestScalaKernelClient, cls).tearDownClass() - print('\nShutting down Scala kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Scala kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestScalaKernelCluster(unittest.TestCase, ScalaKernelBaseSparkTestCase): - KERNELSPEC = os.getenv("SCALA_KERNEL_CLUSTER_NAME", "spark_scala_yarn_cluster") # spark_scala_kubernetes for k8s + KERNELSPEC = os.getenv( + "SCALA_KERNEL_CLUSTER_NAME", "spark_scala_yarn_cluster" + ) # spark_scala_kubernetes for k8s @classmethod def setUpClass(cls): - super(TestScalaKernelCluster, cls).setUpClass() - print('\nStarting Scala kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().setUpClass() + print(f"\nStarting Scala kernel using 
{cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() @@ -149,12 +159,12 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(TestScalaKernelCluster, cls).tearDownClass() - print('\nShutting down Python kernel using {} kernelspec'.format(cls.KERNELSPEC)) + super().tearDownClass() + print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/enterprise_gateway/mixins.py b/enterprise_gateway/mixins.py index d38708391..415d31d42 100644 --- a/enterprise_gateway/mixins.py +++ b/enterprise_gateway/mixins.py @@ -2,31 +2,42 @@ # Distributed under the terms of the Modified BSD License. """Mixins for Tornado handlers.""" -from distutils.util import strtobool -from http.client import responses import json import os import ssl import traceback +from distutils.util import strtobool +from http.client import responses from tornado import web from tornado.log import LogFormatter - -from traitlets import default, List, Set, Unicode, Type, Instance, Bool, CBool, Integer, observe +from traitlets import ( + Bool, + CBool, + Instance, + Integer, + List, + Set, + Type, + Unicode, + default, + observe, +) from traitlets.config import Configurable -class CORSMixin(object): +class CORSMixin: """ Mixes CORS headers into tornado.web.RequestHandlers. 
""" + SETTINGS_TO_HEADERS = { - 'eg_allow_credentials': 'Access-Control-Allow-Credentials', - 'eg_allow_headers': 'Access-Control-Allow-Headers', - 'eg_allow_methods': 'Access-Control-Allow-Methods', - 'eg_allow_origin': 'Access-Control-Allow-Origin', - 'eg_expose_headers': 'Access-Control-Expose-Headers', - 'eg_max_age': 'Access-Control-Max-Age' + "eg_allow_credentials": "Access-Control-Allow-Credentials", + "eg_allow_headers": "Access-Control-Allow-Headers", + "eg_allow_methods": "Access-Control-Allow-Methods", + "eg_allow_origin": "Access-Control-Allow-Origin", + "eg_expose_headers": "Access-Control-Expose-Headers", + "eg_max_age": "Access-Control-Max-Age", } def set_default_headers(self): @@ -36,7 +47,7 @@ def set_default_headers(self): Disables CSP configured by the notebook package. It's not necessary for a programmatic API. """ - super(CORSMixin, self).set_default_headers() + super().set_default_headers() # Add CORS headers after default if they have a non-blank value for settings_name, header_name in self.SETTINGS_TO_HEADERS.items(): header_value = self.settings.get(settings_name) @@ -44,7 +55,7 @@ def set_default_headers(self): self.set_header(header_name, header_value) # Don't set CSP: we're not serving frontend media types, only JSON - self.clear_header('Content-Security-Policy') + self.clear_header("Content-Security-Policy") def options(self): """ @@ -55,10 +66,11 @@ def options(self): self.finish() -class TokenAuthorizationMixin(object): +class TokenAuthorizationMixin: """Mixes token auth into tornado.web.RequestHandlers and tornado.websocket.WebsocketHandlers. """ + header_prefix = "token " header_prefix_len = len(header_prefix) @@ -76,24 +88,25 @@ def prepare(self): with the `@web.authenticated` decorated methods in the notebook package. 
""" - server_token = self.settings.get('eg_auth_token') - if server_token and not self.request.method == 'OPTIONS': - client_token = self.get_argument('token', None) + server_token = self.settings.get("eg_auth_token") + if server_token and not self.request.method == "OPTIONS": + client_token = self.get_argument("token", None) if client_token is None: - client_token = self.request.headers.get('Authorization') + client_token = self.request.headers.get("Authorization") if client_token and client_token.startswith(self.header_prefix): - client_token = client_token[self.header_prefix_len:] + client_token = client_token[self.header_prefix_len :] else: client_token = None if client_token != server_token: return self.send_error(401) - return super(TokenAuthorizationMixin, self).prepare() + return super().prepare() -class JSONErrorsMixin(object): +class JSONErrorsMixin: """Mixes `write_error` into tornado.web.RequestHandlers to respond with JSON format errors. """ + def write_error(self, status_code, **kwargs): """Responds with an application/json error object. 
@@ -113,400 +126,512 @@ def write_error(self, status_code, **kwargs): -------- {"401", reason="Unauthorized", message="Invalid auth token"} """ - exc_info = kwargs.get('exc_info') - message = '' - reason = responses.get(status_code, 'Unknown HTTP Error') + exc_info = kwargs.get("exc_info") + message = "" + reason = responses.get(status_code, "Unknown HTTP Error") reply = { - 'reason': reason, - 'message': message, + "reason": reason, + "message": message, } if exc_info: exception = exc_info[1] # Get the custom message, if defined if isinstance(exception, web.HTTPError): - reply['message'] = exception.log_message or message + reply["message"] = exception.log_message or message else: - reply['message'] = 'Unknown server error' - reply['traceback'] = ''.join(traceback.format_exception(*exc_info)) + reply["message"] = "Unknown server error" + reply["traceback"] = "".join(traceback.format_exception(*exc_info)) # Construct the custom reason, if defined - custom_reason = getattr(exception, 'reason', '') + custom_reason = getattr(exception, "reason", "") if custom_reason: - reply['reason'] = custom_reason + reply["reason"] = custom_reason - self.set_header('Content-Type', 'application/json') - self.set_status(status_code, reason=reply['reason']) + self.set_header("Content-Type", "application/json") + self.set_status(status_code, reason=reply["reason"]) self.finish(json.dumps(reply)) class EnterpriseGatewayConfigMixin(Configurable): # Server IP / PORT binding - port_env = 'EG_PORT' + port_env = "EG_PORT" port_default_value = 8888 - port = Integer(port_default_value, config=True, - help='Port on which to listen (EG_PORT env var)') + port = Integer( + port_default_value, config=True, help="Port on which to listen (EG_PORT env var)" + ) - @default('port') + @default("port") def port_default(self): - return int(os.getenv(self.port_env, os.getenv('KG_PORT', self.port_default_value))) + return int(os.getenv(self.port_env, os.getenv("KG_PORT", self.port_default_value))) - 
port_retries_env = 'EG_PORT_RETRIES' + port_retries_env = "EG_PORT_RETRIES" port_retries_default_value = 50 - port_retries = Integer(port_retries_default_value, config=True, - help="""Number of ports to try if the specified port is not available - (EG_PORT_RETRIES env var)""") + port_retries = Integer( + port_retries_default_value, + config=True, + help="""Number of ports to try if the specified port is not available + (EG_PORT_RETRIES env var)""", + ) - @default('port_retries') + @default("port_retries") def port_retries_default(self): - return int(os.getenv(self.port_retries_env, os.getenv('KG_PORT_RETRIES', self.port_retries_default_value))) - - ip_env = 'EG_IP' - ip_default_value = '127.0.0.1' - ip = Unicode(ip_default_value, config=True, - help='IP address on which to listen (EG_IP env var)') + return int( + os.getenv( + self.port_retries_env, os.getenv("KG_PORT_RETRIES", self.port_retries_default_value) + ) + ) + + ip_env = "EG_IP" + ip_default_value = "127.0.0.1" + ip = Unicode( + ip_default_value, config=True, help="IP address on which to listen (EG_IP env var)" + ) - @default('ip') + @default("ip") def ip_default(self): - return os.getenv(self.ip_env, os.getenv('KG_IP', self.ip_default_value)) + return os.getenv(self.ip_env, os.getenv("KG_IP", self.ip_default_value)) # Base URL - base_url_env = 'EG_BASE_URL' - base_url_default_value = '/' - base_url = Unicode(base_url_default_value, config=True, - help='The base path for mounting all API resources (EG_BASE_URL env var)') + base_url_env = "EG_BASE_URL" + base_url_default_value = "/" + base_url = Unicode( + base_url_default_value, + config=True, + help="The base path for mounting all API resources (EG_BASE_URL env var)", + ) - @default('base_url') + @default("base_url") def base_url_default(self): - return os.getenv(self.base_url_env, os.getenv('KG_BASE_URL', self.base_url_default_value)) + return os.getenv(self.base_url_env, os.getenv("KG_BASE_URL", self.base_url_default_value)) # Token authorization - 
auth_token_env = 'EG_AUTH_TOKEN' - auth_token = Unicode(config=True, - help='Authorization token required for all requests (EG_AUTH_TOKEN env var)') + auth_token_env = "EG_AUTH_TOKEN" + auth_token = Unicode( + config=True, help="Authorization token required for all requests (EG_AUTH_TOKEN env var)" + ) - @default('auth_token') + @default("auth_token") def _auth_token_default(self): - return os.getenv(self.auth_token_env, os.getenv('KG_AUTH_TOKEN', '')) + return os.getenv(self.auth_token_env, os.getenv("KG_AUTH_TOKEN", "")) # Begin CORS headers - allow_credentials_env = 'EG_ALLOW_CREDENTIALS' - allow_credentials = Unicode(config=True, - help='Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env var)') + allow_credentials_env = "EG_ALLOW_CREDENTIALS" + allow_credentials = Unicode( + config=True, + help="Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env var)", + ) - @default('allow_credentials') + @default("allow_credentials") def allow_credentials_default(self): - return os.getenv(self.allow_credentials_env, os.getenv('KG_ALLOW_CREDENTIALS', '')) + return os.getenv(self.allow_credentials_env, os.getenv("KG_ALLOW_CREDENTIALS", "")) - allow_headers_env = 'EG_ALLOW_HEADERS' - allow_headers = Unicode(config=True, - help='Sets the Access-Control-Allow-Headers header. (EG_ALLOW_HEADERS env var)') + allow_headers_env = "EG_ALLOW_HEADERS" + allow_headers = Unicode( + config=True, help="Sets the Access-Control-Allow-Headers header. (EG_ALLOW_HEADERS env var)" + ) - @default('allow_headers') + @default("allow_headers") def allow_headers_default(self): - return os.getenv(self.allow_headers_env, os.getenv('KG_ALLOW_HEADERS', '')) + return os.getenv(self.allow_headers_env, os.getenv("KG_ALLOW_HEADERS", "")) - allow_methods_env = 'EG_ALLOW_METHODS' - allow_methods = Unicode(config=True, - help='Sets the Access-Control-Allow-Methods header. 
(EG_ALLOW_METHODS env var)') + allow_methods_env = "EG_ALLOW_METHODS" + allow_methods = Unicode( + config=True, help="Sets the Access-Control-Allow-Methods header. (EG_ALLOW_METHODS env var)" + ) - @default('allow_methods') + @default("allow_methods") def allow_methods_default(self): - return os.getenv(self.allow_methods_env, os.getenv('KG_ALLOW_METHODS', '')) + return os.getenv(self.allow_methods_env, os.getenv("KG_ALLOW_METHODS", "")) - allow_origin_env = 'EG_ALLOW_ORIGIN' - allow_origin = Unicode(config=True, - help='Sets the Access-Control-Allow-Origin header. (EG_ALLOW_ORIGIN env var)') + allow_origin_env = "EG_ALLOW_ORIGIN" + allow_origin = Unicode( + config=True, help="Sets the Access-Control-Allow-Origin header. (EG_ALLOW_ORIGIN env var)" + ) - @default('allow_origin') + @default("allow_origin") def allow_origin_default(self): - return os.getenv(self.allow_origin_env, os.getenv('KG_ALLOW_ORIGIN', '')) + return os.getenv(self.allow_origin_env, os.getenv("KG_ALLOW_ORIGIN", "")) - expose_headers_env = 'EG_EXPOSE_HEADERS' - expose_headers = Unicode(config=True, - help='Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var)') + expose_headers_env = "EG_EXPOSE_HEADERS" + expose_headers = Unicode( + config=True, + help="Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var)", + ) - @default('expose_headers') + @default("expose_headers") def expose_headers_default(self): - return os.getenv(self.expose_headers_env, os.getenv('KG_EXPOSE_HEADERS', '')) + return os.getenv(self.expose_headers_env, os.getenv("KG_EXPOSE_HEADERS", "")) - trust_xheaders_env = 'EG_TRUST_XHEADERS' - trust_xheaders = CBool(False, config=True, - help="""Use x-* header values for overriding the remote-ip, useful when - application is behing a proxy. 
(EG_TRUST_XHEADERS env var)""") + trust_xheaders_env = "EG_TRUST_XHEADERS" + trust_xheaders = CBool( + False, + config=True, + help="""Use x-* header values for overriding the remote-ip, useful when + application is behing a proxy. (EG_TRUST_XHEADERS env var)""", + ) - @default('trust_xheaders') + @default("trust_xheaders") def trust_xheaders_default(self): - return strtobool(os.getenv(self.trust_xheaders_env, os.getenv('KG_TRUST_XHEADERS', 'False'))) + return strtobool( + os.getenv(self.trust_xheaders_env, os.getenv("KG_TRUST_XHEADERS", "False")) + ) - certfile_env = 'EG_CERTFILE' - certfile = Unicode(None, config=True, allow_none=True, - help='The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)') + certfile_env = "EG_CERTFILE" + certfile = Unicode( + None, + config=True, + allow_none=True, + help="The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)", + ) - @default('certfile') + @default("certfile") def certfile_default(self): - return os.getenv(self.certfile_env, os.getenv('KG_CERTFILE')) + return os.getenv(self.certfile_env, os.getenv("KG_CERTFILE")) - keyfile_env = 'EG_KEYFILE' - keyfile = Unicode(None, config=True, allow_none=True, - help='The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env var)') + keyfile_env = "EG_KEYFILE" + keyfile = Unicode( + None, + config=True, + allow_none=True, + help="The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env var)", + ) - @default('keyfile') + @default("keyfile") def keyfile_default(self): - return os.getenv(self.keyfile_env, os.getenv('KG_KEYFILE')) + return os.getenv(self.keyfile_env, os.getenv("KG_KEYFILE")) - client_ca_env = 'EG_CLIENT_CA' - client_ca = Unicode(None, config=True, allow_none=True, - help="""The full path to a certificate authority certificate for SSL/TLS - client authentication. 
(EG_CLIENT_CA env var)""") + client_ca_env = "EG_CLIENT_CA" + client_ca = Unicode( + None, + config=True, + allow_none=True, + help="""The full path to a certificate authority certificate for SSL/TLS + client authentication. (EG_CLIENT_CA env var)""", + ) - @default('client_ca') + @default("client_ca") def client_ca_default(self): - return os.getenv(self.client_ca_env, os.getenv('KG_CLIENT_CA')) + return os.getenv(self.client_ca_env, os.getenv("KG_CLIENT_CA")) - ssl_version_env = 'EG_SSL_VERSION' + ssl_version_env = "EG_SSL_VERSION" ssl_version_default_value = ssl.PROTOCOL_TLSv1_2 - ssl_version = Integer(None, config=True, allow_none=True, - help="""Sets the SSL version to use for the web socket - connection. (EG_SSL_VERSION env var)""") + ssl_version = Integer( + None, + config=True, + allow_none=True, + help="""Sets the SSL version to use for the web socket + connection. (EG_SSL_VERSION env var)""", + ) - @default('ssl_version') + @default("ssl_version") def ssl_version_default(self): - ssl_from_env = os.getenv(self.ssl_version_env, os.getenv('KG_SSL_VERSION')) + ssl_from_env = os.getenv(self.ssl_version_env, os.getenv("KG_SSL_VERSION")) return ssl_from_env if ssl_from_env is None else int(ssl_from_env) - max_age_env = 'EG_MAX_AGE' - max_age = Unicode(config=True, - help='Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var)') + max_age_env = "EG_MAX_AGE" + max_age = Unicode( + config=True, help="Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var)" + ) - @default('max_age') + @default("max_age") def max_age_default(self): - return os.getenv(self.max_age_env, os.getenv('KG_MAX_AGE', '')) + return os.getenv(self.max_age_env, os.getenv("KG_MAX_AGE", "")) + # End CORS headers - max_kernels_env = 'EG_MAX_KERNELS' - max_kernels = Integer(None, config=True, - allow_none=True, - help="""Limits the number of kernel instances allowed to run by this gateway. - Unbounded by default. 
(EG_MAX_KERNELS env var)""") + max_kernels_env = "EG_MAX_KERNELS" + max_kernels = Integer( + None, + config=True, + allow_none=True, + help="""Limits the number of kernel instances allowed to run by this gateway. + Unbounded by default. (EG_MAX_KERNELS env var)""", + ) - @default('max_kernels') + @default("max_kernels") def max_kernels_default(self): - val = os.getenv(self.max_kernels_env, os.getenv('KG_MAX_KERNELS')) + val = os.getenv(self.max_kernels_env, os.getenv("KG_MAX_KERNELS")) return val if val is None else int(val) - default_kernel_name_env = 'EG_DEFAULT_KERNEL_NAME' - default_kernel_name = Unicode(config=True, - help='Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var)') + default_kernel_name_env = "EG_DEFAULT_KERNEL_NAME" + default_kernel_name = Unicode( + config=True, + help="Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var)", + ) - @default('default_kernel_name') + @default("default_kernel_name") def default_kernel_name_default(self): # defaults to Jupyter's default kernel name on empty string - return os.getenv(self.default_kernel_name_env, os.getenv('KG_DEFAULT_KERNEL_NAME', '')) + return os.getenv(self.default_kernel_name_env, os.getenv("KG_DEFAULT_KERNEL_NAME", "")) - list_kernels_env = 'EG_LIST_KERNELS' - list_kernels = Bool(config=True, - help="""Permits listing of the running kernels using API endpoints /api/kernels + list_kernels_env = "EG_LIST_KERNELS" + list_kernels = Bool( + config=True, + help="""Permits listing of the running kernels using API endpoints /api/kernels and /api/sessions. 
(EG_LIST_KERNELS env var) Note: Jupyter Notebook - allows this by default but Jupyter Enterprise Gateway does not.""") + allows this by default but Jupyter Enterprise Gateway does not.""", + ) - @default('list_kernels') + @default("list_kernels") def list_kernels_default(self): - return os.getenv(self.list_kernels_env, os.getenv('KG_LIST_KERNELS', 'False')).lower() == 'true' + return ( + os.getenv(self.list_kernels_env, os.getenv("KG_LIST_KERNELS", "False")).lower() + == "true" + ) - env_whitelist_env = 'EG_ENV_WHITELIST' - env_whitelist = List(config=True, - help="""Environment variables allowed to be set when a client requests a + env_whitelist_env = "EG_ENV_WHITELIST" + env_whitelist = List( + config=True, + help="""Environment variables allowed to be set when a client requests a new kernel. Use '*' to allow all environment variables sent in the request. - (EG_ENV_WHITELIST env var)""") + (EG_ENV_WHITELIST env var)""", + ) - @default('env_whitelist') + @default("env_whitelist") def env_whitelist_default(self): - return os.getenv(self.env_whitelist_env, os.getenv('KG_ENV_WHITELIST', '')).split(',') + return os.getenv(self.env_whitelist_env, os.getenv("KG_ENV_WHITELIST", "")).split(",") - env_process_whitelist_env = 'EG_ENV_PROCESS_WHITELIST' - env_process_whitelist = List(config=True, - help="""Environment variables allowed to be inherited - from the spawning process by the kernel. (EG_ENV_PROCESS_WHITELIST env var)""") + env_process_whitelist_env = "EG_ENV_PROCESS_WHITELIST" + env_process_whitelist = List( + config=True, + help="""Environment variables allowed to be inherited + from the spawning process by the kernel. 
(EG_ENV_PROCESS_WHITELIST env var)""", + ) - @default('env_process_whitelist') + @default("env_process_whitelist") def env_process_whitelist_default(self): - return os.getenv(self.env_process_whitelist_env, os.getenv('KG_ENV_PROCESS_WHITELIST', '')).split(',') + return os.getenv( + self.env_process_whitelist_env, os.getenv("KG_ENV_PROCESS_WHITELIST", "") + ).split(",") - kernel_headers_env = 'EG_KERNEL_HEADERS' - kernel_headers = List(config=True, - help="""Request headers to make available to kernel launch framework. - (EG_KERNEL_HEADERS env var)""") + kernel_headers_env = "EG_KERNEL_HEADERS" + kernel_headers = List( + config=True, + help="""Request headers to make available to kernel launch framework. + (EG_KERNEL_HEADERS env var)""", + ) - @default('kernel_headers') + @default("kernel_headers") def kernel_headers_default(self): default_headers = os.getenv(self.kernel_headers_env) - return default_headers.split(',') if default_headers else [] + return default_headers.split(",") if default_headers else [] # Remote hosts - remote_hosts_env = 'EG_REMOTE_HOSTS' - remote_hosts_default_value = 'localhost' - remote_hosts = List(default_value=[remote_hosts_default_value], config=True, - help="""Bracketed comma-separated list of hosts on which DistributedProcessProxy + remote_hosts_env = "EG_REMOTE_HOSTS" + remote_hosts_default_value = "localhost" + remote_hosts = List( + default_value=[remote_hosts_default_value], + config=True, + help="""Bracketed comma-separated list of hosts on which DistributedProcessProxy kernels will be launched e.g., ['host1','host2']. 
(EG_REMOTE_HOSTS env var - - non-bracketed, just comma-separated)""") + - non-bracketed, just comma-separated)""", + ) - @default('remote_hosts') + @default("remote_hosts") def remote_hosts_default(self): - return os.getenv(self.remote_hosts_env, self.remote_hosts_default_value).split(',') + return os.getenv(self.remote_hosts_env, self.remote_hosts_default_value).split(",") # Yarn endpoint - yarn_endpoint_env = 'EG_YARN_ENDPOINT' - yarn_endpoint = Unicode(None, config=True, allow_none=True, - help="""The http url specifying the YARN Resource Manager. Note: If this value is NOT set, + yarn_endpoint_env = "EG_YARN_ENDPOINT" + yarn_endpoint = Unicode( + None, + config=True, + allow_none=True, + help="""The http url specifying the YARN Resource Manager. Note: If this value is NOT set, the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the - active resource manager. (EG_YARN_ENDPOINT env var)""") + active resource manager. (EG_YARN_ENDPOINT env var)""", + ) - @default('yarn_endpoint') + @default("yarn_endpoint") def yarn_endpoint_default(self): return os.getenv(self.yarn_endpoint_env) # Alt Yarn endpoint - alt_yarn_endpoint_env = 'EG_ALT_YARN_ENDPOINT' - alt_yarn_endpoint = Unicode(None, config=True, allow_none=True, - help="""The http url specifying the alternate YARN Resource Manager. This value should + alt_yarn_endpoint_env = "EG_ALT_YARN_ENDPOINT" + alt_yarn_endpoint = Unicode( + None, + config=True, + allow_none=True, + help="""The http url specifying the alternate YARN Resource Manager. This value should be set when YARN Resource Managers are configured for high availability. Note: If both YARN endpoints are NOT set, the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the active resource manager. 
- (EG_ALT_YARN_ENDPOINT env var)""") + (EG_ALT_YARN_ENDPOINT env var)""", + ) - @default('alt_yarn_endpoint') + @default("alt_yarn_endpoint") def alt_yarn_endpoint_default(self): return os.getenv(self.alt_yarn_endpoint_env) - yarn_endpoint_security_enabled_env = 'EG_YARN_ENDPOINT_SECURITY_ENABLED' + yarn_endpoint_security_enabled_env = "EG_YARN_ENDPOINT_SECURITY_ENABLED" yarn_endpoint_security_enabled_default_value = False - yarn_endpoint_security_enabled = Bool(yarn_endpoint_security_enabled_default_value, config=True, - help="""Is YARN Kerberos/SPNEGO Security enabled (True/False). - (EG_YARN_ENDPOINT_SECURITY_ENABLED env var)""") + yarn_endpoint_security_enabled = Bool( + yarn_endpoint_security_enabled_default_value, + config=True, + help="""Is YARN Kerberos/SPNEGO Security enabled (True/False). + (EG_YARN_ENDPOINT_SECURITY_ENABLED env var)""", + ) - @default('yarn_endpoint_security_enabled') + @default("yarn_endpoint_security_enabled") def yarn_endpoint_security_enabled_default(self): - return bool(os.getenv(self.yarn_endpoint_security_enabled_env, - self.yarn_endpoint_security_enabled_default_value)) + return bool( + os.getenv( + self.yarn_endpoint_security_enabled_env, + self.yarn_endpoint_security_enabled_default_value, + ) + ) # Conductor endpoint - conductor_endpoint_env = 'EG_CONDUCTOR_ENDPOINT' + conductor_endpoint_env = "EG_CONDUCTOR_ENDPOINT" conductor_endpoint_default_value = None - conductor_endpoint = Unicode(conductor_endpoint_default_value, - allow_none=True, - config=True, - help="""The http url for accessing the Conductor REST API. - (EG_CONDUCTOR_ENDPOINT env var)""") + conductor_endpoint = Unicode( + conductor_endpoint_default_value, + allow_none=True, + config=True, + help="""The http url for accessing the Conductor REST API. 
+ (EG_CONDUCTOR_ENDPOINT env var)""", + ) - @default('conductor_endpoint') + @default("conductor_endpoint") def conductor_endpoint_default(self): return os.getenv(self.conductor_endpoint_env, self.conductor_endpoint_default_value) _log_formatter_cls = LogFormatter # traitlet default is LevelFormatter - @default('log_format') + @default("log_format") def _default_log_format(self): """override default log format to include milliseconds""" - return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" + return ( + "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" + ) # Impersonation enabled - impersonation_enabled_env = 'EG_IMPERSONATION_ENABLED' - impersonation_enabled = Bool(False, config=True, - help="""Indicates whether impersonation will be performed during kernel launch. - (EG_IMPERSONATION_ENABLED env var)""") + impersonation_enabled_env = "EG_IMPERSONATION_ENABLED" + impersonation_enabled = Bool( + False, + config=True, + help="""Indicates whether impersonation will be performed during kernel launch. 
+ (EG_IMPERSONATION_ENABLED env var)""", + ) - @default('impersonation_enabled') + @default("impersonation_enabled") def impersonation_enabled_default(self): - return bool(os.getenv(self.impersonation_enabled_env, 'false').lower() == 'true') + return bool(os.getenv(self.impersonation_enabled_env, "false").lower() == "true") # Unauthorized users - unauthorized_users_env = 'EG_UNAUTHORIZED_USERS' - unauthorized_users_default_value = 'root' - unauthorized_users = Set(default_value={unauthorized_users_default_value}, config=True, - help="""Comma-separated list of user names (e.g., ['root','admin']) against which + unauthorized_users_env = "EG_UNAUTHORIZED_USERS" + unauthorized_users_default_value = "root" + unauthorized_users = Set( + default_value={unauthorized_users_default_value}, + config=True, + help="""Comma-separated list of user names (e.g., ['root','admin']) against which KERNEL_USERNAME will be compared. Any match (case-sensitive) will prevent the kernel's launch and result in an HTTP 403 (Forbidden) error. - (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated)""") + (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated)""", + ) - @default('unauthorized_users') + @default("unauthorized_users") def unauthorized_users_default(self): - return os.getenv(self.unauthorized_users_env, self.unauthorized_users_default_value).split(',') + return os.getenv(self.unauthorized_users_env, self.unauthorized_users_default_value).split( + "," + ) # Authorized users - authorized_users_env = 'EG_AUTHORIZED_USERS' - authorized_users = Set(config=True, - help="""Comma-separated list of user names (e.g., ['bob','alice']) against which + authorized_users_env = "EG_AUTHORIZED_USERS" + authorized_users = Set( + config=True, + help="""Comma-separated list of user names (e.g., ['bob','alice']) against which KERNEL_USERNAME will be compared. Any match (case-sensitive) will allow the kernel's launch, otherwise an HTTP 403 (Forbidden) error will be raised. 
The set of unauthorized users takes precedence. This option should be used carefully as it can dramatically limit who can launch kernels. (EG_AUTHORIZED_USERS env var - non-bracketed, - just comma-separated)""") + just comma-separated)""", + ) - @default('authorized_users') + @default("authorized_users") def authorized_users_default(self): au_env = os.getenv(self.authorized_users_env) - return au_env.split(',') if au_env is not None else [] + return au_env.split(",") if au_env is not None else [] # Authorized origin - authorized_origin_env = 'EG_AUTHORIZED_ORIGIN' - authorized_origin = Unicode(config=True, - help="""Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will match + authorized_origin_env = "EG_AUTHORIZED_ORIGIN" + authorized_origin = Unicode( + config=True, + help="""Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will match against the request's SSL certificate. An HTTP 403 (Forbidden) error will be raised on a failed match. This option requires TLS to be enabled. It does not support IP - addresses. (EG_AUTHORIZED_ORIGIN env var)""") + addresses. (EG_AUTHORIZED_ORIGIN env var)""", + ) # Port range - port_range_env = 'EG_PORT_RANGE' + port_range_env = "EG_PORT_RANGE" port_range_default_value = "0..0" - port_range = Unicode(port_range_default_value, config=True, - help="""Specifies the lower and upper port numbers from which ports are created. + port_range = Unicode( + port_range_default_value, + config=True, + help="""Specifies the lower and upper port numbers from which ports are created. The bounded values are separated by '..' (e.g., 33245..34245 specifies a range of 1000 ports to be randomly selected). A range of zero (e.g., 33245..33245 or 0..0) disables port-range - enforcement. (EG_PORT_RANGE env var)""") + enforcement. 
(EG_PORT_RANGE env var)""", + ) - @default('port_range') + @default("port_range") def port_range_default(self): return os.getenv(self.port_range_env, self.port_range_default_value) # Max Kernels per User - max_kernels_per_user_env = 'EG_MAX_KERNELS_PER_USER' + max_kernels_per_user_env = "EG_MAX_KERNELS_PER_USER" max_kernels_per_user_default_value = -1 - max_kernels_per_user = Integer(max_kernels_per_user_default_value, config=True, - help="""Specifies the maximum number of kernels a user can have active + max_kernels_per_user = Integer( + max_kernels_per_user_default_value, + config=True, + help="""Specifies the maximum number of kernels a user can have active simultaneously. A value of -1 disables enforcement. - (EG_MAX_KERNELS_PER_USER env var)""") + (EG_MAX_KERNELS_PER_USER env var)""", + ) - @default('max_kernels_per_user') + @default("max_kernels_per_user") def max_kernels_per_user_default(self): - return int(os.getenv(self.max_kernels_per_user_env, self.max_kernels_per_user_default_value)) + return int( + os.getenv(self.max_kernels_per_user_env, self.max_kernels_per_user_default_value) + ) - ws_ping_interval_env = 'EG_WS_PING_INTERVAL_SECS' + ws_ping_interval_env = "EG_WS_PING_INTERVAL_SECS" ws_ping_interval_default_value = 30 - ws_ping_interval = Integer(ws_ping_interval_default_value, config=True, - help="""Specifies the ping interval(in seconds) that should be used by zmq port + ws_ping_interval = Integer( + ws_ping_interval_default_value, + config=True, + help="""Specifies the ping interval(in seconds) that should be used by zmq port associated withspawned kernels.Set this variable to 0 to disable ping mechanism. 
- (EG_WS_PING_INTERVAL_SECS env var)""") + (EG_WS_PING_INTERVAL_SECS env var)""", + ) - @default('ws_ping_interval') + @default("ws_ping_interval") def ws_ping_interval_default(self): return int(os.getenv(self.ws_ping_interval_env, self.ws_ping_interval_default_value)) # Dynamic Update Interval - dynamic_config_interval_env = 'EG_DYNAMIC_CONFIG_INTERVAL' + dynamic_config_interval_env = "EG_DYNAMIC_CONFIG_INTERVAL" dynamic_config_interval_default_value = 0 - dynamic_config_interval = Integer(dynamic_config_interval_default_value, min=0, config=True, - help="""Specifies the number of seconds configuration files are polled for + dynamic_config_interval = Integer( + dynamic_config_interval_default_value, + min=0, + config=True, + help="""Specifies the number of seconds configuration files are polled for changes. A value of 0 or less disables dynamic config updates. - (EG_DYNAMIC_CONFIG_INTERVAL env var)""") + (EG_DYNAMIC_CONFIG_INTERVAL env var)""", + ) - @default('dynamic_config_interval') + @default("dynamic_config_interval") def dynamic_config_interval_default(self): - return int(os.getenv(self.dynamic_config_interval_env, self.dynamic_config_interval_default_value)) + return int( + os.getenv(self.dynamic_config_interval_env, self.dynamic_config_interval_default_value) + ) - @observe('dynamic_config_interval') + @observe("dynamic_config_interval") def dynamic_config_interval_changed(self, event): - prev_val = event['old'] - self.dynamic_config_interval = event['new'] + prev_val = event["old"] + self.dynamic_config_interval = event["new"] if self.dynamic_config_interval != prev_val: # Values are different. Stop the current poller. If new value is > 0, start a poller. 
if self.dynamic_config_poller: @@ -514,8 +639,10 @@ def dynamic_config_interval_changed(self, event): self.dynamic_config_poller = None if self.dynamic_config_interval <= 0: - self.log.warning("Dynamic configuration updates have been disabled and cannot be re-enabled " - "without restarting Enterprise Gateway!") + self.log.warning( + "Dynamic configuration updates have been disabled and cannot be re-enabled " + "without restarting Enterprise Gateway!" + ) # The interval has been changed, but still positive elif prev_val > 0 and hasattr(self, "init_dynamic_configs"): self.init_dynamic_configs() # Restart the poller @@ -530,7 +657,7 @@ def dynamic_config_interval_changed(self, event): help=""" The kernel spec manager class to use. Must be a subclass of `jupyter_client.kernelspec.KernelSpecManager`. - """ + """, ) kernel_spec_cache_class = Type( @@ -539,7 +666,7 @@ def dynamic_config_interval_changed(self, event): help=""" The kernel spec cache class to use. Must be a subclass of `enterprise_gateway.services.kernelspecs.KernelSpecCache`. - """ + """, ) kernel_manager_class = Type( @@ -549,7 +676,7 @@ def dynamic_config_interval_changed(self, event): help=""" The kernel manager class to use. Must be a subclass of `enterprise_gateway.services.kernels.RemoteMappingKernelManager`. - """ + """, ) kernel_session_manager_class = Type( @@ -559,5 +686,5 @@ def dynamic_config_interval_changed(self, event): help=""" The kernel session manager class to use. Must be a subclass of `enterprise_gateway.services.sessions.KernelSessionManager`. - """ + """, ) diff --git a/enterprise_gateway/services/api/handlers.py b/enterprise_gateway/services/api/handlers.py index 8fe62d72a..e42286964 100644 --- a/enterprise_gateway/services/api/handlers.py +++ b/enterprise_gateway/services/api/handlers.py @@ -2,11 +2,12 @@ # Distributed under the terms of the Modified BSD License. 
"""Tornado handlers for kernel specs.""" import os +from typing import List from jupyter_server.utils import ensure_async from tornado import web + from ...mixins import CORSMixin -from typing import List class BaseSpecHandler(CORSMixin, web.StaticFileHandler): @@ -14,8 +15,7 @@ class BaseSpecHandler(CORSMixin, web.StaticFileHandler): @staticmethod def get_resource_metadata() -> tuple: - """Returns the (resource, mime-type) for the handlers spec. - """ + """Returns the (resource, mime-type) for the handlers spec.""" pass def initialize(self) -> None: @@ -27,10 +27,9 @@ def initialize(self) -> None: web.StaticFileHandler.initialize(self, path=os.path.dirname(__file__)) async def get(self) -> None: - """Handler for a get on a specific handler - """ + """Handler for a get on a specific handler""" resource_name, content_type = self.get_resource_metadata() - self.set_header('Content-Type', content_type) + self.set_header("Content-Type", content_type) res = web.StaticFileHandler.get(self, resource_name) await ensure_async(res) @@ -41,19 +40,21 @@ def options(self, **kwargs) -> None: class SpecJsonHandler(BaseSpecHandler): """Exposes a JSON swagger specification""" + @staticmethod def get_resource_metadata() -> tuple: - return 'swagger.json', 'application/json' + return "swagger.json", "application/json" class APIYamlHandler(BaseSpecHandler): """Exposes a YAML swagger specification""" + @staticmethod def get_resource_metadata() -> tuple: - return 'swagger.yaml', 'text/x-yaml' + return "swagger.yaml", "text/x-yaml" default_handlers: List[str] = [ - ('/api/{}'.format(SpecJsonHandler.get_resource_metadata()[0]), SpecJsonHandler), - ('/api/{}'.format(APIYamlHandler.get_resource_metadata()[0]), APIYamlHandler) + (f"/api/{SpecJsonHandler.get_resource_metadata()[0]}", SpecJsonHandler), + (f"/api/{APIYamlHandler.get_resource_metadata()[0]}", APIYamlHandler), ] diff --git a/enterprise_gateway/services/api/swagger.json b/enterprise_gateway/services/api/swagger.json index 
03cb7e738..a32d205d7 100644 --- a/enterprise_gateway/services/api/swagger.json +++ b/enterprise_gateway/services/api/swagger.json @@ -9,12 +9,8 @@ "url": "https://jupyter.org" } }, - "produces": [ - "application/json" - ], - "consumes": [ - "application/json" - ], + "produces": ["application/json"], + "consumes": ["application/json"], "parameters": { "kernel": { "name": "kernel_id", @@ -59,9 +55,7 @@ "/api": { "get": { "summary": "Get API info", - "tags": [ - "api" - ], + "tags": ["api"], "responses": { "200": { "description": "Returns information about the API", @@ -74,13 +68,9 @@ }, "/api/swagger.yaml": { "get": { - "produces": [ - "text/x-yaml" - ], + "produces": ["text/x-yaml"], "summary": "Get API info", - "tags": [ - "api" - ], + "tags": ["api"], "responses": { "200": { "description": "Returns a swagger specification in yaml" @@ -91,9 +81,7 @@ "/api/swagger.json": { "get": { "summary": "Get API info", - "tags": [ - "api" - ], + "tags": ["api"], "responses": { "200": { "description": "Returns a swagger specification in json" @@ -104,15 +92,13 @@ "/api/kernelspecs": { "get": { "summary": "Get kernel specs", - "tags": [ - "kernelspecs" - ], + "tags": ["kernelspecs"], "parameters": { - "name": "user", - "required": false, - "in": "query", - "description": "When present, kernelspec results will be filtered based on the configured authorization of specified value.", - "type": "string" + "name": "user", + "required": false, + "in": "query", + "description": "When present, kernelspec results will be filtered based on the configured authorization of specified value.", + "type": "string" }, "responses": { "200": { @@ -139,9 +125,7 @@ "/api/kernels": { "get": { "summary": "List the JSON data for all currently running kernels", - "tags": [ - "kernels" - ], + "tags": ["kernels"], "responses": { "200": { "description": "List of running kernels", @@ -162,9 +146,7 @@ }, "post": { "summary": "Start a kernel and return the uuid", - "tags": [ - "kernels" - ], + "tags": 
["kernels"], "parameters": [ { "name": "start_kernel_body", @@ -218,9 +200,7 @@ ], "get": { "summary": "Get kernel information", - "tags": [ - "kernels" - ], + "tags": ["kernels"], "responses": { "200": { "description": "Information about the kernel", @@ -232,9 +212,7 @@ }, "delete": { "summary": "Kill a kernel and delete the kernel id", - "tags": [ - "kernels" - ], + "tags": ["kernels"], "responses": { "204": { "description": "Kernel deleted" @@ -250,9 +228,7 @@ ], "get": { "summary": "Upgrades the connection to a websocket connection.", - "tags": [ - "channels" - ], + "tags": ["channels"], "responses": { "200": { "description": "The connection will be upgraded to a websocket." @@ -268,9 +244,7 @@ ], "post": { "summary": "Interrupt a kernel", - "tags": [ - "kernels" - ], + "tags": ["kernels"], "responses": { "204": { "description": "Kernel interrupted" @@ -286,9 +260,7 @@ ], "post": { "summary": "Restart a kernel", - "tags": [ - "kernels" - ], + "tags": ["kernels"], "responses": { "200": { "description": "Kernel interrupted", @@ -309,9 +281,7 @@ "/api/sessions": { "get": { "summary": "List available sessions", - "tags": [ - "sessions" - ], + "tags": ["sessions"], "responses": { "200": { "description": "List of current sessions", @@ -332,9 +302,7 @@ }, "post": { "summary": "Create a new session, or return an existing session if a session of the same name already exists.", - "tags": [ - "sessions" - ], + "tags": ["sessions"], "parameters": [ { "name": "session", @@ -375,9 +343,7 @@ ], "get": { "summary": "Get session", - "tags": [ - "sessions" - ], + "tags": ["sessions"], "responses": { "200": { "description": "Session", @@ -389,9 +355,7 @@ }, "patch": { "summary": "This can be used to rename the session.", - "tags": [ - "sessions" - ], + "tags": ["sessions"], "parameters": [ { "name": "model", @@ -419,9 +383,7 @@ }, "delete": { "summary": "Delete a session", - "tags": [ - "sessions" - ], + "tags": ["sessions"], "responses": { "204": { "description": "Session (and 
kernel) were deleted" @@ -483,11 +445,7 @@ }, "KernelSpecFile": { "description": "Kernel spec json file", - "required": [ - "argv", - "display_name", - "language" - ], + "required": ["argv", "display_name", "language"], "properties": { "language": { "type": "string", @@ -525,10 +483,7 @@ "description": "Help items to be displayed in the help menu in the notebook UI.", "items": { "type": "object", - "required": [ - "text", - "url" - ], + "required": ["text", "url"], "properties": { "text": { "type": "string", @@ -546,10 +501,7 @@ }, "Kernel": { "description": "Kernel information", - "required": [ - "id", - "name" - ], + "required": ["id", "name"], "properties": { "id": { "type": "string", diff --git a/enterprise_gateway/services/api/swagger.yaml b/enterprise_gateway/services/api/swagger.yaml index 58ae72ff3..5e5ea1c05 100644 --- a/enterprise_gateway/services/api/swagger.yaml +++ b/enterprise_gateway/services/api/swagger.yaml @@ -1,4 +1,4 @@ -swagger: '2.0' +swagger: "2.0" info: title: Jupyter Enterprise Gateway API @@ -62,7 +62,7 @@ paths: 200: description: Returns information about the API schema: - $ref: '#/definitions/ApiInfo' + $ref: "#/definitions/ApiInfo" /api/swagger.yaml: get: produces: @@ -106,7 +106,7 @@ paths: kernelspecs: type: object additionalProperties: - $ref: '#/definitions/KernelSpec' + $ref: "#/definitions/KernelSpec" /api/kernels: get: summary: List the JSON data for all currently running kernels @@ -118,12 +118,12 @@ paths: schema: type: array items: - $ref: '#/definitions/Kernel' + $ref: "#/definitions/Kernel" 403: description: | This method is not accessible when `EnterpriseGatewayApp.list_kernels` is `False`. 
schema: - $ref: '#/definitions/Error' + $ref: "#/definitions/Error" post: summary: Start a kernel and return the uuid tags: @@ -135,8 +135,8 @@ paths: type: object properties: name: - type: string - description: Kernel spec name (defaults to default kernel spec for server) + type: string + description: Kernel spec name (defaults to default kernel spec for server) env: type: object description: | @@ -148,7 +148,7 @@ paths: 201: description: The metadata about the newly created kernel. schema: - $ref: '#/definitions/Kernel' + $ref: "#/definitions/Kernel" headers: Location: description: Model for started kernel @@ -157,10 +157,10 @@ paths: 403: description: The maximum number of kernels have been created. schema: - $ref: '#/definitions/Error' + $ref: "#/definitions/Error" /api/kernels/{kernel_id}: parameters: - - $ref: '#/parameters/kernel' + - $ref: "#/parameters/kernel" get: summary: Get kernel information tags: @@ -169,7 +169,7 @@ paths: 200: description: Information about the kernel schema: - $ref: '#/definitions/Kernel' + $ref: "#/definitions/Kernel" delete: summary: Kill a kernel and delete the kernel id tags: @@ -179,7 +179,7 @@ paths: description: Kernel deleted /api/kernels/{kernel_id}/channels: parameters: - - $ref: '#/parameters/kernel' + - $ref: "#/parameters/kernel" get: summary: Upgrades the connection to a websocket connection. tags: @@ -189,7 +189,7 @@ paths: description: The connection will be upgraded to a websocket. 
/kernels/{kernel_id}/interrupt: parameters: - - $ref: '#/parameters/kernel' + - $ref: "#/parameters/kernel" post: summary: Interrupt a kernel tags: @@ -199,7 +199,7 @@ paths: description: Kernel interrupted /kernels/{kernel_id}/restart: parameters: - - $ref: '#/parameters/kernel' + - $ref: "#/parameters/kernel" post: summary: Restart a kernel tags: @@ -213,7 +213,7 @@ paths: type: string format: url schema: - $ref: '#/definitions/Kernel' + $ref: "#/definitions/Kernel" /api/sessions: get: summary: List available sessions @@ -225,13 +225,13 @@ paths: schema: type: array items: - $ref: '#/definitions/Session' + $ref: "#/definitions/Session" 403: description: | This method is not accessible when the kernel gateway when the `list_kernels` option is `False`. schema: - $ref: '#/definitions/Error' + $ref: "#/definitions/Error" post: summary: | Create a new session, or return an existing session if a session @@ -242,12 +242,12 @@ paths: - name: session in: body schema: - $ref: '#/definitions/Session' + $ref: "#/definitions/Session" responses: 201: description: Session created or returned schema: - $ref: '#/definitions/Session' + $ref: "#/definitions/Session" headers: Location: description: URL for session commands @@ -256,11 +256,11 @@ paths: 501: description: Session not available schema: - $ref: '#/definitions/Error' + $ref: "#/definitions/Error" /api/sessions/{session}: parameters: - - $ref: '#/parameters/session' + - $ref: "#/parameters/session" get: summary: Get session tags: @@ -269,7 +269,7 @@ paths: 200: description: Session schema: - $ref: '#/definitions/Session' + $ref: "#/definitions/Session" patch: summary: This can be used to rename the session. 
tags: @@ -279,16 +279,16 @@ paths: in: body required: true schema: - $ref: '#/definitions/Session' + $ref: "#/definitions/Session" responses: 200: description: Session schema: - $ref: '#/definitions/Session' + $ref: "#/definitions/Session" 400: description: No data provided schema: - $ref: '#/definitions/Error' + $ref: "#/definitions/Error" delete: summary: Delete a session tags: @@ -319,7 +319,7 @@ definitions: type: string description: Unique name for kernel KernelSpecFile: - $ref: '#/definitions/KernelSpecFile' + $ref: "#/definitions/KernelSpecFile" description: Kernel spec json file resources: type: object @@ -382,8 +382,8 @@ definitions: items: type: object required: - - text - - url + - text + - url properties: text: type: string @@ -439,7 +439,7 @@ definitions: type: string description: session type kernel: - $ref: '#/definitions/Kernel' + $ref: "#/definitions/Kernel" ApiInfo: description: Information about the api type: object diff --git a/enterprise_gateway/services/kernels/handlers.py b/enterprise_gateway/services/kernels/handlers.py index a4f5705a2..35a07ec8a 100644 --- a/enterprise_gateway/services/kernels/handlers.py +++ b/enterprise_gateway/services/kernels/handlers.py @@ -3,30 +3,30 @@ """Tornado handlers for kernel CRUD and communication.""" import json import os +from functools import partial -import tornado import jupyter_server.services.kernels.handlers as jupyter_server_handlers +import tornado from jupyter_client.jsonutil import date_default from tornado import web -from functools import partial -from ...mixins import TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin + +from ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin -class MainKernelHandler(TokenAuthorizationMixin, - CORSMixin, - JSONErrorsMixin, - jupyter_server_handlers.MainKernelHandler): +class MainKernelHandler( + TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.MainKernelHandler +): """Extends the jupyter_server main kernel handler 
with token auth, CORS, and JSON errors. """ @property def env_whitelist(self): - return self.settings['eg_env_whitelist'] + return self.settings["eg_env_whitelist"] @property def env_process_whitelist(self): - return self.settings['eg_env_process_whitelist'] + return self.settings["eg_env_process_whitelist"] async def post(self): """Overrides the super class method to manage env in the request body. @@ -38,36 +38,46 @@ async def post(self): tornado.web.HTTPError 403 Forbidden if either max kernel limit is reached (total or per user, if configured) """ - max_kernels = self.settings['eg_max_kernels'] + max_kernels = self.settings["eg_max_kernels"] if max_kernels is not None: - km = self.settings['kernel_manager'] + km = self.settings["kernel_manager"] kernels = km.list_kernels() if len(kernels) >= max_kernels: - raise tornado.web.HTTPError(403, 'Resource Limit') + raise tornado.web.HTTPError(403, "Resource Limit") # Try to get env vars from the request body model = self.get_json_body() - if model is not None and 'env' in model: - if not isinstance(model['env'], dict): + if model is not None and "env" in model: + if not isinstance(model["env"], dict): raise tornado.web.HTTPError(400) # Start with the PATH from the current env. Do not provide the entire environment # which might contain server secrets that should not be passed to kernels. - env = {'PATH': os.getenv('PATH', '')} + env = {"PATH": os.getenv("PATH", "")} # Whitelist environment variables from current process environment - env.update({key: value for key, value in os.environ.items() - if key in self.env_process_whitelist}) + env.update( + { + key: value + for key, value in os.environ.items() + if key in self.env_process_whitelist + } + ) # Whitelist KERNEL_* args and those allowed by configuration from client. If all # envs are requested, just use the keys from the payload. 
env_whitelist = self.env_whitelist - if env_whitelist == ['*']: - env_whitelist = model['env'].keys() - env.update({key: value for key, value in model['env'].items() - if key.startswith('KERNEL_') or key in env_whitelist}) + if env_whitelist == ["*"]: + env_whitelist = model["env"].keys() + env.update( + { + key: value + for key, value in model["env"].items() + if key.startswith("KERNEL_") or key in env_whitelist + } + ) # If kernel_headers are configured, fetch each of those and include in start request kernel_headers = {} missing_headers = [] - kernel_header_names = self.settings['eg_kernel_headers'] + kernel_header_names = self.settings["eg_kernel_headers"] for name in kernel_header_names: if name: # Ignore things like empty strings value = self.request.headers.get(name) @@ -77,21 +87,24 @@ async def post(self): missing_headers.append(name) if len(missing_headers): - self.log.warning("The following headers specified in 'kernel-headers' were not found: {}". - format(missing_headers)) + self.log.warning( + "The following headers specified in 'kernel-headers' were not found: {}".format( + missing_headers + ) + ) # No way to override the call to start_kernel on the kernel manager # so do a temporary partial (ugh) orig_start = self.kernel_manager.start_kernel - self.kernel_manager.start_kernel = partial(self.kernel_manager.start_kernel, - env=env, - kernel_headers=kernel_headers) + self.kernel_manager.start_kernel = partial( + self.kernel_manager.start_kernel, env=env, kernel_headers=kernel_headers + ) try: - await super(MainKernelHandler, self).post() + await super().post() finally: self.kernel_manager.start_kernel = orig_start else: - await super(MainKernelHandler, self).post() + await super().post() async def get(self): """Overrides the super class method to honor the kernel listing @@ -104,20 +117,19 @@ async def get(self): tornado.web.HTTPError 403 Forbidden if kernel listing is disabled """ - if not self.settings.get('eg_list_kernels'): - raise 
tornado.web.HTTPError(403, 'Forbidden') + if not self.settings.get("eg_list_kernels"): + raise tornado.web.HTTPError(403, "Forbidden") else: - await super(MainKernelHandler, self).get() + await super().get() def options(self, **kwargs): """Method for properly handling CORS pre-flight""" self.finish() -class KernelHandler(TokenAuthorizationMixin, - CORSMixin, - JSONErrorsMixin, - jupyter_server_handlers.KernelHandler): +class KernelHandler( + TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.KernelHandler +): """Extends the jupyter_server kernel handler with token auth, CORS, and JSON errors. """ diff --git a/enterprise_gateway/services/kernels/remotemanager.py b/enterprise_gateway/services/kernels/remotemanager.py index c99dfcf54..4cb142f78 100644 --- a/enterprise_gateway/services/kernels/remotemanager.py +++ b/enterprise_gateway/services/kernels/remotemanager.py @@ -3,18 +3,20 @@ """Kernel managers that operate against a remote process.""" import os -import signal import re +import signal import uuid -from tornado import web -from jupyter_server.services.kernels.kernelmanager import AsyncMappingKernelManager from jupyter_client.ioloop.manager import AsyncIOLoopKernelManager -from traitlets import directional_link, log as traitlets_log +from jupyter_server.services.kernels.kernelmanager import AsyncMappingKernelManager +from tornado import web +from traitlets import directional_link +from traitlets import log as traitlets_log + +from enterprise_gateway.mixins import EnterpriseGatewayConfigMixin from ..processproxies.processproxy import LocalProcessProxy, RemoteProcessProxy from ..sessions.kernelsessionmanager import KernelSessionManager -from enterprise_gateway.mixins import EnterpriseGatewayConfigMixin def import_item(name): @@ -31,7 +33,7 @@ def import_item(name): The module that was imported. """ - parts = name.rsplit('.', 1) + parts = name.rsplit(".", 1) if len(parts) == 2: # called with 'foo.bar....' 
package, obj = parts @@ -39,7 +41,7 @@ def import_item(name): try: pak = getattr(module, obj) except AttributeError: - raise ImportError('No module named %s' % obj) + raise ImportError("No module named %s" % obj) return pak else: # called with un-dotted string @@ -65,13 +67,16 @@ def get_process_proxy_config(kernelspec): information. If no `config` sub-dictionary exists, an empty `config` dictionary will be present. """ - if 'process_proxy' in kernelspec.metadata: - process_proxy = kernelspec.metadata.get('process_proxy') - if 'class_name' in process_proxy: # If no class_name, return default - if 'config' not in process_proxy: # if class_name, but no config stanza, add one + if "process_proxy" in kernelspec.metadata: + process_proxy = kernelspec.metadata.get("process_proxy") + if "class_name" in process_proxy: # If no class_name, return default + if "config" not in process_proxy: # if class_name, but no config stanza, add one process_proxy.update({"config": {}}) return process_proxy # Return what we found (plus config stanza if necessary) - return {"class_name": "enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy", "config": {}} + return { + "class_name": "enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy", + "config": {}, + } def new_kernel_id(**kwargs): @@ -93,34 +98,37 @@ def new_kernel_id(**kwargs): log = kwargs.pop("log", None) or traitlets_log.get_logger() kernel_id_fn = kwargs.pop("kernel_id_fn", None) or (lambda: str(uuid.uuid4())) - env = kwargs.get('env') - if env and env.get('KERNEL_ID'): # If there's a KERNEL_ID in the env, check it out + env = kwargs.get("env") + if env and env.get("KERNEL_ID"): # If there's a KERNEL_ID in the env, check it out # convert string back to UUID - validating string in the process. 
- str_kernel_id = env.get('KERNEL_ID') + str_kernel_id = env.get("KERNEL_ID") try: str_v4_kernel_id = str(uuid.UUID(str_kernel_id, version=4)) if str_kernel_id != str_v4_kernel_id: # Given string is not uuid v4 compliant raise ValueError("value is not uuid v4 compliant") except ValueError as ve: - log.error("Invalid v4 UUID value detected in ['env']['KERNEL_ID']: '{}'! Error: {}". - format(str_kernel_id, ve)) + log.error( + "Invalid v4 UUID value detected in ['env']['KERNEL_ID']: '{}'! Error: {}".format( + str_kernel_id, ve + ) + ) raise ve # user-provided id is valid, use it kernel_id = str(str_kernel_id) - log.debug("Using user-provided kernel_id: {}".format(kernel_id)) + log.debug(f"Using user-provided kernel_id: {kernel_id}") else: kernel_id = kernel_id_fn(**kwargs) return kernel_id -class TrackPendingRequests(): +class TrackPendingRequests: """ - Simple class to track (increment/decrement) pending kernel start requests, both total and per user. + Simple class to track (increment/decrement) pending kernel start requests, both total and per user. - This tracking is necessary due to an inherent race condition that occurs now that kernel startup is - asynchronous. As a result, multiple/simultaneous requests must be considered, in addition all existing - kernel sessions. + This tracking is necessary due to an inherent race condition that occurs now that kernel startup is + asynchronous. As a result, multiple/simultaneous requests must be considered, in addition all existing + kernel sessions. 
""" _pending_requests_all = 0 @@ -129,12 +137,12 @@ class TrackPendingRequests(): def increment(self, username: str) -> None: self._pending_requests_all += 1 cur_val = int(self._pending_requests_user.get(username, 0)) - self._pending_requests_user[username] = (cur_val + 1) + self._pending_requests_user[username] = cur_val + 1 def decrement(self, username: str) -> None: self._pending_requests_all -= 1 cur_val = int(self._pending_requests_user.get(username)) - self._pending_requests_user[username] = (cur_val - 1) + self._pending_requests_user[username] = cur_val - 1 def get_counts(self, username): return self._pending_requests_all, int(self._pending_requests_user.get(username, 0)) @@ -148,14 +156,14 @@ class RemoteMappingKernelManager(AsyncMappingKernelManager): pending_requests = TrackPendingRequests() # Used to enforce max-kernel limits def _kernel_manager_class_default(self): - return 'enterprise_gateway.services.kernels.remotemanager.RemoteKernelManager' + return "enterprise_gateway.services.kernels.remotemanager.RemoteKernelManager" def check_kernel_id(self, kernel_id): """Check that a kernel_id exists and raise 404 if not.""" if kernel_id not in self: if not self._refresh_kernel(kernel_id): self.parent.kernel_session_manager.delete_session(kernel_id) - raise web.HTTPError(404, u'Kernel does not exist: %s' % kernel_id) + raise web.HTTPError(404, "Kernel does not exist: %s" % kernel_id) def _refresh_kernel(self, kernel_id): self.parent.kernel_session_manager.load_session(kernel_id) @@ -172,15 +180,18 @@ async def start_kernel(self, *args, **kwargs): of the input parameter `kernel_id` if one was provided. """ username = KernelSessionManager.get_kernel_username(**kwargs) - self.log.debug("RemoteMappingKernelManager.start_kernel: {kernel_name}, kernel_username: {username}". 
- format(kernel_name=kwargs['kernel_name'], username=username)) + self.log.debug( + "RemoteMappingKernelManager.start_kernel: {kernel_name}, kernel_username: {username}".format( + kernel_name=kwargs["kernel_name"], username=username + ) + ) # Check max kernel limits self._enforce_kernel_limits(username) RemoteMappingKernelManager.pending_requests.increment(username) try: - kernel_id = await super(RemoteMappingKernelManager, self).start_kernel(*args, **kwargs) + kernel_id = await super().start_kernel(*args, **kwargs) finally: RemoteMappingKernelManager.pending_requests.decrement(username) self.parent.kernel_session_manager.create_session(kernel_id, **kwargs) @@ -192,28 +203,41 @@ def _enforce_kernel_limits(self, username: str) -> None: """ if self.parent.max_kernels is not None or self.parent.max_kernels_per_user >= 0: - pending_all, pending_user = RemoteMappingKernelManager.pending_requests.get_counts(username) + pending_all, pending_user = RemoteMappingKernelManager.pending_requests.get_counts( + username + ) # Enforce overall limit... if self.parent.max_kernels is not None: active_and_pending = len(self.list_kernels()) + pending_all if active_and_pending >= self.parent.max_kernels: - error_message = "A max kernels limit has been set to {} and there are " \ - "currently {} active and pending {}.". \ - format(self.parent.max_kernels, active_and_pending, - "kernel" if active_and_pending == 1 else "kernels") + error_message = ( + "A max kernels limit has been set to {} and there are " + "currently {} active and pending {}.".format( + self.parent.max_kernels, + active_and_pending, + "kernel" if active_and_pending == 1 else "kernels", + ) + ) self.log.error(error_message) raise web.HTTPError(403, error_message) # Enforce per-user limit... 
if self.parent.max_kernels_per_user >= 0: if self.parent.kernel_session_manager: - active_and_pending = self.parent.kernel_session_manager.active_sessions(username) + pending_user + active_and_pending = ( + self.parent.kernel_session_manager.active_sessions(username) + pending_user + ) if active_and_pending >= self.parent.max_kernels_per_user: - error_message = "A max kernels per user limit has been set to {} and user '{}' " \ - "currently has {} active and pending {}.".\ - format(self.parent.max_kernels_per_user, username, active_and_pending, - "kernel" if active_and_pending == 1 else "kernels") + error_message = ( + "A max kernels per user limit has been set to {} and user '{}' " + "currently has {} active and pending {}.".format( + self.parent.max_kernels_per_user, + username, + active_and_pending, + "kernel" if active_and_pending == 1 else "kernels", + ) + ) self.log.error(error_message) raise web.HTTPError(403, error_message) return @@ -222,10 +246,12 @@ def remove_kernel(self, kernel_id): """ Removes the kernel associated with `kernel_id` from the internal map and deletes the kernel session. """ - super(RemoteMappingKernelManager, self).remove_kernel(kernel_id) + super().remove_kernel(kernel_id) self.parent.kernel_session_manager.delete_session(kernel_id) - def start_kernel_from_session(self, kernel_id, kernel_name, connection_info, process_info, launch_args): + def start_kernel_from_session( + self, kernel_id, kernel_name, connection_info, process_info, launch_args + ): """ Starts a kernel from a persisted kernel session. @@ -265,13 +291,16 @@ def start_kernel_from_session(self, kernel_id, kernel_name, connection_info, pro # alive. constructor_kwargs = {} if self.kernel_spec_manager: - constructor_kwargs['kernel_spec_manager'] = self.kernel_spec_manager + constructor_kwargs["kernel_spec_manager"] = self.kernel_spec_manager # Construct a kernel manager... 
- km = self.kernel_manager_factory(connection_file=os.path.join( - self.connection_dir, "kernel-%s.json" % kernel_id), - parent=self, log=self.log, kernel_name=kernel_name, - **constructor_kwargs) + km = self.kernel_manager_factory( + connection_file=os.path.join(self.connection_dir, "kernel-%s.json" % kernel_id), + parent=self, + log=self.log, + kernel_name=kernel_name, + **constructor_kwargs, + ) # Load connection info into member vars - no need to write out connection file km.load_connection_info(connection_info) @@ -280,8 +309,8 @@ def start_kernel_from_session(self, kernel_id, kernel_name, connection_info, pro # Construct a process-proxy process_proxy = get_process_proxy_config(km.kernel_spec) - process_proxy_class = import_item(process_proxy.get('class_name')) - km.process_proxy = process_proxy_class(km, proxy_config=process_proxy.get('config')) + process_proxy_class = import_item(process_proxy.get("class_name")) + km.process_proxy = process_proxy_class(km, proxy_config=process_proxy.get("config")) km.process_proxy.load_process_info(process_info) # Confirm we can even poll the process. If not, remove the persisted session. @@ -294,11 +323,13 @@ def start_kernel_from_session(self, kernel_id, kernel_name, connection_info, pro self._kernels[kernel_id] = km self._kernel_connections[kernel_id] = 0 self.start_watching_activity(kernel_id) - self.add_restart_callback(kernel_id, - lambda: self._handle_kernel_died(kernel_id), - 'dead', ) + self.add_restart_callback( + kernel_id, + lambda: self._handle_kernel_died(kernel_id), + "dead", + ) # Only initialize culling if available. Warning message will be issued in gatewayapp at startup. - func = getattr(self, 'initialize_culler', None) + func = getattr(self, "initialize_culler", None) if func: func() return True @@ -308,7 +339,7 @@ def new_kernel_id(self, **kwargs): Determines the kernel_id to use for a new kernel. 
""" - return new_kernel_id(kernel_id_fn=super(RemoteMappingKernelManager, self).new_kernel_id, log=self.log, **kwargs) + return new_kernel_id(kernel_id_fn=super().new_kernel_id, log=self.log, **kwargs) class RemoteKernelManager(EnterpriseGatewayConfigMixin, AsyncIOLoopKernelManager): @@ -321,7 +352,7 @@ class RemoteKernelManager(EnterpriseGatewayConfigMixin, AsyncIOLoopKernelManager """ def __init__(self, **kwargs): - super(RemoteKernelManager, self).__init__(**kwargs) + super().__init__(**kwargs) self.process_proxy = None self.response_address = None self.public_key = None @@ -360,20 +391,23 @@ def _link_dependent_props(self): eg_instance = self.parent.parent except AttributeError: return - dependent_props = ["authorized_users", - "unauthorized_users", - "port_range", - "impersonation_enabled", - "max_kernels_per_user", - "env_whitelist", - "env_process_whitelist", - "yarn_endpoint", - "alt_yarn_endpoint", - "yarn_endpoint_security_enabled", - "conductor_endpoint", - "remote_hosts" - ] - self._links = [directional_link((eg_instance, prop), (self, prop)) for prop in dependent_props] + dependent_props = [ + "authorized_users", + "unauthorized_users", + "port_range", + "impersonation_enabled", + "max_kernels_per_user", + "env_whitelist", + "env_process_whitelist", + "yarn_endpoint", + "alt_yarn_endpoint", + "yarn_endpoint_security_enabled", + "conductor_endpoint", + "remote_hosts", + ] + self._links = [ + directional_link((eg_instance, prop), (self, prop)) for prop in dependent_props + ] async def start_kernel(self, **kwargs): """ @@ -389,38 +423,43 @@ async def start_kernel(self, **kwargs): """ self._get_process_proxy() self._capture_user_overrides(**kwargs) - await super(RemoteKernelManager, self).start_kernel(**kwargs) + await super().start_kernel(**kwargs) def _capture_user_overrides(self, **kwargs): """ - Make a copy of any whitelist or KERNEL_ env values provided by user. These will be injected - back into the env after the kernelspec env has been applied. 
This enables defaulting behavior - of the kernelspec env stanza that would have otherwise overridden the user-provided values. - """ - env = kwargs.get('env', {}) - self.user_overrides.update({key: value for key, value in env.items() - if key.startswith('KERNEL_') or - key in self.env_process_whitelist or - key in self.env_whitelist}) + Make a copy of any whitelist or KERNEL_ env values provided by user. These will be injected + back into the env after the kernelspec env has been applied. This enables defaulting behavior + of the kernelspec env stanza that would have otherwise overridden the user-provided values. + """ + env = kwargs.get("env", {}) + self.user_overrides.update( + { + key: value + for key, value in env.items() + if key.startswith("KERNEL_") + or key in self.env_process_whitelist + or key in self.env_whitelist + } + ) def format_kernel_cmd(self, extra_arguments=None): """ Replace templated args (e.g. {response_address}, {port_range}, or {kernel_id}). """ - cmd = super(RemoteKernelManager, self).format_kernel_cmd(extra_arguments) + cmd = super().format_kernel_cmd(extra_arguments) if self.response_address or self.port_range or self.kernel_id or self.public_key: ns = self._launch_args.copy() if self.response_address: - ns['response_address'] = self.response_address + ns["response_address"] = self.response_address if self.public_key: - ns['public_key'] = self.public_key + ns["public_key"] = self.public_key if self.port_range: - ns['port_range'] = self.port_range + ns["port_range"] = self.port_range if self.kernel_id: - ns['kernel_id'] = self.kernel_id + ns["kernel_id"] = self.kernel_id - pat = re.compile(r'\{([A-Za-z0-9_]+)\}') + pat = re.compile(r"\{([A-Za-z0-9_]+)\}") def from_ns(match): """Get the key out of ns if it's there, otherwise no change.""" @@ -434,20 +473,24 @@ async def _launch_kernel(self, kernel_cmd, **kwargs): # this method should be "[overridden] in a subclass to launch kernel subprocesses differently". # So that's what we've done. 
- env = kwargs['env'] + env = kwargs["env"] # Apply user_overrides to enable defaulting behavior from kernelspec.env stanza. Note that we do this # BEFORE setting KERNEL_GATEWAY and removing {EG,KG}_AUTH_TOKEN so those operations cannot be overridden. env.update(self.user_overrides) # No longer using Kernel Gateway, but retain references of B/C purposes - env['KERNEL_GATEWAY'] = '1' - if 'EG_AUTH_TOKEN' in env: - del env['EG_AUTH_TOKEN'] - if 'KG_AUTH_TOKEN' in env: - del env['KG_AUTH_TOKEN'] - - self.log.debug("Launching kernel: '{}' with command: {}".format(self.kernel_spec.display_name, kernel_cmd)) + env["KERNEL_GATEWAY"] = "1" + if "EG_AUTH_TOKEN" in env: + del env["EG_AUTH_TOKEN"] + if "KG_AUTH_TOKEN" in env: + del env["KG_AUTH_TOKEN"] + + self.log.debug( + "Launching kernel: '{}' with command: {}".format( + self.kernel_spec.display_name, kernel_cmd + ) + ) proxy = await self.process_proxy.launch_process(kernel_cmd, **kwargs) return proxy @@ -456,7 +499,7 @@ def request_shutdown(self, restart=False): """ Send a shutdown request via control channel and process proxy (if remote). """ - super(RemoteKernelManager, self).request_shutdown(restart) + super().request_shutdown(restart) # If we're using a remote proxy, we need to send the launcher indication that we're # shutting down so it can exit its listener thread, if its using one. @@ -486,18 +529,26 @@ async def restart_kernel(self, now=False, **kwargs): kernel. """ self.restarting = True - kernel_id = self.kernel_id or os.path.basename(self.connection_file).replace('kernel-', '').replace('.json', '') + kernel_id = self.kernel_id or os.path.basename(self.connection_file).replace( + "kernel-", "" + ).replace(".json", "") # Check if this is a remote process proxy and if now = True. If so, check its connection count. If no # connections, shutdown else perform the restart. Note: auto-restart sets now=True, but handlers use # the default value (False). 
- if isinstance(self.process_proxy, RemoteProcessProxy) and now and self.mapping_kernel_manager: + if ( + isinstance(self.process_proxy, RemoteProcessProxy) + and now + and self.mapping_kernel_manager + ): if self.mapping_kernel_manager._kernel_connections.get(kernel_id, 0) == 0: - self.log.warning("Remote kernel ({}) will not be automatically restarted since there are no " - "clients connected at this time.".format(kernel_id)) + self.log.warning( + "Remote kernel ({}) will not be automatically restarted since there are no " + "clients connected at this time.".format(kernel_id) + ) # Use the parent mapping kernel manager so activity monitoring and culling is also shutdown self.mapping_kernel_manager.shutdown_kernel(kernel_id, now=now) return - await super(RemoteKernelManager, self).restart_kernel(now, **kwargs) + await super().restart_kernel(now, **kwargs) if isinstance(self.process_proxy, RemoteProcessProxy): # for remote kernels... # Re-establish activity watching... if self._activity_stream: @@ -522,7 +573,7 @@ async def signal_kernel(self, signum): # This is required for kernels whose language may prevent signals across # process/user boundaries (Scala, for example). self.sigint_value = signum # use default - alt_sigint = self.kernel_spec.env.get('EG_ALTERNATE_SIGINT') + alt_sigint = self.kernel_spec.env.get("EG_ALTERNATE_SIGINT") if alt_sigint: try: sig_value = getattr(signal, alt_sigint) @@ -532,12 +583,15 @@ async def signal_kernel(self, signum): self.sigint_value = sig_value.value self.log.debug( "Converted EG_ALTERNATE_SIGINT '{}' to value '{}' to use as interrupt signal.".format( - alt_sigint, self.sigint_value)) + alt_sigint, self.sigint_value + ) + ) except AttributeError: - self.log.warning("Error received when attempting to convert EG_ALTERNATE_SIGINT of " - "'{}' to a value. Check kernelspec entry for kernel '{}' - using " - "default 'SIGINT'". 
- format(alt_sigint, self.kernel_spec.display_name)) + self.log.warning( + "Error received when attempting to convert EG_ALTERNATE_SIGINT of " + "'{}' to a value. Check kernelspec entry for kernel '{}' - using " + "default 'SIGINT'".format(alt_sigint, self.kernel_spec.display_name) + ) self.kernel.send_signal(self.sigint_value) else: self.kernel.send_signal(signum) @@ -559,7 +613,7 @@ def cleanup(self, connection_file=True): if self.process_proxy: self.process_proxy.cleanup() self.process_proxy = None - return super(RemoteKernelManager, self).cleanup(connection_file) + return super().cleanup(connection_file) def cleanup_resources(self, restart=False): """ @@ -577,7 +631,7 @@ def cleanup_resources(self, restart=False): self.process_proxy.cleanup() self.process_proxy = None - return super(RemoteKernelManager, self).cleanup_resources(restart) + return super().cleanup_resources(restart) def write_connection_file(self): """ @@ -587,7 +641,9 @@ def write_connection_file(self): write_connection_file since it will create 5 useless ports that would not adhere to port-range restrictions if configured. """ - if (isinstance(self.process_proxy, LocalProcessProxy) or not self.response_address) and not self.restarting: + if ( + isinstance(self.process_proxy, LocalProcessProxy) or not self.response_address + ) and not self.restarting: # However, since we *may* want to limit the selected ports, go ahead and get the ports using # the process proxy (will be LocalProcessProxy for default case) since the port selection will # handle the default case when the member ports aren't set anyway. @@ -598,21 +654,26 @@ def write_connection_file(self): self.hb_port = ports[3] self.control_port = ports[4] - return super(RemoteKernelManager, self).write_connection_file() + return super().write_connection_file() def _get_process_proxy(self): """ - Reads the associated kernelspec and to see if has a process proxy stanza. - If one exists, it instantiates an instance. 
If a process proxy is not - specified in the kernelspec, a LocalProcessProxy stanza is fabricated and - instantiated. + Reads the associated kernelspec to see if it has a process proxy stanza. + If one exists, it instantiates an instance. If a process proxy is not + specified in the kernelspec, a LocalProcessProxy stanza is fabricated and + instantiated. """ process_proxy_cfg = get_process_proxy_config(self.kernel_spec) - process_proxy_class_name = process_proxy_cfg.get('class_name') - self.log.debug("Instantiating kernel '{}' with process proxy: {}". - format(self.kernel_spec.display_name, process_proxy_class_name)) + process_proxy_class_name = process_proxy_cfg.get("class_name") + self.log.debug( + "Instantiating kernel '{}' with process proxy: {}".format( + self.kernel_spec.display_name, process_proxy_class_name + ) + ) process_proxy_class = import_item(process_proxy_class_name) - self.process_proxy = process_proxy_class(kernel_manager=self, proxy_config=process_proxy_cfg.get('config')) + self.process_proxy = process_proxy_class( + kernel_manager=self, proxy_config=process_proxy_cfg.get("config") + ) # When this class is used by an EnterpriseGatewayApp instance, it will be able to # access the app's configuration using the traitlet parent chain. diff --git a/enterprise_gateway/services/kernelspecs/__init__.py b/enterprise_gateway/services/kernelspecs/__init__.py index 115de8eda..4882665cc 100644 --- a/enterprise_gateway/services/kernelspecs/__init__.py +++ b/enterprise_gateway/services/kernelspecs/__init__.py @@ -1,4 +1,4 @@ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
-from .kernelspec_cache import KernelSpecCache \ No newline at end of file +from .kernelspec_cache import KernelSpecCache diff --git a/enterprise_gateway/services/kernelspecs/handlers.py b/enterprise_gateway/services/kernelspecs/handlers.py index 5a95435ea..b334765b5 100644 --- a/enterprise_gateway/services/kernelspecs/handlers.py +++ b/enterprise_gateway/services/kernelspecs/handlers.py @@ -2,22 +2,28 @@ # Distributed under the terms of the Modified BSD License. """Tornado handlers for kernel specs.""" import json +from typing import Dict, List, Optional from jupyter_server.base.handlers import JupyterHandler -from jupyter_server.services.kernelspecs.handlers import is_kernelspec_model, kernelspec_model +from jupyter_server.services.kernelspecs.handlers import ( + is_kernelspec_model, + kernelspec_model, +) from jupyter_server.utils import ensure_async, url_unescape from tornado import web +from traitlets import Set + from ...base.handlers import APIHandler -from ...mixins import TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin +from ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin from .kernelspec_cache import KernelSpecCache -from traitlets import Set -from typing import List, Optional, Dict -def apply_user_filter(kernelspec_model: Dict[str, object], - global_authorized_list: Set, - global_unauthorized_list: Set, - kernel_user: str = None) -> Optional[Dict[str, object]]: +def apply_user_filter( + kernelspec_model: Dict[str, object], + global_authorized_list: Set, + global_unauthorized_list: Set, + kernel_user: str = None, +) -> Optional[Dict[str, object]]: """ If authorization lists are configured - either within the kernelspec or globally, ensure the user is authorized for the given kernelspec. @@ -27,7 +33,9 @@ def apply_user_filter(kernelspec_model: Dict[str, object], # semantics of which are a union of the two lists. 
try: # Check if kernel_user in kernelspec_model - unauthorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"]["unauthorized_users"] + unauthorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"][ + "unauthorized_users" + ] except KeyError: pass else: @@ -41,7 +49,9 @@ def apply_user_filter(kernelspec_model: Dict[str, object], # users may be a subset of globally authorized users and is, essentially, used as a denial to those # not defined in the kernelspec's list. try: - authorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"]["authorized_users"] + authorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"][ + "authorized_users" + ] except KeyError: if global_authorized_list and kernel_user not in global_authorized_list: return None @@ -52,24 +62,20 @@ def apply_user_filter(kernelspec_model: Dict[str, object], return kernelspec_model -class MainKernelSpecHandler(TokenAuthorizationMixin, - CORSMixin, - JSONErrorsMixin, - APIHandler): - +class MainKernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler): @property def kernel_spec_cache(self) -> KernelSpecCache: - return self.settings['kernel_spec_cache'] + return self.settings["kernel_spec_cache"] @web.authenticated async def get(self) -> None: ksm = self.kernel_spec_cache km = self.kernel_manager model = {} - model['default'] = km.default_kernel_name - model['kernelspecs'] = specs = {} + model["default"] = km.default_kernel_name + model["kernelspecs"] = specs = {} - kernel_user_filter = self.request.query_arguments.get('user') + kernel_user_filter = self.request.query_arguments.get("user") kernel_user = None if kernel_user_filter: kernel_user = kernel_user_filter[0].decode("utf-8") @@ -84,71 +90,79 @@ async def get(self) -> None: if is_kernelspec_model(kernel_info): d = kernel_info else: - d = kernelspec_model(self, kernel_name, kernel_info['spec'], kernel_info['resource_dir']) - d = 
apply_user_filter(d, self.settings['eg_authorized_users'], - self.settings['eg_unauthorized_users'], kernel_user) + d = kernelspec_model( + self, kernel_name, kernel_info["spec"], kernel_info["resource_dir"] + ) + d = apply_user_filter( + d, + self.settings["eg_authorized_users"], + self.settings["eg_unauthorized_users"], + kernel_user, + ) if d is not None: specs[kernel_name] = d - list_kernels_found.append(d['name']) + list_kernels_found.append(d["name"]) else: - self.log.debug('User %s is not authorized to use kernel spec %s' % (kernel_user, kernel_name)) + self.log.debug( + "User %s is not authorized to use kernel spec %s" + % (kernel_user, kernel_name) + ) except Exception: self.log.error("Failed to load kernel spec: '%s'", kernel_name) continue - self.set_header("Content-Type", 'application/json') + self.set_header("Content-Type", "application/json") self.finish(json.dumps(model)) -class KernelSpecHandler(TokenAuthorizationMixin, - CORSMixin, - JSONErrorsMixin, - APIHandler): - +class KernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler): @property def kernel_spec_cache(self) -> KernelSpecCache: - return self.settings['kernel_spec_cache'] + return self.settings["kernel_spec_cache"] @web.authenticated async def get(self, kernel_name: str) -> None: ksm = self.kernel_spec_cache kernel_name = url_unescape(kernel_name) - kernel_user_filter = self.request.query_arguments.get('user') + kernel_user_filter = self.request.query_arguments.get("user") kernel_user = None if kernel_user_filter: kernel_user = kernel_user_filter[0].decode("utf-8") try: spec = await ensure_async(ksm.get_kernel_spec(kernel_name)) except KeyError: - raise web.HTTPError(404, u'Kernel spec %s not found' % kernel_name) + raise web.HTTPError(404, "Kernel spec %s not found" % kernel_name) if is_kernelspec_model(spec): model = spec else: model = kernelspec_model(self, kernel_name, spec.to_dict(), spec.resource_dir) - d = apply_user_filter(model, 
self.settings['eg_authorized_users'], - self.settings['eg_unauthorized_users'], kernel_user) + d = apply_user_filter( + model, + self.settings["eg_authorized_users"], + self.settings["eg_unauthorized_users"], + kernel_user, + ) if d is None: - raise web.HTTPError(403, u'User %s is not authorized to use kernel spec %s' - % (kernel_user, kernel_name)) + raise web.HTTPError( + 403, f"User {kernel_user} is not authorized to use kernel spec {kernel_name}" + ) - self.set_header("Content-Type", 'application/json') + self.set_header("Content-Type", "application/json") self.finish(json.dumps(model)) -class KernelSpecResourceHandler(TokenAuthorizationMixin, - CORSMixin, - JSONErrorsMixin, - web.StaticFileHandler, - JupyterHandler): - SUPPORTED_METHODS = ('GET', 'HEAD') +class KernelSpecResourceHandler( + TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, web.StaticFileHandler, JupyterHandler +): + SUPPORTED_METHODS = ("GET", "HEAD") @property def kernel_spec_cache(self) -> KernelSpecCache: - return self.settings['kernel_spec_cache'] + return self.settings["kernel_spec_cache"] def initialize(self) -> None: - web.StaticFileHandler.initialize(self, path='') + web.StaticFileHandler.initialize(self, path="") @web.authenticated async def get(self, kernel_name: str, path: str, include_body: bool = True) -> None: @@ -157,8 +171,7 @@ async def get(self, kernel_name: str, path: str, include_body: bool = True) -> N kernelspec = await ensure_async(ksm.get_kernel_spec(kernel_name)) self.root = kernelspec.resource_dir except KeyError as e: - raise web.HTTPError(404, - u'Kernel spec %s not found' % kernel_name) from e + raise web.HTTPError(404, "Kernel spec %s not found" % kernel_name) from e self.log.debug("Serving kernel resource from: %s", self.root) return await web.StaticFileHandler.get(self, path, include_body=include_body) diff --git a/enterprise_gateway/services/kernelspecs/kernelspec_cache.py b/enterprise_gateway/services/kernelspecs/kernelspec_cache.py index 
ecdf20986..19c51a45e 100644 --- a/enterprise_gateway/services/kernelspecs/kernelspec_cache.py +++ b/enterprise_gateway/services/kernelspecs/kernelspec_cache.py @@ -3,15 +3,14 @@ """Cache handling for kernel specs.""" import os -from watchdog.observers import Observer -from watchdog.events import FileSystemEventHandler, FileMovedEvent +from typing import Dict, Optional, Union from jupyter_client.kernelspec import KernelSpec from jupyter_server.utils import ensure_async from traitlets.config import SingletonConfigurable from traitlets.traitlets import CBool, default -from typing import Dict, Optional, Union - +from watchdog.events import FileMovedEvent, FileSystemEventHandler +from watchdog.observers import Observer # Simplify the typing. Cache items are essentially dictionaries of strings # to either strings or dictionaries. The items themselves are indexed by @@ -32,13 +31,16 @@ class KernelSpecCache(SingletonConfigurable): conversion between formats is necessary, depending on which method is called. """ - cache_enabled_env = 'EG_KERNELSPEC_CACHE_ENABLED' - cache_enabled = CBool(False, config=True, - help="""Enable Kernel Specification caching. (EG_KERNELSPEC_CACHE_ENABLED env var)""") + cache_enabled_env = "EG_KERNELSPEC_CACHE_ENABLED" + cache_enabled = CBool( + False, + config=True, + help="""Enable Kernel Specification caching. (EG_KERNELSPEC_CACHE_ENABLED env var)""", + ) - @default('cache_enabled') + @default("cache_enabled") def cache_enabled_default(self): - return os.getenv(self.cache_enabled_env, 'false').lower() in ('true', '1') + return os.getenv(self.cache_enabled_env, "false").lower() in ("true", "1") def __init__(self, kernel_spec_manager, **kwargs) -> None: super().__init__(**kwargs) @@ -101,8 +103,11 @@ def get_item(self, kernel_name: str) -> Optional[KernelSpec]: pass if not kernelspec: self.cache_misses += 1 - self.log.debug("Cache miss ({misses}) for kernelspec: {kernel_name}". 
- format(misses=self.cache_misses, kernel_name=kernel_name)) + self.log.debug( + "Cache miss ({misses}) for kernelspec: {kernel_name}".format( + misses=self.cache_misses, kernel_name=kernel_name + ) + ) return kernelspec def get_all_items(self) -> Optional[Dict[str, CacheItemType]]: @@ -130,22 +135,30 @@ def put_item(self, kernel_name: str, cache_item: Union[KernelSpec, CacheItemType If it determines the cache entry corresponds to a currently unwatched directory, that directory will be added to list of observed directories and scheduled accordingly. """ - self.log.info("KernelSpecCache: adding/updating kernelspec: {kernel_name}".format(kernel_name=kernel_name)) + self.log.info( + "KernelSpecCache: adding/updating kernelspec: {kernel_name}".format( + kernel_name=kernel_name + ) + ) if self.cache_enabled: if type(cache_item) is KernelSpec: cache_item = KernelSpecCache.kernel_spec_to_cache_item(cache_item) - resource_dir = cache_item['resource_dir'] + resource_dir = cache_item["resource_dir"] self.cache_items[kernel_name.lower()] = cache_item observed_dir = os.path.dirname(resource_dir) if observed_dir not in self.observed_dirs: # New directory to watch, schedule it... - self.log.debug("KernelSpecCache: observing directory: {observed_dir}".format(observed_dir=observed_dir)) + self.log.debug( + "KernelSpecCache: observing directory: {observed_dir}".format( + observed_dir=observed_dir + ) + ) self.observed_dirs.add(observed_dir) self.observer.schedule(KernelSpecChangeHandler(self), observed_dir, recursive=True) def put_all_items(self, kernelspecs: Dict[str, CacheItemType]) -> None: - """Adds or updates a dictionary of kernel specification in the cache. 
""" + """Adds or updates a dictionary of kernel specification in the cache.""" if self.cache_enabled and kernelspecs: for kernel_name, cache_item in kernelspecs.items(): self.put_item(kernel_name, cache_item) @@ -156,11 +169,15 @@ def remove_item(self, kernel_name: str) -> Optional[CacheItemType]: if self.cache_enabled: if kernel_name.lower() in self.cache_items: cache_item = self.cache_items.pop(kernel_name.lower()) - self.log.info("KernelSpecCache: removed kernelspec: {kernel_name}".format(kernel_name=kernel_name)) + self.log.info( + "KernelSpecCache: removed kernelspec: {kernel_name}".format( + kernel_name=kernel_name + ) + ) return cache_item def _initialize(self): - """Initializes the cache and starts the observer. """ + """Initializes the cache and starts the observer.""" # The kernelspec cache consists of a dictionary mapping the kernel name to the actual # kernelspec data (CacheItemType). @@ -177,27 +194,34 @@ def _initialize(self): for kernel_dir in self.kernel_spec_manager.kernel_dirs: if kernel_dir not in self.observed_dirs: if os.path.exists(kernel_dir): - self.log.info("KernelSpecCache: observing directory: {kernel_dir}". 
- format(kernel_dir=kernel_dir)) + self.log.info( + "KernelSpecCache: observing directory: {kernel_dir}".format( + kernel_dir=kernel_dir + ) + ) self.observed_dirs.add(kernel_dir) - self.observer.schedule(KernelSpecChangeHandler(self), kernel_dir, recursive=True) + self.observer.schedule( + KernelSpecChangeHandler(self), kernel_dir, recursive=True + ) else: - self.log.warning("KernelSpecCache: kernel_dir '{kernel_dir}' does not exist" - " and will not be observed.".format(kernel_dir=kernel_dir)) + self.log.warning( + "KernelSpecCache: kernel_dir '{kernel_dir}' does not exist" + " and will not be observed.".format(kernel_dir=kernel_dir) + ) self.observer.start() @staticmethod def kernel_spec_to_cache_item(kernelspec: KernelSpec) -> CacheItemType: """Convets a KernelSpec instance to a CacheItemType for storage into the cache.""" cache_item = dict() - cache_item['spec'] = kernelspec.to_dict() - cache_item['resource_dir'] = kernelspec.resource_dir + cache_item["spec"] = kernelspec.to_dict() + cache_item["resource_dir"] = kernelspec.resource_dir return cache_item @staticmethod def cache_item_to_kernel_spec(cache_item: CacheItemType) -> KernelSpec: """Converts a CacheItemType to a KernelSpec instance for user consumption.""" - return KernelSpec.from_resource_dir(cache_item['resource_dir']) + return KernelSpec.from_resource_dir(cache_item["resource_dir"]) class KernelSpecChangeHandler(FileSystemEventHandler): @@ -206,10 +230,10 @@ class KernelSpecChangeHandler(FileSystemEventHandler): # Events related to these files trigger the management of the KernelSpec cache. Should we find # other files qualify as indicators of a kernel specification's state (like perhaps detached parameter # files in the future) should be added to this list - at which time it should become configurable. 
- watched_files = ['kernel.json'] + watched_files = ["kernel.json"] def __init__(self, kernel_spec_cache: KernelSpecCache, **kwargs): - super(KernelSpecChangeHandler, self).__init__(**kwargs) + super().__init__(**kwargs) self.kernel_spec_cache = kernel_spec_cache self.log = kernel_spec_cache.log @@ -234,7 +258,7 @@ def dispatch(self, event): event.dest_resource_dir = dest_resource_dir event.dest_kernel_name = os.path.basename(dest_resource_dir) - super(KernelSpecChangeHandler, self).dispatch(event) + super().dispatch(event) def on_created(self, event): """Fires when a watched file is created. @@ -247,8 +271,10 @@ def on_created(self, event): kernelspec = self.kernel_spec_cache.kernel_spec_manager.get_kernel_spec(kernel_name) self.kernel_spec_cache.put_item(kernel_name, kernelspec) except Exception as e: - self.log.warning("The following exception occurred creating cache entry for: {src_resource_dir} " - "- continuing... ({e})".format(src_resource_dir=event.src_resource_dir, e=e)) + self.log.warning( + "The following exception occurred creating cache entry for: {src_resource_dir} " + "- continuing... ({e})".format(src_resource_dir=event.src_resource_dir, e=e) + ) def on_deleted(self, event): """Fires when a watched file is deleted, triggering a removal of the corresponding item from the cache.""" @@ -266,8 +292,10 @@ def on_modified(self, event): kernelspec = self.kernel_spec_cache.kernel_spec_manager.get_kernel_spec(kernel_name) self.kernel_spec_cache.put_item(kernel_name, kernelspec) except Exception as e: - self.log.warning("The following exception occurred updating cache entry for: {src_resource_dir} " - "- continuing... ({e})".format(src_resource_dir=event.src_resource_dir, e=e)) + self.log.warning( + "The following exception occurred updating cache entry for: {src_resource_dir} " + "- continuing... ({e})".format(src_resource_dir=event.src_resource_dir, e=e) + ) def on_moved(self, event): """Fires when a watched file is moved. 
@@ -278,5 +306,5 @@ def on_moved(self, event): src_kernel_name = event.src_kernel_name dest_kernel_name = event.dest_kernel_name cache_item = self.kernel_spec_cache.remove_item(src_kernel_name) - cache_item['resource_dir'] = event.dest_resource_dir + cache_item["resource_dir"] = event.dest_resource_dir self.kernel_spec_cache.put_item(dest_kernel_name, cache_item) diff --git a/enterprise_gateway/services/processproxies/conductor.py b/enterprise_gateway/services/processproxies/conductor.py index f8890585c..3d5d874ff 100644 --- a/enterprise_gateway/services/processproxies/conductor.py +++ b/enterprise_gateway/services/processproxies/conductor.py @@ -3,79 +3,88 @@ """Code related to managing kernels running in Conductor clusters.""" import asyncio -import os import json +import os import re import signal import socket import subprocess import time +from random import randint from jupyter_client import localinterfaces - -from .processproxy import RemoteProcessProxy - from jupyter_server.utils import url_unescape -from random import randint +from .processproxy import RemoteProcessProxy pjoin = os.path.join local_ip = localinterfaces.public_ips()[0] -poll_interval = float(os.getenv('EG_POLL_INTERVAL', '0.5')) -max_poll_attempts = int(os.getenv('EG_MAX_POLL_ATTEMPTS', '10')) +poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5")) +max_poll_attempts = int(os.getenv("EG_MAX_POLL_ATTEMPTS", "10")) class ConductorClusterProcessProxy(RemoteProcessProxy): """ Kernel lifecycle management for Conductor clusters. 
""" - initial_states = {'SUBMITTED', 'WAITING', 'RUNNING'} - final_states = {'FINISHED', 'KILLED', 'RECLAIMED'} # Don't include FAILED state + + initial_states = {"SUBMITTED", "WAITING", "RUNNING"} + final_states = {"FINISHED", "KILLED", "RECLAIMED"} # Don't include FAILED state def __init__(self, kernel_manager, proxy_config): - super(ConductorClusterProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) self.application_id = None self.driver_id = None self.env = None self.rest_credential = None self.jwt_token = None - self.conductor_endpoint = proxy_config.get('conductor_endpoint', - kernel_manager.conductor_endpoint) + self.conductor_endpoint = proxy_config.get( + "conductor_endpoint", kernel_manager.conductor_endpoint + ) self.ascd_endpoint = self.conductor_endpoint async def launch_process(self, kernel_cmd, **kwargs): """ Launches the specified process within a Conductor cluster environment. """ - await super(ConductorClusterProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) - self.env = kwargs.get('env') - self.kernel_headers = kwargs.get('kernel_headers') + self.env = kwargs.get("env") + self.kernel_headers = kwargs.get("kernel_headers") # Get Conductor cred from process env env_dict = dict(os.environ.copy()) - if env_dict and 'EGO_SERVICE_CREDENTIAL' in env_dict: - self.rest_credential = env_dict['EGO_SERVICE_CREDENTIAL'] - elif self.kernel_headers and 'Jwt-Auth-User-Payload' in self.kernel_headers: - kwargs.get('env')['KERNEL_NOTEBOOK_COOKIE_JAR'] = 'kernelcookie' + str(randint(0, 1000)) - jsonKH = json.loads(self.kernel_headers['Jwt-Auth-User-Payload']) - self.jwt_token = jsonKH['accessToken'] - await asyncio.get_event_loop().run_in_executor(None, self._performConductorJWTLogonAndRetrieval, - self.jwt_token, kwargs.get('env')) + if env_dict and "EGO_SERVICE_CREDENTIAL" in env_dict: + self.rest_credential = env_dict["EGO_SERVICE_CREDENTIAL"] + 
elif self.kernel_headers and "Jwt-Auth-User-Payload" in self.kernel_headers: + kwargs.get("env")["KERNEL_NOTEBOOK_COOKIE_JAR"] = "kernelcookie" + str(randint(0, 1000)) + jsonKH = json.loads(self.kernel_headers["Jwt-Auth-User-Payload"]) + self.jwt_token = jsonKH["accessToken"] + await asyncio.get_event_loop().run_in_executor( + None, self._performConductorJWTLogonAndRetrieval, self.jwt_token, kwargs.get("env") + ) else: - error_message = "ConductorClusterProcessProxy failed to obtain the Conductor credential." + error_message = ( + "ConductorClusterProcessProxy failed to obtain the Conductor credential." + ) self.log_and_raise(http_status_code=500, reason=error_message) # dynamically update Spark submit parameters - await asyncio.get_event_loop().run_in_executor(None, self._update_launch_info, kernel_cmd, kwargs.get('env')) + await asyncio.get_event_loop().run_in_executor( + None, self._update_launch_info, kernel_cmd, kwargs.get("env") + ) # Enable stderr PIPE for the run command - kwargs.update({'stderr': subprocess.PIPE}) + kwargs.update({"stderr": subprocess.PIPE}) self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) self.pid = self.local_proc.pid self.ip = local_ip - self.log.debug("Conductor cluster kernel launched using Conductor endpoint: {}, pid: {}, Kernel ID: {}, " - "cmd: '{}'".format(self.conductor_endpoint, self.local_proc.pid, self.kernel_id, kernel_cmd)) + self.log.debug( + "Conductor cluster kernel launched using Conductor endpoint: {}, pid: {}, Kernel ID: {}, " + "cmd: '{}'".format( + self.conductor_endpoint, self.local_proc.pid, self.kernel_id, kernel_cmd + ) + ) await self.confirm_remote_startup() return self @@ -83,32 +92,39 @@ def _update_launch_info(self, kernel_cmd, env_dict): """ Dynamically assemble the spark-submit configuration passed from NB2KG. 
""" - if any(arg.endswith('.sh') for arg in kernel_cmd): + if any(arg.endswith(".sh") for arg in kernel_cmd): self.log.debug("kernel_cmd contains execution script") else: - kernel_dir = self.kernel_manager.kernel_spec_manager._find_spec_directory(self.kernel_manager.kernel_name) - cmd = pjoin(kernel_dir, 'bin/run.sh') + kernel_dir = self.kernel_manager.kernel_spec_manager._find_spec_directory( + self.kernel_manager.kernel_name + ) + cmd = pjoin(kernel_dir, "bin/run.sh") kernel_cmd.insert(0, cmd) # add SPARK_HOME, PYSPARK_PYTHON, update SPARK_OPT to contain SPARK_MASTER and EGO_SERVICE_CREDENTIAL - env_dict['SPARK_HOME'] = env_dict['KERNEL_SPARK_HOME'] - env_dict['PYSPARK_PYTHON'] = env_dict['KERNEL_PYSPARK_PYTHON'] + env_dict["SPARK_HOME"] = env_dict["KERNEL_SPARK_HOME"] + env_dict["PYSPARK_PYTHON"] = env_dict["KERNEL_PYSPARK_PYTHON"] # add KERNEL_SPARK_OPTS to append user configured Spark configuration - user_defined_spark_opts = '' - if 'KERNEL_SPARK_OPTS' in env_dict: - user_defined_spark_opts = env_dict['KERNEL_SPARK_OPTS'] + user_defined_spark_opts = "" + if "KERNEL_SPARK_OPTS" in env_dict: + user_defined_spark_opts = env_dict["KERNEL_SPARK_OPTS"] # Get updated one_notebook_master_rest_url for KERNEL_NOTEBOOK_MASTER_REST and SPARK_OPTS. 
if self.jwt_token is None: self._update_notebook_master_rest_url(env_dict) - if "--master" not in env_dict['SPARK_OPTS']: - env_dict['SPARK_OPTS'] = '--master {master} --conf spark.ego.credential={rest_cred} ' \ - '--conf spark.pyspark.python={pyspark_python} {spark_opts} ' \ - '{user_defined_spark_opts}'.\ - format(master=env_dict['KERNEL_NOTEBOOK_MASTER_REST'], rest_cred="'" + self.rest_credential + "'", - pyspark_python=env_dict['PYSPARK_PYTHON'], spark_opts=env_dict['SPARK_OPTS'], - user_defined_spark_opts=user_defined_spark_opts) + if "--master" not in env_dict["SPARK_OPTS"]: + env_dict["SPARK_OPTS"] = ( + "--master {master} --conf spark.ego.credential={rest_cred} " + "--conf spark.pyspark.python={pyspark_python} {spark_opts} " + "{user_defined_spark_opts}".format( + master=env_dict["KERNEL_NOTEBOOK_MASTER_REST"], + rest_cred="'" + self.rest_credential + "'", + pyspark_python=env_dict["PYSPARK_PYTHON"], + spark_opts=env_dict["SPARK_OPTS"], + user_defined_spark_opts=user_defined_spark_opts, + ) + ) def _update_notebook_master_rest_url(self, env_dict): """ @@ -119,60 +135,97 @@ def _update_notebook_master_rest_url(self, env_dict): self.log.debug("Updating notebook master rest urls.") response = None # Assemble REST call - header = 'Accept: application/json' - authorization = 'Authorization: %s' % self.rest_credential - if 'KERNEL_NOTEBOOK_DATA_DIR' not in env_dict or 'KERNEL_NOTEBOOK_COOKIE_JAR' not in env_dict \ - or 'KERNEL_CURL_SECURITY_OPT' not in env_dict: - self.log.warning("Could not find KERNEL environment variables. Not updating notebook master rest url.") + header = "Accept: application/json" + authorization = "Authorization: %s" % self.rest_credential + if ( + "KERNEL_NOTEBOOK_DATA_DIR" not in env_dict + or "KERNEL_NOTEBOOK_COOKIE_JAR" not in env_dict + or "KERNEL_CURL_SECURITY_OPT" not in env_dict + ): + self.log.warning( + "Could not find KERNEL environment variables. Not updating notebook master rest url." 
+ ) return - if 'CONDUCTOR_REST_URL' not in env_dict or 'KERNEL_SIG_ID' not in env_dict \ - or 'KERNEL_NOTEBOOK_MASTER_REST' not in env_dict: - self.log.warning("Could not find CONDUCTOR_REST_URL or KERNEL_SIG_ID or KERNEL_NOTEBOOK_MASTER_REST. " - "Not updating notebook master rest url.") + if ( + "CONDUCTOR_REST_URL" not in env_dict + or "KERNEL_SIG_ID" not in env_dict + or "KERNEL_NOTEBOOK_MASTER_REST" not in env_dict + ): + self.log.warning( + "Could not find CONDUCTOR_REST_URL or KERNEL_SIG_ID or KERNEL_NOTEBOOK_MASTER_REST. " + "Not updating notebook master rest url." + ) return - cookie_jar = pjoin(env_dict['KERNEL_NOTEBOOK_DATA_DIR'], env_dict['KERNEL_NOTEBOOK_COOKIE_JAR']) - sslconf = env_dict['KERNEL_CURL_SECURITY_OPT'].split() - ascd_rest_url = env_dict['CONDUCTOR_REST_URL'] - ig_id = env_dict['KERNEL_SIG_ID'] - url = '%sconductor/v1/instances?id=%s&fields=outputs' % (ascd_rest_url, ig_id) - cmd = ['curl', '-v', '-b', cookie_jar, '-X', 'GET', '-H', header, '-H', authorization, url] + cookie_jar = pjoin( + env_dict["KERNEL_NOTEBOOK_DATA_DIR"], env_dict["KERNEL_NOTEBOOK_COOKIE_JAR"] + ) + sslconf = env_dict["KERNEL_CURL_SECURITY_OPT"].split() + ascd_rest_url = env_dict["CONDUCTOR_REST_URL"] + ig_id = env_dict["KERNEL_SIG_ID"] + url = f"{ascd_rest_url}conductor/v1/instances?id={ig_id}&fields=outputs" + cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + process = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True + ) output, stderr = process.communicate() response = json.loads(output) if output else None - if response is None or len(response) < 1 or not response[0] or not response[0]['outputs']: + if ( + response is None + or len(response) < 1 + or not response[0] + or not response[0]["outputs"] + ): response = None except Exception as e: - 
self.log.warning("Getting instance group with cmd '{}' failed with exception: '{}'. Continuing...". - format(cmd, e)) + self.log.warning( + "Getting instance group with cmd '{}' failed with exception: '{}'. Continuing...".format( + cmd, e + ) + ) return - outputs = response[0]['outputs'] - - if 'one_notebook_master_rest_url' not in outputs or not outputs['one_notebook_master_rest_url'] \ - or 'value' not in outputs['one_notebook_master_rest_url'] \ - or not outputs['one_notebook_master_rest_url']['value']: - self.log.warning("Could not get one_notebook_master_rest_url from instance group. " - "Not updating notebook master rest url.") + outputs = response[0]["outputs"] + + if ( + "one_notebook_master_rest_url" not in outputs + or not outputs["one_notebook_master_rest_url"] + or "value" not in outputs["one_notebook_master_rest_url"] + or not outputs["one_notebook_master_rest_url"]["value"] + ): + self.log.warning( + "Could not get one_notebook_master_rest_url from instance group. " + "Not updating notebook master rest url." + ) return - if 'one_notebook_master_web_submission_url' not in outputs \ - or not outputs['one_notebook_master_web_submission_url'] \ - or 'value' not in outputs['one_notebook_master_web_submission_url'] \ - or not outputs['one_notebook_master_web_submission_url']['value']: - self.log.warning("Could not get one_notebook_master_web_submission_url from instance group. " - "Not updating notebook master rest url.") + if ( + "one_notebook_master_web_submission_url" not in outputs + or not outputs["one_notebook_master_web_submission_url"] + or "value" not in outputs["one_notebook_master_web_submission_url"] + or not outputs["one_notebook_master_web_submission_url"]["value"] + ): + self.log.warning( + "Could not get one_notebook_master_web_submission_url from instance group. " + "Not updating notebook master rest url." 
+ ) return - updated_one_notebook_master_rest_url = outputs['one_notebook_master_rest_url']['value'] - updated_one_notebook_master_web_submission_url = outputs['one_notebook_master_web_submission_url']['value'] + updated_one_notebook_master_rest_url = outputs["one_notebook_master_rest_url"]["value"] + updated_one_notebook_master_web_submission_url = outputs[ + "one_notebook_master_web_submission_url" + ]["value"] if updated_one_notebook_master_rest_url and updated_one_notebook_master_web_submission_url: - self.log.debug("Updating KERNEL_NOTEBOOK_MASTER_REST to '{}'.".format(updated_one_notebook_master_rest_url)) - os.environ['KERNEL_NOTEBOOK_MASTER_REST'] = updated_one_notebook_master_rest_url - env_dict['KERNEL_NOTEBOOK_MASTER_REST'] = updated_one_notebook_master_rest_url + self.log.debug( + "Updating KERNEL_NOTEBOOK_MASTER_REST to '{}'.".format( + updated_one_notebook_master_rest_url + ) + ) + os.environ["KERNEL_NOTEBOOK_MASTER_REST"] = updated_one_notebook_master_rest_url + env_dict["KERNEL_NOTEBOOK_MASTER_REST"] = updated_one_notebook_master_rest_url self.conductor_endpoint = updated_one_notebook_master_web_submission_url def poll(self): @@ -196,13 +249,13 @@ def send_signal(self, signum): :param signum :return: """ - self.log.debug("ConductorClusterProcessProxy.send_signal {}".format(signum)) + self.log.debug(f"ConductorClusterProcessProxy.send_signal {signum}") if signum == 0: return self.poll() elif signum == signal.SIGKILL: return self.kill() else: - return super(ConductorClusterProcessProxy, self).send_signal(signum) + return super().send_signal(signum) def kill(self): """ @@ -213,8 +266,10 @@ def kill(self): result = False if self.driver_id: resp = self._kill_app_by_driver_id(self.driver_id) - self.log.debug("ConductorClusterProcessProxy.kill: kill_app_by_driver_id({}) response: {}, confirming " - "app state is not RUNNING".format(self.driver_id, resp)) + self.log.debug( + "ConductorClusterProcessProxy.kill: kill_app_by_driver_id({}) response: {}, 
confirming " + "app state is not RUNNING".format(self.driver_id, resp) + ) i = 1 state = self._query_app_state_by_driver_id(self.driver_id) while state not in ConductorClusterProcessProxy.final_states and i <= max_poll_attempts: @@ -225,28 +280,34 @@ def kill(self): if state in ConductorClusterProcessProxy.final_states: result = None - super(ConductorClusterProcessProxy, self).kill() + super().kill() - self.log.debug("ConductorClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}" - .format(self.application_id, self.kernel_id, state)) + self.log.debug( + "ConductorClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}".format( + self.application_id, self.kernel_id, state + ) + ) return result def cleanup(self): # we might have a defunct process (if using waitAppCompletion = false) - so poll, kill, wait when we have # a local_proc. if self.local_proc: - self.log.debug("ConductorClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...". - format(self.local_proc.pid)) - if super(ConductorClusterProcessProxy, self).poll(): - super(ConductorClusterProcessProxy, self).kill() - super(ConductorClusterProcessProxy, self).wait() + self.log.debug( + "ConductorClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...".format( + self.local_proc.pid + ) + ) + if super().poll(): + super().kill() + super().wait() self.local_proc = None # reset application id to force new query - handles kernel restarts/interrupts self.application_id = None # for cleanup, we should call the superclass last - super(ConductorClusterProcessProxy, self).cleanup() + super().cleanup() def _parse_driver_submission_id(self, submission_response): """ @@ -254,21 +315,29 @@ def _parse_driver_submission_id(self, submission_response): :param submission_response """ if submission_response: - self.log.debug("Submission Response: {}\n".format(submission_response)) - matched_lines = [line for line in submission_response.split('\n') if "submissionId" 
in line] + self.log.debug(f"Submission Response: {submission_response}\n") + matched_lines = [ + line for line in submission_response.split("\n") if "submissionId" in line + ] if matched_lines and len(matched_lines) > 0: driver_info = matched_lines[0] - self.log.debug("Driver Info: {}".format(driver_info)) + self.log.debug(f"Driver Info: {driver_info}") driver_id = driver_info.split(":")[1] driver_id = re.findall(r'"([^"]*)"', driver_id) if driver_id and len(driver_id) > 0: self.driver_id = driver_id[0] - self.log.debug("Driver ID: {}".format(driver_id[0])) + self.log.debug(f"Driver ID: {driver_id[0]}") # Handle Checking for submission error to report - err_lines = [line for line in submission_response.split('\n') if "Application submission failed" in line] + err_lines = [ + line + for line in submission_response.split("\n") + if "Application submission failed" in line + ] if err_lines and len(err_lines) > 0: - self.log_and_raise(http_status_code=500, - reason=err_lines[0][err_lines[0].find("Application submission failed"):]) + self.log_and_raise( + http_status_code=500, + reason=err_lines[0][err_lines[0].find("Application submission failed") :], + ) async def confirm_remote_startup(self): """ @@ -292,15 +361,21 @@ async def confirm_remote_startup(self): app_state = self._get_application_state() if app_state in ConductorClusterProcessProxy.final_states: - error_message = "KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}' " \ - "during kernel startup!".\ - format(self.kernel_id, self.application_id, app_state) + error_message = ( + "KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}' " + "during kernel startup!".format( + self.kernel_id, self.application_id, app_state + ) + ) self.log_and_raise(http_status_code=500, reason=error_message) - self.log.debug("{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'". 
- format(i, app_state, self.assigned_host, self.kernel_id, self.application_id)) + self.log.debug( + "{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'".format( + i, app_state, self.assigned_host, self.kernel_id, self.application_id + ) + ) - if self.assigned_host != '': + if self.assigned_host != "": ready_to_connect = await self.receive_connection_info() else: self.detect_launch_failure() @@ -315,10 +390,10 @@ def _get_application_state(self): if apps: for app in apps: - if 'state' in app: - app_state = app['state'] - if self.assigned_host == '' and app['driver']: - self.assigned_host = app['driver']['host'] + if "state" in app: + app_state = app["state"] + if self.assigned_host == "" and app["driver"]: + self.assigned_host = app["driver"]["host"] # Set the driver host to the actual host where the application landed. self.assigned_ip = socket.gethostbyname(self.assigned_host) return app_state @@ -328,22 +403,32 @@ async def handle_timeout(self): Checks to see if the kernel launch timeout has been exceeded while awaiting connection info. """ await asyncio.sleep(poll_interval) - time_interval = RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time()) + time_interval = RemoteProcessProxy.get_time_diff( + self.start_time, RemoteProcessProxy.get_current_time() + ) if time_interval > self.kernel_launch_timeout: - reason = "Application failed to start within {} seconds.". \ - format(self.kernel_launch_timeout) + reason = "Application failed to start within {} seconds.".format( + self.kernel_launch_timeout + ) error_http_code = 500 if self._get_application_id(True): if self._query_app_state_by_driver_id(self.driver_id) != "WAITING": - reason = "Kernel unavailable after {} seconds for driver_id {}, app_id {}, launch timeout: {}!". 
\ - format(time_interval, self.driver_id, self.application_id, self.kernel_launch_timeout) + reason = "Kernel unavailable after {} seconds for driver_id {}, app_id {}, launch timeout: {}!".format( + time_interval, + self.driver_id, + self.application_id, + self.kernel_launch_timeout, + ) error_http_code = 503 else: - reason = "App {} is WAITING, but waited too long ({} secs) to get connection file". \ - format(self.application_id, self.kernel_launch_timeout) + reason = "App {} is WAITING, but waited too long ({} secs) to get connection file".format( + self.application_id, self.kernel_launch_timeout + ) await asyncio.get_event_loop().run_in_executor(None, self.kill) - timeout_message = "KernelID: '{}' launch timeout due to: {}".format(self.kernel_id, reason) + timeout_message = "KernelID: '{}' launch timeout due to: {}".format( + self.kernel_id, reason + ) self.log_and_raise(http_status_code=error_http_code, reason=timeout_message) def _get_application_id(self, ignore_final_states=False): @@ -356,39 +441,51 @@ def _get_application_id(self, ignore_final_states=False): state_condition = True if apps: for app in apps: - if 'state' in app and ignore_final_states: - state_condition = app['state'] not in ConductorClusterProcessProxy.final_states - if 'applicationid' in app and len(app['applicationid']) > 0 and state_condition: - self.application_id = app['applicationid'] - time_interval = RemoteProcessProxy.get_time_diff(self.start_time, - RemoteProcessProxy.get_current_time()) - self.log.info("ApplicationID: '{}' assigned for KernelID: '{}', state: {}, " - "{} seconds after starting.".format(app['applicationid'], self.kernel_id, - app['state'], time_interval)) + if "state" in app and ignore_final_states: + state_condition = ( + app["state"] not in ConductorClusterProcessProxy.final_states + ) + if "applicationid" in app and len(app["applicationid"]) > 0 and state_condition: + self.application_id = app["applicationid"] + time_interval = RemoteProcessProxy.get_time_diff( + 
self.start_time, RemoteProcessProxy.get_current_time() + ) + self.log.info( + "ApplicationID: '{}' assigned for KernelID: '{}', state: {}, " + "{} seconds after starting.".format( + app["applicationid"], self.kernel_id, app["state"], time_interval + ) + ) else: - self.log.debug("ApplicationID not yet assigned for KernelID: '{}' - retrying...". - format(self.kernel_id)) + self.log.debug( + "ApplicationID not yet assigned for KernelID: '{}' - retrying...".format( + self.kernel_id + ) + ) else: - self.log.debug("ApplicationID not yet assigned for KernelID: '{}' - retrying...". - format(self.kernel_id)) + self.log.debug( + "ApplicationID not yet assigned for KernelID: '{}' - retrying...".format( + self.kernel_id + ) + ) return self.application_id def get_process_info(self): """ Captures the base information necessary for kernel persistence relative to Conductor clusters. """ - process_info = super(ConductorClusterProcessProxy, self).get_process_info() - process_info.update({'application_id': self.application_id}) - process_info.update({'rest_credential': self.rest_credential}) + process_info = super().get_process_info() + process_info.update({"application_id": self.application_id}) + process_info.update({"rest_credential": self.rest_credential}) return process_info def load_process_info(self, process_info): """ Captures the base information necessary for kernel persistence relative to Conductor clusters. 
""" - super(ConductorClusterProcessProxy, self).load_process_info(process_info) - self.application_id = process_info['application_id'] - self.rest_credential = process_info['rest_credential'] + super().load_process_info(process_info) + self.application_id = process_info["application_id"] + self.rest_credential = process_info["rest_credential"] def _query_app_by_driver_id(self, driver_id): """ @@ -401,26 +498,31 @@ def _query_app_by_driver_id(self, driver_id): return response # Assemble REST call env = self.env - header = 'Accept: application/json' - authorization = 'Authorization: %s' % self.rest_credential - cookie_jar = pjoin(env['KERNEL_NOTEBOOK_DATA_DIR'], env['KERNEL_NOTEBOOK_COOKIE_JAR']) - sslconf = env['KERNEL_CURL_SECURITY_OPT'].split() - url = '%s/v1/applications?driverid=%s' % (self.conductor_endpoint, driver_id) - cmd = ['curl', '-v', '-b', cookie_jar, '-X', 'GET', '-H', header, '-H', authorization, url] + header = "Accept: application/json" + authorization = "Authorization: %s" % self.rest_credential + cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) + sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() + url = f"{self.conductor_endpoint}/v1/applications?driverid={driver_id}" + cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + process = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True + ) output, stderr = process.communicate() response = json.loads(output) if output else None - if not response or not response['applist']: + if not response or not response["applist"]: response = None else: - response = response['applist'] + response = response["applist"] except Exception as e: - self.log.warning("Getting application with cmd '{}' failed with exception: '{}'. Continuing...". 
- format(cmd, e)) + self.log.warning( + "Getting application with cmd '{}' failed with exception: '{}'. Continuing...".format( + cmd, e + ) + ) return response def _query_app_by_id(self, app_id): @@ -432,25 +534,30 @@ def _query_app_by_id(self, app_id): response = None # Assemble REST call env = self.env - header = 'Accept: application/json' - authorization = 'Authorization: %s' % self.rest_credential - cookie_jar = pjoin(env['KERNEL_NOTEBOOK_DATA_DIR'], env['KERNEL_NOTEBOOK_COOKIE_JAR']) - sslconf = env['KERNEL_CURL_SECURITY_OPT'].split() - url = '%s/v1/applications?applicationid=%s' % (self.conductor_endpoint, app_id) - cmd = ['curl', '-v', '-b', cookie_jar, '-X', 'GET', '-H', header, '-H', authorization, url] + header = "Accept: application/json" + authorization = "Authorization: %s" % self.rest_credential + cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) + sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() + url = f"{self.conductor_endpoint}/v1/applications?applicationid={app_id}" + cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + process = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True + ) output, stderr = process.communicate() response = json.loads(output) if output else None - if response is None or not response['applist']: + if response is None or not response["applist"]: response = None else: - response = response['applist'] + response = response["applist"] except Exception as e: - self.log.warning("Getting application with cmd '{}' failed with exception: '{}'. Continuing...". - format(cmd, e)) + self.log.warning( + "Getting application with cmd '{}' failed with exception: '{}'. 
Continuing...".format( + cmd, e + ) + ) return response def _query_app_state_by_driver_id(self, driver_id): @@ -463,8 +570,8 @@ def _query_app_state_by_driver_id(self, driver_id): apps = self._query_app_by_driver_id(driver_id) if apps: for app in apps: - if 'state' in app: - response = app['state'] + if "state" in app: + response = app["state"] return response def _get_driver_by_app_id(self, app_id): @@ -477,9 +584,9 @@ def _get_driver_by_app_id(self, app_id): apps = self._query_app_by_id(app_id) if apps: for app in apps: - if app and app['driver']: - self.log.debug("Obtain Driver ID: {}".format(app['driver']['id'])) - response = app['driver'] + if app and app["driver"]: + self.log.debug("Obtain Driver ID: {}".format(app["driver"]["id"])) + response = app["driver"] else: self.log.warning("Application id does not exist") return response @@ -490,55 +597,73 @@ def _kill_app_by_driver_id(self, driver_id): :param driver_id :return: The JSON response of killing the application. None if driver is not found. """ - self.log.debug("Kill driver: {}".format(driver_id)) + self.log.debug(f"Kill driver: {driver_id}") if driver_id is None: if self.application_id is None: return None - self.log.debug("Driver does not exist, retrieving DriverID with ApplicationID: {}". 
- format(self.application_id)) + self.log.debug( + "Driver does not exist, retrieving DriverID with ApplicationID: {}".format( + self.application_id + ) + ) driver_info = self._get_driver_by_app_id(self.application_id) if driver_info: - self.driver_id = driver_info['id'] + self.driver_id = driver_info["id"] else: return None # Assemble REST call response = None env = self.env - header = 'Accept: application/json' - authorization = 'Authorization: %s' % self.rest_credential - cookie_jar = pjoin(env['KERNEL_NOTEBOOK_DATA_DIR'], env['KERNEL_NOTEBOOK_COOKIE_JAR']) - sslconf = env['KERNEL_CURL_SECURITY_OPT'].split() - url = '%s/v1/submissions/kill/%s' % (self.conductor_endpoint, self.driver_id) - cmd = ['curl', '-v', '-b', cookie_jar, '-X', 'POST', '-H', header, '-H', authorization, url] + header = "Accept: application/json" + authorization = "Authorization: %s" % self.rest_credential + cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) + sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() + url = f"{self.conductor_endpoint}/v1/submissions/kill/{self.driver_id}" + cmd = ["curl", "-v", "-b", cookie_jar, "-X", "POST", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + process = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True + ) output, stderr = process.communicate() response = json.loads(output) if output else None except Exception as e: - self.log.warning("Termination of application with cmd '{}' failed with exception: '{}'. Continuing...". - format(cmd, e)) - self.log.debug("Kill response: {}".format(response)) + self.log.warning( + "Termination of application with cmd '{}' failed with exception: '{}'. 
Continuing...".format( + cmd, e + ) + ) + self.log.debug(f"Kill response: {response}") return response def _performRestCall(self, cmd, url, HA_LIST): for HA in HA_LIST: - portcolon = url.rfind(':') - slash = url.find('://') - url = url[0:slash + 3] + HA + url[portcolon:] + portcolon = url.rfind(":") + slash = url.find("://") + url = url[0 : slash + 3] + HA + url[portcolon:] cmd[-1] = url self.log.debug(cmd) - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, - universal_newlines=True) + process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + universal_newlines=True, + ) output, stderr = process.communicate() - if 'Could not resolve host' not in stderr and 'Failed connect to' not in stderr \ - and 'Connection refused' not in stderr: + if ( + "Could not resolve host" not in stderr + and "Failed connect to" not in stderr + and "Connection refused" not in stderr + ): return output, stderr - self.log_and_raise(http_status_code=500, reason='Could not connect to ascd. Verify ascd is running.') - return 'Error', 'Error' + self.log_and_raise( + http_status_code=500, reason="Could not connect to ascd. Verify ascd is running." + ) + return "Error", "Error" def _performConductorJWTLogonAndRetrieval(self, jwt_token, env_dict): """ @@ -553,77 +678,102 @@ def _performConductorJWTLogonAndRetrieval(self, jwt_token, env_dict): # Assemble JWT Auth logon REST call env = self.env - if env['KERNEL_IG_UUID'] is None: - reasonErr = 'Instance group specified is None. Check environment ' \ - 'specified instance group is available.' + if env["KERNEL_IG_UUID"] is None: + reasonErr = ( + "Instance group specified is None. Check environment " + "specified instance group is available." 
+ ) self.log_and_raise(http_status_code=500, reason=reasonErr) # Determine hostname of ascd_endpoint and setup the HA List - portcolon = self.ascd_endpoint.rfind(':') - slash = self.ascd_endpoint.find('://') - host = self.ascd_endpoint[slash + 3:portcolon] - HA_LIST = env['KERNEL_CONDUCTOR_HA_ENDPOINTS'].split(',') + portcolon = self.ascd_endpoint.rfind(":") + slash = self.ascd_endpoint.find("://") + host = self.ascd_endpoint[slash + 3 : portcolon] + HA_LIST = env["KERNEL_CONDUCTOR_HA_ENDPOINTS"].split(",") HA_LIST.insert(0, host) - header = 'Accept: application/json' - authorization = 'Authorization: Bearer %s' % jwt_token - cookie_jar = pjoin(env['KERNEL_NOTEBOOK_DATA_DIR'], env['KERNEL_NOTEBOOK_COOKIE_JAR']) - sslconf = env['KERNEL_CURL_SECURITY_OPT'].split() - url = '%s/auth/logon/jwt?topology=%s' % (self.ascd_endpoint, env['KERNEL_TOPOLOGY']) - cmd = ['curl', '-v', '-b', cookie_jar, '-X', 'GET', '-H', header, '-H', authorization, url] + header = "Accept: application/json" + authorization = "Authorization: Bearer %s" % jwt_token + cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) + sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() + url = "{}/auth/logon/jwt?topology={}".format(self.ascd_endpoint, env["KERNEL_TOPOLOGY"]) + cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) - if 'Error' in output: - reasonErr = 'Failed to perform JWT Auth Logon. ' + output.splitlines()[0] + if "Error" in output: + reasonErr = "Failed to perform JWT Auth Logon. 
" + output.splitlines()[0] self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) self.rest_credential = url_unescape(output)[1:-1] # Assemble EGO Token Logon REST call - authorization = 'Authorization: PlatformToken token=' + output.strip('"') - url = '%s/auth/logon' % self.ascd_endpoint - cmd = ['curl', '-v', '-c', cookie_jar, '-X', 'GET', '-H', header, '-H', authorization, url] + authorization = "Authorization: PlatformToken token=" + output.strip('"') + url = "%s/auth/logon" % self.ascd_endpoint + cmd = ["curl", "-v", "-c", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) - if 'Error' in output: - reasonErr = 'Failed to perform EGO Auth Logon. ' + output.splitlines()[0] + if "Error" in output: + reasonErr = "Failed to perform EGO Auth Logon. " + output.splitlines()[0] self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) # Get the Python path to use to make sure the right conda environment is used - url = '%s/anaconda/instances/%s' % (self.ascd_endpoint, env['KERNEL_ANACONDA_INST_UUID']) - cmd = ['curl', '-v', '-b', cookie_jar, '-X', 'GET', '-H', header, '-H', authorization, url] + url = "{}/anaconda/instances/{}".format( + self.ascd_endpoint, env["KERNEL_ANACONDA_INST_UUID"] + ) + cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) response = json.loads(output) if output else None - if response is None or not response['parameters']['deploy_home']['value']: - reasonErr = 'Could not retrieve anaconda instance. Verify anaconda instance with id ' - reasonErr = reasonErr + env['KERNEL_ANACONDA_INST_UUID'] + ' exists' + if response is None or not response["parameters"]["deploy_home"]["value"]: + reasonErr = "Could not retrieve anaconda instance. 
Verify anaconda instance with id " + reasonErr = reasonErr + env["KERNEL_ANACONDA_INST_UUID"] + " exists" self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) else: - env_dict['KERNEL_PYSPARK_PYTHON'] = response['parameters']['deploy_home']['value'] \ - + '/anaconda/envs/' + env['KERNEL_ANACONDA_ENV'] + '/bin/python' + env_dict["KERNEL_PYSPARK_PYTHON"] = ( + response["parameters"]["deploy_home"]["value"] + + "/anaconda/envs/" + + env["KERNEL_ANACONDA_ENV"] + + "/bin/python" + ) # Get instance group information we need - url = '%s/instances?id=%s&fields=sparkinstancegroup,outputs' % (self.ascd_endpoint, env['KERNEL_IG_UUID']) - cmd = ['curl', '-v', '-b', cookie_jar, '-X', 'GET', '-H', header, '-H', authorization, url] + url = "{}/instances?id={}&fields=sparkinstancegroup,outputs".format( + self.ascd_endpoint, + env["KERNEL_IG_UUID"], + ) + cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) response = json.loads(output) if output else None if response is None or len(response) == 0 or response[0] is None: - reasonErr = 'Could not retrieve instance group. Verify instance group with id ' \ - + env['KERNEL_IG_UUID'] + ' exists.' + reasonErr = ( + "Could not retrieve instance group. Verify instance group with id " + + env["KERNEL_IG_UUID"] + + " exists." + ) self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) - elif response is None or response[0] is None or 'value' not in response[0]['outputs']['batch_master_rest_urls']: - reasonErr = 'Could not retrieve outputs for instance group. Verify instance group with id ' \ - + env['KERNEL_IG_UUID'] + ' is started' + elif ( + response is None + or response[0] is None + or "value" not in response[0]["outputs"]["batch_master_rest_urls"] + ): + reasonErr = ( + "Could not retrieve outputs for instance group. 
Verify instance group with id " + + env["KERNEL_IG_UUID"] + + " is started" + ) self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) else: - env_dict['KERNEL_SPARK_HOME'] = response[0]['sparkinstancegroup']['sparkhomedir'] - env_dict['KERNEL_NOTEBOOK_MASTER_REST'] = response[0]['outputs']['batch_master_rest_urls']['value'] - self.conductor_endpoint = response[0]['outputs']['one_batch_master_web_submission_url']['value'] + env_dict["KERNEL_SPARK_HOME"] = response[0]["sparkinstancegroup"]["sparkhomedir"] + env_dict["KERNEL_NOTEBOOK_MASTER_REST"] = response[0]["outputs"][ + "batch_master_rest_urls" + ]["value"] + self.conductor_endpoint = response[0]["outputs"]["one_batch_master_web_submission_url"][ + "value" + ] return response diff --git a/enterprise_gateway/services/processproxies/container.py b/enterprise_gateway/services/processproxies/container.py index 96441a12d..5ffa4c9b5 100644 --- a/enterprise_gateway/services/processproxies/container.py +++ b/enterprise_gateway/services/processproxies/container.py @@ -5,8 +5,8 @@ import abc import os import signal -import urllib3 # docker ends up using this and it causes lots of noise, so turn off warnings +import urllib3 # docker ends up using this and it causes lots of noise, so turn off warnings from jupyter_client import localinterfaces from .processproxy import RemoteProcessProxy @@ -15,29 +15,30 @@ local_ip = localinterfaces.public_ips()[0] -default_kernel_uid = '1000' # jovyan user is the default -default_kernel_gid = '100' # users group is the default +default_kernel_uid = "1000" # jovyan user is the default +default_kernel_gid = "100" # users group is the default # These could be enforced via a PodSecurityPolicy, but those affect # all pods so the cluster admin would need to configure those for # all applications. 
-prohibited_uids = os.getenv("EG_PROHIBITED_UIDS", "0").split(',') -prohibited_gids = os.getenv("EG_PROHIBITED_GIDS", "0").split(',') +prohibited_uids = os.getenv("EG_PROHIBITED_UIDS", "0").split(",") +prohibited_gids = os.getenv("EG_PROHIBITED_GIDS", "0").split(",") -mirror_working_dirs = bool((os.getenv('EG_MIRROR_WORKING_DIRS', 'false').lower() == 'true')) +mirror_working_dirs = bool(os.getenv("EG_MIRROR_WORKING_DIRS", "false").lower() == "true") # Get the globally-configured default images. Defaulting to None if not set. -default_kernel_image = os.getenv('EG_KERNEL_IMAGE') -default_kernel_executor_image = os.getenv('EG_KERNEL_EXECUTOR_IMAGE') +default_kernel_image = os.getenv("EG_KERNEL_IMAGE") +default_kernel_executor_image = os.getenv("EG_KERNEL_EXECUTOR_IMAGE") class ContainerProcessProxy(RemoteProcessProxy): """ Kernel lifecycle management for container-based kernels. """ + def __init__(self, kernel_manager, proxy_config): - super(ContainerProcessProxy, self).__init__(kernel_manager, proxy_config) - self.container_name = '' + super().__init__(kernel_manager, proxy_config) + self.container_name = "" self.assigned_node_ip = None def _determine_kernel_images(self, **kwargs): @@ -47,18 +48,23 @@ def _determine_kernel_images(self, **kwargs): Initialize to any defined in the process proxy override that then let those provided by client via env override. """ - kernel_image = self.proxy_config.get('image_name', default_kernel_image) - self.kernel_image = kwargs['env'].get('KERNEL_IMAGE', kernel_image) + kernel_image = self.proxy_config.get("image_name", default_kernel_image) + self.kernel_image = kwargs["env"].get("KERNEL_IMAGE", kernel_image) if self.kernel_image is None: - self.log_and_raise(http_status_code=500, - reason="No kernel image could be determined! Set the `image_name` in the " - "process_proxy.config stanza of the corresponding kernel.json file.") + self.log_and_raise( + http_status_code=500, + reason="No kernel image could be determined! 
Set the `image_name` in the " + "process_proxy.config stanza of the corresponding kernel.json file.", + ) # If no default executor image is configured, default it to current image - kernel_executor_image = self.proxy_config.get('executor_image_name', - default_kernel_executor_image or self.kernel_image) - self.kernel_executor_image = kwargs['env'].get('KERNEL_EXECUTOR_IMAGE', kernel_executor_image) + kernel_executor_image = self.proxy_config.get( + "executor_image_name", default_kernel_executor_image or self.kernel_image + ) + self.kernel_executor_image = kwargs["env"].get( + "KERNEL_EXECUTOR_IMAGE", kernel_executor_image + ) async def launch_process(self, kernel_cmd, **kwargs): """ @@ -67,44 +73,57 @@ async def launch_process(self, kernel_cmd, **kwargs): # Set env before superclass call so we see these in the debug output self._determine_kernel_images(**kwargs) - kwargs['env']['KERNEL_IMAGE'] = self.kernel_image - kwargs['env']['KERNEL_EXECUTOR_IMAGE'] = self.kernel_executor_image + kwargs["env"]["KERNEL_IMAGE"] = self.kernel_image + kwargs["env"]["KERNEL_EXECUTOR_IMAGE"] = self.kernel_executor_image - if not mirror_working_dirs: # If mirroring is not enabled, remove working directory if present - if 'KERNEL_WORKING_DIR' in kwargs['env']: - del kwargs['env']['KERNEL_WORKING_DIR'] + if ( + not mirror_working_dirs + ): # If mirroring is not enabled, remove working directory if present + if "KERNEL_WORKING_DIR" in kwargs["env"]: + del kwargs["env"]["KERNEL_WORKING_DIR"] self._enforce_prohibited_ids(**kwargs) - await super(ContainerProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) self.pid = self.local_proc.pid self.ip = local_ip - self.log.info("{}: kernel launched. Kernel image: {}, KernelID: {}, cmd: '{}'" - .format(self.__class__.__name__, self.kernel_image, self.kernel_id, kernel_cmd)) + self.log.info( + "{}: kernel launched. 
Kernel image: {}, KernelID: {}, cmd: '{}'".format( + self.__class__.__name__, self.kernel_image, self.kernel_id, kernel_cmd + ) + ) await self.confirm_remote_startup() return self def _enforce_prohibited_ids(self, **kwargs): """Determine UID and GID with which to launch container and ensure they are not prohibited.""" - kernel_uid = kwargs['env'].get('KERNEL_UID', default_kernel_uid) - kernel_gid = kwargs['env'].get('KERNEL_GID', default_kernel_gid) + kernel_uid = kwargs["env"].get("KERNEL_UID", default_kernel_uid) + kernel_gid = kwargs["env"].get("KERNEL_GID", default_kernel_gid) if kernel_uid in prohibited_uids: http_status_code = 403 - error_message = "Kernel's UID value of '{}' has been denied via EG_PROHIBITED_UIDS!".format(kernel_uid) + error_message = ( + "Kernel's UID value of '{}' has been denied via EG_PROHIBITED_UIDS!".format( + kernel_uid + ) + ) self.log_and_raise(http_status_code=http_status_code, reason=error_message) elif kernel_gid in prohibited_gids: http_status_code = 403 - error_message = "Kernel's GID value of '{}' has been denied via EG_PROHIBITED_GIDS!".format(kernel_gid) + error_message = ( + "Kernel's GID value of '{}' has been denied via EG_PROHIBITED_GIDS!".format( + kernel_gid + ) + ) self.log_and_raise(http_status_code=http_status_code, reason=error_message) # Ensure the kernel's env has what it needs in case they came from defaults - kwargs['env']['KERNEL_UID'] = kernel_uid - kwargs['env']['KERNEL_GID'] = kernel_gid + kwargs["env"]["KERNEL_UID"] = kernel_uid + kwargs["env"]["KERNEL_GID"] = kernel_gid def poll(self): """Determines if container is still active. @@ -144,7 +163,7 @@ def send_signal(self, signum): else: # This is very likely an interrupt signal, so defer to the super class # which should use the communication port. - return super(ContainerProcessProxy, self).send_signal(signum) + return super().send_signal(signum) def kill(self): """Kills a containerized kernel. 
@@ -165,7 +184,7 @@ def cleanup(self): # cleanup we'd normally perform on forced kill situations. self.kill() - super(ContainerProcessProxy, self).cleanup() + super().cleanup() async def confirm_remote_startup(self): """Confirms the container has started and returned necessary connection information.""" @@ -178,23 +197,29 @@ async def confirm_remote_startup(self): container_status = self.get_container_status(str(i)) if container_status: - if self.assigned_host != '': + if self.assigned_host != "": ready_to_connect = await self.receive_connection_info() - self.pid = 0 # We won't send process signals for kubernetes lifecycle management + self.pid = ( + 0 # We won't send process signals for kubernetes lifecycle management + ) self.pgid = 0 else: self.detect_launch_failure() def get_process_info(self): """Captures the base information necessary for kernel persistence relative to containers.""" - process_info = super(ContainerProcessProxy, self).get_process_info() - process_info.update({'assigned_node_ip': self.assigned_node_ip, }) + process_info = super().get_process_info() + process_info.update( + { + "assigned_node_ip": self.assigned_node_ip, + } + ) return process_info def load_process_info(self, process_info): """Loads the base information necessary for kernel persistence relative to containers.""" - super(ContainerProcessProxy, self).load_process_info(process_info) - self.assigned_node_ip = process_info['assigned_node_ip'] + super().load_process_info(process_info) + self.assigned_node_ip = process_info["assigned_node_ip"] @abc.abstractmethod def get_initial_states(self): diff --git a/enterprise_gateway/services/processproxies/crd.py b/enterprise_gateway/services/processproxies/crd.py index fc375fb1d..986093c83 100644 --- a/enterprise_gateway/services/processproxies/crd.py +++ b/enterprise_gateway/services/processproxies/crd.py @@ -2,9 +2,10 @@ # Distributed under the terms of the Modified BSD License. 
"""Code related to managing kernels running based on k8s custom resource.""" -from .k8s import KubernetesProcessProxy from kubernetes import client +from .k8s import KubernetesProcessProxy + class CustomResourceProcessProxy(KubernetesProcessProxy): group = version = plural = None @@ -12,15 +13,17 @@ class CustomResourceProcessProxy(KubernetesProcessProxy): kernel_resource_name = None def __init__(self, kernel_manager, proxy_config): - super(CustomResourceProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) async def launch_process(self, kernel_cmd, **kwargs): - kwargs['env']['KERNEL_RESOURCE_NAME'] = self.kernel_resource_name = self._determine_kernel_pod_name(**kwargs) - kwargs['env']['KERNEL_CRD_GROUP'] = self.group - kwargs['env']['KERNEL_CRD_VERSION'] = self.version - kwargs['env']['KERNEL_CRD_PLURAL'] = self.plural + kwargs["env"][ + "KERNEL_RESOURCE_NAME" + ] = self.kernel_resource_name = self._determine_kernel_pod_name(**kwargs) + kwargs["env"]["KERNEL_CRD_GROUP"] = self.group + kwargs["env"]["KERNEL_CRD_VERSION"] = self.version + kwargs["env"]["KERNEL_CRD_PLURAL"] = self.plural - await super(CustomResourceProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) return self def kill(self): @@ -28,11 +31,15 @@ def kill(self): if self.kernel_resource_name: if self.delete_kernel_namespace and not self.kernel_manager.restarting: - body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy='Background') - v1_status = client.CoreV1Api().delete_namespace(name=self.kernel_namespace, body=body) + body = client.V1DeleteOptions( + grace_period_seconds=0, propagation_policy="Background" + ) + v1_status = client.CoreV1Api().delete_namespace( + name=self.kernel_namespace, body=body + ) if v1_status and v1_status.status: - termination_status = ['Succeeded', 'Failed', 'Terminating'] + termination_status = ["Succeeded", "Failed", "Terminating"] if 
any(status in v1_status.status for status in termination_status):
                    result = True
        else:
@@ -45,11 +52,11 @@ def kill(self):

    def terminate_custom_resource(self):
        try:
-            delete_status = client.CustomObjectsApi().delete_cluster_custom_object(self.group, self.version,
-                                                                                   self.plurals,
-                                                                                   self.kernel_resource_name)
+            delete_status = client.CustomObjectsApi().delete_cluster_custom_object(
+                self.group, self.version, self.plural, self.kernel_resource_name
+            )

-            result = delete_status and delete_status.get('status', None) == 'Success'
+            result = delete_status and delete_status.get("status", None) == "Success"
         except Exception as err:
             result = isinstance(err, client.rest.ApiException) and err.status == 404
diff --git a/enterprise_gateway/services/processproxies/distributed.py b/enterprise_gateway/services/processproxies/distributed.py
index fb55b6a7b..6cfedcdfa 100644
--- a/enterprise_gateway/services/processproxies/distributed.py
+++ b/enterprise_gateway/services/processproxies/distributed.py
@@ -6,27 +6,29 @@
 import json
 import os
 import signal
-
-from subprocess import STDOUT
 from socket import gethostbyname
+from subprocess import STDOUT

-from .processproxy import RemoteProcessProxy, BaseProcessProxyABC
+from .processproxy import BaseProcessProxyABC, RemoteProcessProxy

-poll_interval = float(os.getenv('EG_POLL_INTERVAL', '0.5'))
-kernel_log_dir = os.getenv("EG_KERNEL_LOG_DIR", '/tmp')  # would prefer /var/log, but its only writable by root
+poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5"))
+kernel_log_dir = os.getenv(
+    "EG_KERNEL_LOG_DIR", "/tmp"
+)  # would prefer /var/log, but it's only writable by root


 class DistributedProcessProxy(RemoteProcessProxy):
     """
     Manages the lifecycle of kernels distributed across a set of hosts.
""" + host_index = 0 def __init__(self, kernel_manager, proxy_config): - super(DistributedProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) self.kernel_log = None - if proxy_config.get('remote_hosts'): - self.hosts = proxy_config.get('remote_hosts').split(',') + if proxy_config.get("remote_hosts"): + self.hosts = proxy_config.get("remote_hosts").split(",") else: self.hosts = kernel_manager.remote_hosts # from command line or env @@ -34,7 +36,7 @@ async def launch_process(self, kernel_cmd, **kwargs): """ Launches a kernel process on a selected host. """ - await super(DistributedProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) self.assigned_host = self._determine_next_host() self.ip = gethostbyname(self.assigned_host) # convert to ip if host is provided @@ -44,29 +46,39 @@ async def launch_process(self, kernel_cmd, **kwargs): result_pid = self._launch_remote_process(kernel_cmd, **kwargs) self.pid = int(result_pid) except Exception as e: - error_message = "Failure occurred starting kernel on '{}'. Returned result: {}".\ - format(self.ip, e) + error_message = "Failure occurred starting kernel on '{}'. Returned result: {}".format( + self.ip, e + ) self.log_and_raise(http_status_code=500, reason=error_message) - self.log.info("Kernel launched on '{}', pid: {}, ID: {}, Log file: {}:{}, Command: '{}'. ". - format(self.assigned_host, self.pid, self.kernel_id, self.assigned_host, - self.kernel_log, kernel_cmd)) + self.log.info( + "Kernel launched on '{}', pid: {}, ID: {}, Log file: {}:{}, Command: '{}'. ".format( + self.assigned_host, + self.pid, + self.kernel_id, + self.assigned_host, + self.kernel_log, + kernel_cmd, + ) + ) await self.confirm_remote_startup() return self def _launch_remote_process(self, kernel_cmd, **kwargs): """ - Launch the kernel as indicated by the argv stanza in the kernelspec. 
Note that this method - will bypass use of ssh if the remote host is also the local machine. + Launch the kernel as indicated by the argv stanza in the kernelspec. Note that this method + will bypass use of ssh if the remote host is also the local machine. """ cmd = self._build_startup_command(kernel_cmd, **kwargs) - self.log.debug("Invoking cmd: '{}' on host: {}".format(cmd, self.assigned_host)) - result_pid = 'bad_pid' # purposely initialize to bad int value + self.log.debug(f"Invoking cmd: '{cmd}' on host: {self.assigned_host}") + result_pid = "bad_pid" # purposely initialize to bad int value if BaseProcessProxyABC.ip_is_local(self.ip): # launch the local command with redirection in place - self.local_proc = self.launch_kernel(cmd, stdout=open(self.kernel_log, mode='a'), stderr=STDOUT, **kwargs) + self.local_proc = self.launch_kernel( + cmd, stdout=open(self.kernel_log, mode="a"), stderr=STDOUT, **kwargs + ) result_pid = str(self.local_proc.pid) else: # launch remote command via ssh @@ -87,14 +99,14 @@ def _build_startup_command(self, argv_cmd, **kwargs): """ # Optimized case needs to also redirect the kernel output, so unconditionally compose kernel_log - env_dict = kwargs['env'] - kid = env_dict.get('KERNEL_ID') - self.kernel_log = os.path.join(kernel_log_dir, "kernel-{}.log".format(kid)) + env_dict = kwargs["env"] + kid = env_dict.get("KERNEL_ID") + self.kernel_log = os.path.join(kernel_log_dir, f"kernel-{kid}.log") if BaseProcessProxyABC.ip_is_local(self.ip): # We're local so just use what we're given cmd = argv_cmd else: # Add additional envs, including those in kernelspec - cmd = '' + cmd = "" for key, value in env_dict.items(): cmd += "export {}={};".format(key, json.dumps(value).replace("'", "''")) @@ -102,11 +114,11 @@ def _build_startup_command(self, argv_cmd, **kwargs): for key, value in self.kernel_manager.kernel_spec.env.items(): cmd += "export {}={};".format(key, json.dumps(value).replace("'", "''")) - cmd += 'nohup' + cmd += "nohup" for arg in 
argv_cmd: - cmd += ' {}'.format(arg) + cmd += f" {arg}" - cmd += ' >> {} 2>&1 & echo $!'.format(self.kernel_log) # return the process id + cmd += f" >> {self.kernel_log} 2>&1 & echo $!" # return the process id return cmd @@ -117,7 +129,7 @@ def _determine_next_host(self): return next_host async def confirm_remote_startup(self): - """ Confirms the remote kernel has started by obtaining connection information from the remote host.""" + """Confirms the remote kernel has started by obtaining connection information from the remote host.""" self.start_time = RemoteProcessProxy.get_current_time() i = 0 ready_to_connect = False # we're ready to connect when we have a connection file to use @@ -125,22 +137,32 @@ async def confirm_remote_startup(self): i += 1 await self.handle_timeout() - self.log.debug("{}: Waiting to connect. Host: '{}', KernelID: '{}'". - format(i, self.assigned_host, self.kernel_id)) + self.log.debug( + "{}: Waiting to connect. Host: '{}', KernelID: '{}'".format( + i, self.assigned_host, self.kernel_id + ) + ) - if self.assigned_host != '': + if self.assigned_host != "": ready_to_connect = await self.receive_connection_info() async def handle_timeout(self): """Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.""" await asyncio.sleep(poll_interval) - time_interval = RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time()) + time_interval = RemoteProcessProxy.get_time_diff( + self.start_time, RemoteProcessProxy.get_current_time() + ) if time_interval > self.kernel_launch_timeout: - reason = "Waited too long ({}s) to get connection file. Check Enterprise Gateway log and kernel " \ - "log ({}:{}) for more information.".\ - format(self.kernel_launch_timeout, self.assigned_host, self.kernel_log) - timeout_message = "KernelID: '{}' launch timeout due to: {}".format(self.kernel_id, reason) + reason = ( + "Waited too long ({}s) to get connection file. 
Check Enterprise Gateway log and kernel " + "log ({}:{}) for more information.".format( + self.kernel_launch_timeout, self.assigned_host, self.kernel_log + ) + ) + timeout_message = "KernelID: '{}' launch timeout due to: {}".format( + self.kernel_id, reason + ) await asyncio.get_event_loop().run_in_executor(None, self.kill) self.log_and_raise(http_status_code=500, reason=timeout_message) @@ -153,4 +175,4 @@ def cleanup(self): def shutdown_listener(self): """Ensure that kernel process is terminated.""" self.send_signal(signal.SIGTERM) - super(DistributedProcessProxy, self).shutdown_listener() + super().shutdown_listener() diff --git a/enterprise_gateway/services/processproxies/docker_swarm.py b/enterprise_gateway/services/processproxies/docker_swarm.py index 9f446fcf3..a71b401ce 100644 --- a/enterprise_gateway/services/processproxies/docker_swarm.py +++ b/enterprise_gateway/services/processproxies/docker_swarm.py @@ -2,8 +2,8 @@ # Distributed under the terms of the Modified BSD License. """Code related to managing kernels running in docker-based containers.""" -import os import logging +import os from docker.client import DockerClient from docker.errors import NotFound @@ -11,9 +11,11 @@ from .container import ContainerProcessProxy # Debug logging level of docker produces too much noise - raise to info by default. -logging.getLogger('urllib3.connectionpool').setLevel(os.environ.get('EG_DOCKER_LOG_LEVEL', logging.WARNING)) +logging.getLogger("urllib3.connectionpool").setLevel( + os.environ.get("EG_DOCKER_LOG_LEVEL", logging.WARNING) +) -docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge') +docker_network = os.environ.get("EG_DOCKER_NETWORK", "bridge") client = DockerClient.from_env() @@ -22,31 +24,35 @@ class DockerSwarmProcessProxy(ContainerProcessProxy): """ Kernel lifecycle management for kernels in Docker Swarm. 
""" + def __init__(self, kernel_manager, proxy_config): - super(DockerSwarmProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) def launch_process(self, kernel_cmd, **kwargs): """ Launches the specified process within a Docker Swarm environment. """ # Convey the network to the docker launch script - kwargs['env']['EG_DOCKER_NETWORK'] = docker_network - kwargs['env']['EG_DOCKER_MODE'] = 'swarm' - return super(DockerSwarmProcessProxy, self).launch_process(kernel_cmd, **kwargs) + kwargs["env"]["EG_DOCKER_NETWORK"] = docker_network + kwargs["env"]["EG_DOCKER_MODE"] = "swarm" + return super().launch_process(kernel_cmd, **kwargs) def get_initial_states(self): """Return list of states indicating container is starting (includes running).""" - return {'preparing', 'starting', 'running'} + return {"preparing", "starting", "running"} def _get_service(self): # Fetches the service object corresponding to the kernel with a matching label. service = None - services = client.services.list(filters={'label': 'kernel_id=' + self.kernel_id}) + services = client.services.list(filters={"label": "kernel_id=" + self.kernel_id}) num_services = len(services) if num_services != 1: if num_services > 1: - raise RuntimeError("{}: Found more than one service ({}) for kernel_id '{}'!". - format(self.__class__.__name__, num_services, self.kernel_id)) + raise RuntimeError( + "{}: Found more than one service ({}) for kernel_id '{}'!".format( + self.__class__.__name__, num_services, self.kernel_id + ) + ) else: service = services[0] self.container_name = service.name @@ -59,12 +65,15 @@ def _get_task(self): task = None service = self._get_service() if service: - tasks = service.tasks(filters={'desired-state': 'running'}) + tasks = service.tasks(filters={"desired-state": "running"}) num_tasks = len(tasks) if num_tasks != 1: if num_tasks > 1: - raise RuntimeError("{}: Found more than one task ({}) for service '{}', kernel_id '{}'!". 
- format(self.__class__.__name__, num_tasks, service.name, self.kernel_id)) + raise RuntimeError( + "{}: Found more than one task ({}) for service '{}', kernel_id '{}'!".format( + self.__class__.__name__, num_tasks, service.name, self.kernel_id + ) + ) else: task = tasks[0] return task @@ -77,24 +86,33 @@ def get_container_status(self, iteration): task_id = None task = self._get_task() if task: - task_status = task['Status'] - task_id = task['ID'] + task_status = task["Status"] + task_id = task["ID"] if task_status: - task_state = task_status['State'] - if self.assigned_host == '' and task_state == 'running': # in self.get_initial_states(): + task_state = task_status["State"] + if ( + self.assigned_host == "" and task_state == "running" + ): # in self.get_initial_states(): # get the NetworkAttachments and pick out the first of the Network and first - networks_attachments = task['NetworksAttachments'] + networks_attachments = task["NetworksAttachments"] if len(networks_attachments) > 0: - address = networks_attachments[0]['Addresses'][0] - ip = address.split('/')[0] + address = networks_attachments[0]["Addresses"][0] + ip = address.split("/")[0] self.assigned_ip = ip self.assigned_host = self.container_name if iteration: # only log if iteration is not None (otherwise poll() is too noisy) - self.log.debug("{}: Waiting to connect to docker container. " - "Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}', TaskID: '{}'". - format(iteration, self.container_name, task_state, - self.assigned_ip, self.kernel_id, task_id)) + self.log.debug( + "{}: Waiting to connect to docker container. 
" + "Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}', TaskID: '{}'".format( + iteration, + self.container_name, + task_state, + self.assigned_ip, + self.kernel_id, + task_id, + ) + ) return task_state def terminate_container_resources(self): @@ -107,51 +125,64 @@ def terminate_container_resources(self): try: service.remove() # Service still exists, attempt removal except Exception as err: - self.log.debug("{} Termination of service: {} raised exception: {}". - format(self.__class__.__name__, service.name, err)) + self.log.debug( + "{} Termination of service: {} raised exception: {}".format( + self.__class__.__name__, service.name, err + ) + ) if isinstance(err, NotFound): pass # okay if its not found else: result = False - self.log.warning("Error occurred removing service: {}".format(err)) + self.log.warning(f"Error occurred removing service: {err}") if result: - self.log.debug("{}.terminate_container_resources, service {}, kernel ID: {} has been terminated.". - format(self.__class__.__name__, self.container_name, self.kernel_id)) + self.log.debug( + "{}.terminate_container_resources, service {}, kernel ID: {} has been terminated.".format( + self.__class__.__name__, self.container_name, self.kernel_id + ) + ) self.container_name = None result = None # maintain jupyter contract else: - self.log.warning("{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.". 
- format(self.__class__.__name__, self.container_name, self.kernel_id)) + self.log.warning( + "{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.".format( + self.__class__.__name__, self.container_name, self.kernel_id + ) + ) return result class DockerProcessProxy(ContainerProcessProxy): """Kernel lifecycle management for Docker kernels (non-Swarm).""" + def __init__(self, kernel_manager, proxy_config): - super(DockerProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) def launch_process(self, kernel_cmd, **kwargs): """Launches the specified process within a Docker environment.""" # Convey the network to the docker launch script - kwargs['env']['EG_DOCKER_NETWORK'] = docker_network - kwargs['env']['EG_DOCKER_MODE'] = 'docker' - return super(DockerProcessProxy, self).launch_process(kernel_cmd, **kwargs) + kwargs["env"]["EG_DOCKER_NETWORK"] = docker_network + kwargs["env"]["EG_DOCKER_MODE"] = "docker" + return super().launch_process(kernel_cmd, **kwargs) def get_initial_states(self): """Return list of states indicating container is starting (includes running).""" - return {'created', 'running'} + return {"created", "running"} def _get_container(self): # Fetches the container object corresponding the the kernel_id label. # Only used when docker mode == regular (not swarm) container = None - containers = client.containers.list(filters={'label': 'kernel_id=' + self.kernel_id}) + containers = client.containers.list(filters={"label": "kernel_id=" + self.kernel_id}) num_containers = len(containers) if num_containers != 1: if num_containers > 1: - raise RuntimeError("{}: Found more than one container ({}) for kernel_id '{}'!". 
- format(self.__class__.__name__, num_containers, self.kernel_id)) + raise RuntimeError( + "{}: Found more than one container ({}) for kernel_id '{}'!".format( + self.__class__.__name__, num_containers, self.kernel_id + ) + ) else: container = containers[0] return container @@ -167,26 +198,38 @@ def get_container_status(self, iteration): self.container_name = container.name if container.status: container_status = container.status - if container_status == 'running' and self.assigned_host == '': + if container_status == "running" and self.assigned_host == "": # Container is running, capture IP # we'll use this as a fallback in case we don't find our network - self.assigned_ip = container.attrs.get('NetworkSettings').get('IPAddress') - networks = container.attrs.get('NetworkSettings').get('Networks') + self.assigned_ip = container.attrs.get("NetworkSettings").get("IPAddress") + networks = container.attrs.get("NetworkSettings").get("Networks") if len(networks) > 0: - self.assigned_ip = networks.get(docker_network).get('IPAddress') - self.log.debug("Using assigned_ip {} from docker network '{}'.". - format(self.assigned_ip, docker_network)) + self.assigned_ip = networks.get(docker_network).get("IPAddress") + self.log.debug( + "Using assigned_ip {} from docker network '{}'.".format( + self.assigned_ip, docker_network + ) + ) else: - self.log.warning("Docker network '{}' could not be located in container attributes - " - "using assigned_ip '{}'.".format(docker_network, self.assigned_ip)) + self.log.warning( + "Docker network '{}' could not be located in container attributes - " + "using assigned_ip '{}'.".format(docker_network, self.assigned_ip) + ) self.assigned_host = self.container_name if iteration: # only log if iteration is not None (otherwise poll() is too noisy) - self.log.debug("{}: Waiting to connect to docker container. " - "Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}'". 
- format(iteration, self.container_name, container_status, self.assigned_ip, self.kernel_id)) + self.log.debug( + "{}: Waiting to connect to docker container. " + "Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}'".format( + iteration, + self.container_name, + container_status, + self.assigned_ip, + self.kernel_id, + ) + ) return container_status @@ -200,20 +243,29 @@ def terminate_container_resources(self): try: container.remove(force=True) # Container still exists, attempt forced removal except Exception as err: - self.log.debug("Container termination for container: {} raised exception: {}". - format(container.name, err)) + self.log.debug( + "Container termination for container: {} raised exception: {}".format( + container.name, err + ) + ) if isinstance(err, NotFound): pass # okay if its not found else: result = False - self.log.warning("Error occurred removing container: {}".format(err)) + self.log.warning(f"Error occurred removing container: {err}") if result: - self.log.debug("{}.terminate_container_resources, container {}, kernel ID: {} has been terminated.". - format(self.__class__.__name__, self.container_name, self.kernel_id)) + self.log.debug( + "{}.terminate_container_resources, container {}, kernel ID: {} has been terminated.".format( + self.__class__.__name__, self.container_name, self.kernel_id + ) + ) self.container_name = None result = None # maintain jupyter contract else: - self.log.warning("{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.". 
- format(self.__class__.__name__, self.container_name, self.kernel_id)) + self.log.warning( + "{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.".format( + self.__class__.__name__, self.container_name, self.kernel_id + ) + ) return result diff --git a/enterprise_gateway/services/processproxies/k8s.py b/enterprise_gateway/services/processproxies/k8s.py index 535321cb3..cb7fe669b 100644 --- a/enterprise_gateway/services/processproxies/k8s.py +++ b/enterprise_gateway/services/processproxies/k8s.py @@ -5,22 +5,24 @@ import logging import os import re -import urllib3 +import urllib3 from kubernetes import client, config -from .container import ContainerProcessProxy from ..sessions.kernelsessionmanager import KernelSessionManager +from .container import ContainerProcessProxy urllib3.disable_warnings() # Default logging level of kubernetes produces too much noise - raise to warning only. -logging.getLogger('kubernetes').setLevel(os.environ.get('EG_KUBERNETES_LOG_LEVEL', logging.WARNING)) +logging.getLogger("kubernetes").setLevel(os.environ.get("EG_KUBERNETES_LOG_LEVEL", logging.WARNING)) -enterprise_gateway_namespace = os.environ.get('EG_NAMESPACE', 'default') -default_kernel_service_account_name = os.environ.get('EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME', 'default') -kernel_cluster_role = os.environ.get('EG_KERNEL_CLUSTER_ROLE', 'cluster-admin') -share_gateway_namespace = bool(os.environ.get('EG_SHARED_NAMESPACE', 'False').lower() == 'true') +enterprise_gateway_namespace = os.environ.get("EG_NAMESPACE", "default") +default_kernel_service_account_name = os.environ.get( + "EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME", "default" +) +kernel_cluster_role = os.environ.get("EG_KERNEL_CLUSTER_ROLE", "cluster-admin") +share_gateway_namespace = bool(os.environ.get("EG_SHARED_NAMESPACE", "False").lower() == "true") config.load_incluster_config() @@ -29,8 +31,9 @@ class KubernetesProcessProxy(ContainerProcessProxy): """ Kernel lifecycle management for 
Kubernetes kernels. """ + def __init__(self, kernel_manager, proxy_config): - super(KubernetesProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) self.kernel_pod_name = None self.kernel_namespace = None @@ -42,16 +45,20 @@ async def launch_process(self, kernel_cmd, **kwargs): # Kubernetes relies on many internal env variables. Since EG is running in a k8s pod, we will # transfer its env to each launched kernel. - kwargs['env'] = dict(os.environ, **kwargs['env']) # FIXME: Should probably use process-whitelist in JKG #280 + kwargs["env"] = dict( + os.environ, **kwargs["env"] + ) # FIXME: Should probably use process-whitelist in JKG #280 self.kernel_pod_name = self._determine_kernel_pod_name(**kwargs) - self.kernel_namespace = self._determine_kernel_namespace(**kwargs) # will create namespace if not provided + self.kernel_namespace = self._determine_kernel_namespace( + **kwargs + ) # will create namespace if not provided - await super(KubernetesProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) return self def get_initial_states(self): """Return list of states indicating container is starting (includes running).""" - return {'Pending', 'Running'} + return {"Pending", "Running"} def get_container_status(self, iteration): """Return current container state.""" @@ -59,24 +66,32 @@ def get_container_status(self, iteration): # is used for the assigned_ip. 
pod_status = None kernel_label_selector = "kernel_id=" + self.kernel_id + ",component=kernel" - ret = client.CoreV1Api().list_namespaced_pod(namespace=self.kernel_namespace, - label_selector=kernel_label_selector) + ret = client.CoreV1Api().list_namespaced_pod( + namespace=self.kernel_namespace, label_selector=kernel_label_selector + ) if ret and ret.items: pod_info = ret.items[0] self.container_name = pod_info.metadata.name if pod_info.status: pod_status = pod_info.status.phase - if pod_status == 'Running' and self.assigned_host == '': + if pod_status == "Running" and self.assigned_host == "": # Pod is running, capture IP self.assigned_ip = pod_info.status.pod_ip self.assigned_host = self.container_name self.assigned_node_ip = pod_info.status.host_ip if iteration: # only log if iteration is not None (otherwise poll() is too noisy) - self.log.debug("{}: Waiting to connect to k8s pod in namespace '{}'. " - "Name: '{}', Status: '{}', Pod IP: '{}', KernelID: '{}'". - format(iteration, self.kernel_namespace, self.container_name, pod_status, - self.assigned_ip, self.kernel_id)) + self.log.debug( + "{}: Waiting to connect to k8s pod in namespace '{}'. " + "Name: '{}', Status: '{}', Pod IP: '{}', KernelID: '{}'".format( + iteration, + self.kernel_namespace, + self.container_name, + pod_status, + self.assigned_ip, + self.kernel_id, + ) + ) return pod_status @@ -87,12 +102,12 @@ def terminate_container_resources(self): # the process of restarting the kernel, then that's our target, else just delete the pod. result = False - body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy='Background') + body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy="Background") if self.delete_kernel_namespace and not self.kernel_manager.restarting: - object_name = 'namespace' + object_name = "namespace" else: - object_name = 'pod' + object_name = "pod" # Delete the namespace or pod... 
try: @@ -102,46 +117,57 @@ def terminate_container_resources(self): # indicate the phase value. if self.delete_kernel_namespace and not self.kernel_manager.restarting: - v1_status = client.CoreV1Api().delete_namespace(name=self.kernel_namespace, body=body) + v1_status = client.CoreV1Api().delete_namespace( + name=self.kernel_namespace, body=body + ) else: - v1_status = client.CoreV1Api().delete_namespaced_pod(namespace=self.kernel_namespace, - body=body, name=self.container_name) + v1_status = client.CoreV1Api().delete_namespaced_pod( + namespace=self.kernel_namespace, body=body, name=self.container_name + ) if v1_status and v1_status.status: - termination_stati = ['Succeeded', 'Failed', 'Terminating'] + termination_stati = ["Succeeded", "Failed", "Terminating"] if any(status in v1_status.status for status in termination_stati): result = True if not result: - self.log.warning("Unable to delete {}: {}".format(object_name, v1_status)) + self.log.warning(f"Unable to delete {object_name}: {v1_status}") except Exception as err: if isinstance(err, client.rest.ApiException) and err.status == 404: result = True # okay if its not found else: - self.log.warning("Error occurred deleting {}: {}".format(object_name, err)) + self.log.warning(f"Error occurred deleting {object_name}: {err}") if result: - self.log.debug("KubernetesProcessProxy.terminate_container_resources, pod: {}.{}, kernel ID: {} has " - "been terminated.".format(self.kernel_namespace, self.container_name, self.kernel_id)) + self.log.debug( + "KubernetesProcessProxy.terminate_container_resources, pod: {}.{}, kernel ID: {} has " + "been terminated.".format( + self.kernel_namespace, self.container_name, self.kernel_id + ) + ) self.container_name = None result = None # maintain jupyter contract else: - self.log.warning("KubernetesProcessProxy.terminate_container_resources, pod: {}.{}, kernel ID: {} has " - "not been terminated.".format(self.kernel_namespace, self.container_name, self.kernel_id)) + self.log.warning( 
+ "KubernetesProcessProxy.terminate_container_resources, pod: {}.{}, kernel ID: {} has " + "not been terminated.".format( + self.kernel_namespace, self.container_name, self.kernel_id + ) + ) return result def _determine_kernel_pod_name(self, **kwargs): - pod_name = kwargs['env'].get('KERNEL_POD_NAME') + pod_name = kwargs["env"].get("KERNEL_POD_NAME") if pod_name is None: - pod_name = KernelSessionManager.get_kernel_username(**kwargs) + '-' + self.kernel_id + pod_name = KernelSessionManager.get_kernel_username(**kwargs) + "-" + self.kernel_id # Rewrite pod_name to be compatible with DNS name convention # And put back into env since kernel needs this - pod_name = re.sub('[^0-9a-z]+', '-', pod_name.lower()) - while pod_name.startswith('-'): + pod_name = re.sub("[^0-9a-z]+", "-", pod_name.lower()) + while pod_name.startswith("-"): pod_name = pod_name[1:] - while pod_name.endswith('-'): + while pod_name.endswith("-"): pod_name = pod_name[:-1] - kwargs['env']['KERNEL_POD_NAME'] = pod_name + kwargs["env"]["KERNEL_POD_NAME"] = pod_name return pod_name @@ -149,22 +175,27 @@ def _determine_kernel_namespace(self, **kwargs): # Since we need the service account name regardless of whether we're creating the namespace or not, # get it now. - service_account_name = KubernetesProcessProxy._determine_kernel_service_account_name(**kwargs) + service_account_name = KubernetesProcessProxy._determine_kernel_service_account_name( + **kwargs + ) # If KERNEL_NAMESPACE was provided, then we assume it already exists. If not provided, then we'll # create the namespace and record that we'll want to delete it as well. - namespace = kwargs['env'].get('KERNEL_NAMESPACE') + namespace = kwargs["env"].get("KERNEL_NAMESPACE") if namespace is None: # check if share gateway namespace is configured... if share_gateway_namespace: # if so, set to EG namespace namespace = enterprise_gateway_namespace - self.log.warning("Shared namespace has been configured. All kernels will reside in EG namespace: {}". 
- format(namespace)) + self.log.warning( + "Shared namespace has been configured. All kernels will reside in EG namespace: {}".format( + namespace + ) + ) else: namespace = self._create_kernel_namespace(service_account_name) - kwargs['env']['KERNEL_NAMESPACE'] = namespace # record in env since kernel needs this + kwargs["env"]["KERNEL_NAMESPACE"] = namespace # record in env since kernel needs this else: - self.log.info("KERNEL_NAMESPACE provided by client: {}".format(namespace)) + self.log.info(f"KERNEL_NAMESPACE provided by client: {namespace}") return namespace @@ -172,8 +203,10 @@ def _determine_kernel_namespace(self, **kwargs): def _determine_kernel_service_account_name(**kwargs): # Check if an account name was provided. If not, set to the default name (which can be set # from the EG env as well). Finally, ensure the env value is set. - service_account_name = kwargs['env'].get('KERNEL_SERVICE_ACCOUNT_NAME', default_kernel_service_account_name) - kwargs['env']['KERNEL_SERVICE_ACCOUNT_NAME'] = service_account_name + service_account_name = kwargs["env"].get( + "KERNEL_SERVICE_ACCOUNT_NAME", default_kernel_service_account_name + ) + kwargs["env"]["KERNEL_SERVICE_ACCOUNT_NAME"] = service_account_name return service_account_name def _create_kernel_namespace(self, service_account_name): @@ -186,7 +219,7 @@ def _create_kernel_namespace(self, service_account_name): namespace = self.kernel_pod_name # create the namespace ... 
- labels = {'app': 'enterprise-gateway', 'component': 'kernel', 'kernel_id': self.kernel_id} + labels = {"app": "enterprise-gateway", "component": "kernel", "kernel_id": self.kernel_id} namespace_metadata = client.V1ObjectMeta(name=namespace, labels=labels) body = client.V1Namespace(metadata=namespace_metadata) @@ -194,25 +227,35 @@ def _create_kernel_namespace(self, service_account_name): try: client.CoreV1Api().create_namespace(body=body) self.delete_kernel_namespace = True - self.log.info("Created kernel namespace: {}".format(namespace)) + self.log.info(f"Created kernel namespace: {namespace}") # Now create a RoleBinding for this namespace for the default ServiceAccount. We'll reference # the ClusterRole, but that will only be applied for this namespace. This prevents the need for # creating a role each time. self._create_role_binding(namespace, service_account_name) except Exception as err: - if isinstance(err, client.rest.ApiException) and err.status == 409 and self.kernel_manager.restarting: - self.delete_kernel_namespace = True # okay if ns already exists and restarting, still mark for delete - self.log.info("Re-using kernel namespace: {}".format(namespace)) + if ( + isinstance(err, client.rest.ApiException) + and err.status == 409 + and self.kernel_manager.restarting + ): + self.delete_kernel_namespace = ( + True # okay if ns already exists and restarting, still mark for delete + ) + self.log.info(f"Re-using kernel namespace: {namespace}") else: if self.delete_kernel_namespace: - reason = "Error occurred creating role binding for namespace '{}': {}".format(namespace, err) + reason = "Error occurred creating role binding for namespace '{}': {}".format( + namespace, err + ) # delete the namespace since we'll be using the EG namespace... 
- body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy='Background') + body = client.V1DeleteOptions( + grace_period_seconds=0, propagation_policy="Background" + ) client.CoreV1Api().delete_namespace(name=namespace, body=body) - self.log.warning("Deleted kernel namespace: {}".format(namespace)) + self.log.warning(f"Deleted kernel namespace: {namespace}") else: - reason = "Error occurred creating namespace '{}': {}".format(namespace, err) + reason = f"Error occurred creating namespace '{namespace}': {err}" self.log_and_raise(http_status_code=500, reason=reason) return namespace @@ -227,27 +270,41 @@ def _create_role_binding(self, namespace, service_account_name): # We will not use a try/except clause here since _create_kernel_namespace will handle exceptions. role_binding_name = kernel_cluster_role # use same name for binding as cluster role - labels = {'app': 'enterprise-gateway', 'component': 'kernel', 'kernel_id': self.kernel_id} + labels = {"app": "enterprise-gateway", "component": "kernel", "kernel_id": self.kernel_id} binding_metadata = client.V1ObjectMeta(name=role_binding_name, labels=labels) - binding_role_ref = client.V1RoleRef(api_group='', kind='ClusterRole', name=kernel_cluster_role) - binding_subjects = client.V1Subject(api_group='', kind='ServiceAccount', name=service_account_name, - namespace=namespace) - - body = client.V1RoleBinding(kind='RoleBinding', metadata=binding_metadata, role_ref=binding_role_ref, - subjects=[binding_subjects]) - - client.RbacAuthorizationV1Api().create_namespaced_role_binding(namespace=namespace, body=body) - self.log.info("Created kernel role-binding '{}' in namespace: {} for service account: {}". 
- format(role_binding_name, namespace, service_account_name)) + binding_role_ref = client.V1RoleRef( + api_group="", kind="ClusterRole", name=kernel_cluster_role + ) + binding_subjects = client.V1Subject( + api_group="", kind="ServiceAccount", name=service_account_name, namespace=namespace + ) + + body = client.V1RoleBinding( + kind="RoleBinding", + metadata=binding_metadata, + role_ref=binding_role_ref, + subjects=[binding_subjects], + ) + + client.RbacAuthorizationV1Api().create_namespaced_role_binding( + namespace=namespace, body=body + ) + self.log.info( + "Created kernel role-binding '{}' in namespace: {} for service account: {}".format( + role_binding_name, namespace, service_account_name + ) + ) def get_process_info(self): """Captures the base information necessary for kernel persistence relative to kubernetes.""" - process_info = super(KubernetesProcessProxy, self).get_process_info() - process_info.update({'kernel_ns': self.kernel_namespace, 'delete_ns': self.delete_kernel_namespace}) + process_info = super().get_process_info() + process_info.update( + {"kernel_ns": self.kernel_namespace, "delete_ns": self.delete_kernel_namespace} + ) return process_info def load_process_info(self, process_info): """Loads the base information necessary for kernel persistence relative to kubernetes.""" - super(KubernetesProcessProxy, self).load_process_info(process_info) - self.kernel_namespace = process_info['kernel_ns'] - self.delete_kernel_namespace = process_info['delete_ns'] + super().load_process_info(process_info) + self.kernel_namespace = process_info["kernel_ns"] + self.delete_kernel_namespace = process_info["delete_ns"] diff --git a/enterprise_gateway/services/processproxies/processproxy.py b/enterprise_gateway/services/processproxies/processproxy.py index c96e2f7a0..01a6ec7f1 100644 --- a/enterprise_gateway/services/processproxies/processproxy.py +++ b/enterprise_gateway/services/processproxies/processproxy.py @@ -10,8 +10,6 @@ import json import logging import os 
-import paramiko -import pexpect import random import re import signal @@ -19,18 +17,30 @@ import sys import time import warnings - from asyncio import Event, TimeoutError from calendar import timegm -from Cryptodome.Cipher import PKCS1_v1_5, AES +from enum import Enum +from socket import ( + AF_INET, + SHUT_RDWR, + SHUT_WR, + SO_REUSEADDR, + SOCK_STREAM, + SOL_SOCKET, + gethostbyname, + gethostname, + socket, + timeout, +) + +import paramiko +import pexpect +from Cryptodome.Cipher import AES, PKCS1_v1_5 from Cryptodome.PublicKey import RSA from Cryptodome.Util.Padding import unpad -from enum import Enum from jupyter_client import launch_kernel, localinterfaces from jupyter_server import _tz from jupyter_server.serverapp import random_ports -from socket import gethostbyname, gethostname, socket, timeout,\ - AF_INET, SO_REUSEADDR, SOCK_STREAM, SOL_SOCKET, SHUT_RDWR, SHUT_WR from tornado import web from tornado.ioloop import PeriodicCallback from traitlets.config import SingletonConfigurable @@ -39,27 +49,29 @@ from ..sessions.kernelsessionmanager import KernelSessionManager # Default logging level of paramiko produces too much noise - raise to warning only. -logging.getLogger('paramiko').setLevel(os.getenv('EG_SSH_LOG_LEVEL', logging.WARNING)) +logging.getLogger("paramiko").setLevel(os.getenv("EG_SSH_LOG_LEVEL", logging.WARNING)) # Pop certain env variables that don't need to be logged, e.g. 
remote_pwd -env_pop_list = ['EG_REMOTE_PWD', 'LS_COLORS'] - -default_kernel_launch_timeout = float(os.getenv('EG_KERNEL_LAUNCH_TIMEOUT', '30')) -max_poll_attempts = int(os.getenv('EG_MAX_POLL_ATTEMPTS', '10')) -poll_interval = float(os.getenv('EG_POLL_INTERVAL', '0.5')) -socket_timeout = float(os.getenv('EG_SOCKET_TIMEOUT', '0.005')) -tunneling_enabled = bool(os.getenv('EG_ENABLE_TUNNELING', 'False').lower() == 'true') -ssh_port = int(os.getenv('EG_SSH_PORT', '22')) -eg_response_ip = os.getenv('EG_RESPONSE_IP', None) -desired_response_port = int(os.getenv('EG_RESPONSE_PORT', 8877)) -response_port_retries = int(os.getenv('EG_RESPONSE_PORT_RETRIES', 10)) -response_addr_any = bool(os.getenv('EG_RESPONSE_ADDR_ANY', 'False').lower() == 'true') - -connection_interval = poll_interval / 100.0 # already polling, so make connection timeout a fraction of outer poll +env_pop_list = ["EG_REMOTE_PWD", "LS_COLORS"] + +default_kernel_launch_timeout = float(os.getenv("EG_KERNEL_LAUNCH_TIMEOUT", "30")) +max_poll_attempts = int(os.getenv("EG_MAX_POLL_ATTEMPTS", "10")) +poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5")) +socket_timeout = float(os.getenv("EG_SOCKET_TIMEOUT", "0.005")) +tunneling_enabled = bool(os.getenv("EG_ENABLE_TUNNELING", "False").lower() == "true") +ssh_port = int(os.getenv("EG_SSH_PORT", "22")) +eg_response_ip = os.getenv("EG_RESPONSE_IP", None) +desired_response_port = int(os.getenv("EG_RESPONSE_PORT", 8877)) +response_port_retries = int(os.getenv("EG_RESPONSE_PORT_RETRIES", 10)) +response_addr_any = bool(os.getenv("EG_RESPONSE_ADDR_ANY", "False").lower() == "true") + +connection_interval = ( + poll_interval / 100.0 +) # already polling, so make connection timeout a fraction of outer poll # Minimum port range size and max retries -min_port_range_size = int(os.getenv('EG_MIN_PORT_RANGE_SIZE', '1000')) -max_port_range_retries = int(os.getenv('EG_MAX_PORT_RANGE_RETRIES', '5')) +min_port_range_size = int(os.getenv("EG_MIN_PORT_RANGE_SIZE", "1000")) 
+max_port_range_retries = int(os.getenv("EG_MAX_PORT_RANGE_RETRIES", "5")) # Number of seconds in 100 years as the max keep-alive interval value. max_keep_alive_interval = 100 * 365 * 24 * 60 * 60 @@ -68,7 +80,7 @@ # when determining the response address. For example, on systems with many network interfaces, # some may have their IPs appear the local interfaces list (e.g., docker's 172.17.0.* is an example) # that should not be used. This env can be used to indicate such IPs. -prohibited_local_ips = os.getenv('EG_PROHIBITED_LOCAL_IPS', '').split(',') +prohibited_local_ips = os.getenv("EG_PROHIBITED_LOCAL_IPS", "").split(",") def _get_local_ip(): @@ -95,12 +107,15 @@ class KernelChannel(Enum): """ Enumeration used to better manage tunneling """ + SHELL = "SHELL" IOPUB = "IOPUB" STDIN = "STDIN" HEARTBEAT = "HB" CONTROL = "CONTROL" - COMMUNICATION = "EG_COMM" # Optional channel for remote launcher to issue interrupts - NOT a ZMQ channel + COMMUNICATION = ( + "EG_COMM" # Optional channel for remote launcher to issue interrupts - NOT a ZMQ channel + ) class Response(Event): @@ -122,26 +137,26 @@ def response(self, value): class ResponseManager(SingletonConfigurable): """Singleton that manages the responses from each kernel launcher at startup. - This singleton does the following: - 1. Acquires a public and private RSA key pair at first use to encrypt and decrypt the - received responses. The public key is sent to the launcher during startup - and is used by the launcher to encrypt the AES key the launcher uses to encrypt - the connection information, while the private key remains in the server and is - used to decrypt the AES key from the response - which it then uses to decrypt - the connection information. - 2. Creates a single socket based on the configuration settings that is listened on - via a periodic callback. - 3. 
On receipt, it decrypts the response (key then connection info) and posts the - response payload to a map identified by the kernel_id embedded in the response. - 4. Provides a wait mechanism for callers to poll to get their connection info - based on their registration (of kernel_id). - """ + This singleton does the following: + 1. Acquires a public and private RSA key pair at first use to encrypt and decrypt the + received responses. The public key is sent to the launcher during startup + and is used by the launcher to encrypt the AES key the launcher uses to encrypt + the connection information, while the private key remains in the server and is + used to decrypt the AES key from the response - which it then uses to decrypt + the connection information. + 2. Creates a single socket based on the configuration settings that is listened on + via a periodic callback. + 3. On receipt, it decrypts the response (key then connection info) and posts the + response payload to a map identified by the kernel_id embedded in the response. + 4. Provides a wait mechanism for callers to poll to get their connection info + based on their registration (of kernel_id). + """ KEY_SIZE = 1024 # Can be small since its only used to {en,de}crypt the AES key. _instance = None def __init__(self, **kwargs): - super(ResponseManager, self).__init__(**kwargs) + super().__init__(**kwargs) self._response_ip = None self._response_port = None self._response_socket = None @@ -150,7 +165,7 @@ def __init__(self, **kwargs): # Create encryption keys... self._private_key = RSA.generate(ResponseManager.KEY_SIZE) self._public_key = self._private_key.publickey() - self._public_pem = self._public_key.export_key('PEM') + self._public_pem = self._public_key.export_key("PEM") # Event facility... 
self._response_registry = {} @@ -161,14 +176,16 @@ def __init__(self, **kwargs): @property def public_key(self) -> str: """Provides the string-form of public key PEM with header/footer/newlines stripped.""" - return self._public_pem.decode()\ - .replace('-----BEGIN PUBLIC KEY-----', '')\ - .replace('-----END PUBLIC KEY-----', '')\ - .replace('\n', '') + return ( + self._public_pem.decode() + .replace("-----BEGIN PUBLIC KEY-----", "") + .replace("-----END PUBLIC KEY-----", "") + .replace("\n", "") + ) @property def response_address(self) -> str: - return self._response_ip + ':' + str(self._response_port) + return self._response_ip + ":" + str(self._response_port) def register_event(self, kernel_id: str) -> None: """Register kernel_id so its connection information can be processed.""" @@ -189,8 +206,8 @@ def _prepare_response_socket(self): # (which is the default). # Multiple IP bindings should be configured for containerized configurations (k8s) that need to # launch kernels into external YARN clusters. - bind_ip = (local_ip if eg_response_ip is None else eg_response_ip) - bind_ip = (bind_ip if response_addr_any is False else '') + bind_ip = local_ip if eg_response_ip is None else eg_response_ip + bind_ip = bind_ip if response_addr_any is False else "" response_port = desired_response_port for port in random_ports(response_port, response_port_retries + 1): @@ -200,11 +217,15 @@ def _prepare_response_socket(self): if e.errno == errno.EADDRINUSE: self.log.info(f"Response port {port} is already in use, trying another port...") continue - elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)): - self.log.warning(f"Permission to bind to response port {port} denied - continuing...") + elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)): + self.log.warning( + f"Permission to bind to response port {port} denied - continuing..." 
+ ) continue else: - raise RuntimeError(f"Failed to bind to port '{port}' for response address due to: '{e}'") + raise RuntimeError( + f"Failed to bind to port '{port}' for response address due to: '{e}'" + ) else: response_port = port break @@ -213,13 +234,15 @@ def _prepare_response_socket(self): self.log.critical(msg) raise RuntimeError(msg) - self.log.info(f"Enterprise Gateway is bound to port {response_port} " - f"for remote kernel connection information.") + self.log.info( + f"Enterprise Gateway is bound to port {response_port} " + f"for remote kernel connection information." + ) s.listen(128) s.settimeout(socket_timeout) self._response_socket = s self._response_port = response_port - self._response_ip = (local_ip if eg_response_ip is None else eg_response_ip) + self._response_ip = local_ip if eg_response_ip is None else eg_response_ip def _start_response_manager(self) -> None: """If not already started, creates and starts the periodic callback to process connections.""" @@ -242,23 +265,25 @@ def stop_response_manager(self) -> None: async def _process_connections(self) -> None: """Checks the socket for data, if found, decrypts the payload and posts to 'wait map'.""" loop = asyncio.get_event_loop() - data = '' + data = "" try: conn, addr = await loop.sock_accept(self._response_socket) while True: buffer = await loop.sock_recv(conn, 1024) if not buffer: # send is complete, process payload - self.log.debug("Received payload '{}'".format(data)) + self.log.debug(f"Received payload '{data}'") payload = self._decode_payload(data) - self.log.debug("Decrypted payload '{}'".format(payload)) + self.log.debug(f"Decrypted payload '{payload}'") self._post_connection(payload) break - data = data + buffer.decode(encoding='utf-8') # append what we received until we get no more... + data = data + buffer.decode( + encoding="utf-8" + ) # append what we received until we get no more... 
conn.close() except timeout: pass except Exception as ex: - self.log.error("Failure occurred processing connection: {}".format(ex)) + self.log.error(f"Failure occurred processing connection: {ex}") def _decode_payload(self, data) -> dict: """ @@ -286,48 +311,57 @@ def _decode_payload(self, data) -> dict: try: payload = json.loads(payload_str) # Get the version - version = payload.get('version') + version = payload.get("version") if version is None: - raise ValueError("Payload received from kernel does not include a version indicator!") - self.log.debug("Version {} payload received.".format(version)) + raise ValueError( + "Payload received from kernel does not include a version indicator!" + ) + self.log.debug(f"Version {version} payload received.") if version == 1: # Decrypt the AES key using the RSA private key - encrypted_aes_key = base64.b64decode(payload['key'].encode()) + encrypted_aes_key = base64.b64decode(payload["key"].encode()) cipher = PKCS1_v1_5.new(self._private_key) - aes_key = cipher.decrypt(encrypted_aes_key, b'\x42') + aes_key = cipher.decrypt(encrypted_aes_key, b"\x42") # Per docs, don't convey that decryption returned sentinel. So just let # things fail "naturally". # Decrypt and unpad the connection information using the just-decrypted AES key cipher = AES.new(aes_key, AES.MODE_ECB) - encrypted_connection_info = base64.b64decode(payload['conn_info'].encode()) + encrypted_connection_info = base64.b64decode(payload["conn_info"].encode()) connection_info_str = unpad(cipher.decrypt(encrypted_connection_info), 16).decode() else: - raise ValueError("Unexpected version indicator received: {}!".format(version)) + raise ValueError(f"Unexpected version indicator received: {version}!") except Exception as ex: # Could be version "0", walk the registrant kernel-ids and attempt to decrypt using each as a key. # If none are found, re-raise the triggering exception. 
- self.log.debug("decode_payload exception - {}: {}".format(ex.__class__.__name__, ex)) + self.log.debug(f"decode_payload exception - {ex.__class__.__name__}: {ex}") connection_info_str = None for kernel_id in self._response_registry.keys(): aes_key = kernel_id[0:16] try: - cipher = AES.new(aes_key.encode('utf-8'), AES.MODE_ECB) + cipher = AES.new(aes_key.encode("utf-8"), AES.MODE_ECB) decrypted_payload = cipher.decrypt(payload_str) # Version "0" responses use custom padding, so remove that here. - connection_info_str = "".join([decrypted_payload.decode("utf-8").rsplit("}", 1)[0], "}"]) + connection_info_str = "".join( + [decrypted_payload.decode("utf-8").rsplit("}", 1)[0], "}"] + ) # Try to load as JSON new_connection_info = json.loads(connection_info_str) # Add kernel_id into dict, then dump back to string so this can be processed as valid response - new_connection_info['kernel_id'] = kernel_id + new_connection_info["kernel_id"] = kernel_id connection_info_str = json.dumps(new_connection_info) - self.log.warning("WARNING!!!! Legacy kernel response received for kernel_id '{}'! " - "Update kernel launchers to current version!".format(kernel_id)) + self.log.warning( + "WARNING!!!! Legacy kernel response received for kernel_id '{}'! " + "Update kernel launchers to current version!".format(kernel_id) + ) break # If we're here, we made it! except Exception as ex2: # Any exception fails this experiment and we continue - self.log.debug("Received the following exception detecting legacy kernel response - {}: {}". 
- format(ex2.__class__.__name__, ex2)) + self.log.debug( + "Received the following exception detecting legacy kernel response - {}: {}".format( + ex2.__class__.__name__, ex2 + ) + ) connection_info_str = None if connection_info_str is None: @@ -339,7 +373,7 @@ def _decode_payload(self, data) -> dict: def _post_connection(self, connection_info: dict) -> None: """Posts connection information into "wait map" based on kernel_id value.""" - kernel_id = connection_info.get('kernel_id') + kernel_id = connection_info.get("kernel_id") if kernel_id is None: self.log.error("No kernel id found in response! Kernel launch will fail.") return @@ -347,7 +381,7 @@ def _post_connection(self, connection_info: dict) -> None: self.log.error("Kernel id '{}' has not been registered and will not be processed!") return - self.log.debug("Connection info received for kernel '{}': {}".format(kernel_id, connection_info)) + self.log.debug(f"Connection info received for kernel '{kernel_id}': {connection_info}") self._response_registry[kernel_id].response = connection_info @@ -375,13 +409,16 @@ def __init__(self, kernel_manager, proxy_config): self.proxy_config = proxy_config # Initialize to 0 IP primarily so restarts of remote kernels don't encounter local-only enforcement during # relaunch (see jupyter_client.manager.start_kernel(). - self.kernel_manager.ip = '0.0.0.0' + self.kernel_manager.ip = "0.0.0.0" self.log = kernel_manager.log # extract the kernel_id string from the connection file and set the KERNEL_ID environment variable if self.kernel_manager.kernel_id is None: - self.kernel_manager.kernel_id = os.path.basename(self.kernel_manager.connection_file). 
\ - replace('kernel-', '').replace('.json', '') + self.kernel_manager.kernel_id = ( + os.path.basename(self.kernel_manager.connection_file) + .replace("kernel-", "") + .replace(".json", "") + ) self.kernel_id = self.kernel_manager.kernel_id self.kernel_launch_timeout = default_kernel_launch_timeout @@ -392,12 +429,14 @@ def __init__(self, kernel_manager, proxy_config): # Handle authorization sets... # Take union of unauthorized users... self.unauthorized_users = self.kernel_manager.unauthorized_users - if proxy_config.get('unauthorized_users'): - self.unauthorized_users = self.unauthorized_users.union(proxy_config.get('unauthorized_users').split(',')) + if proxy_config.get("unauthorized_users"): + self.unauthorized_users = self.unauthorized_users.union( + proxy_config.get("unauthorized_users").split(",") + ) # Let authorized users override global value - if set on kernelspec... - if proxy_config.get('authorized_users'): - self.authorized_users = set(proxy_config.get('authorized_users').split(',')) + if proxy_config.get("authorized_users"): + self.authorized_users = set(proxy_config.get("authorized_users").split(",")) else: self.authorized_users = self.kernel_manager.authorized_users @@ -450,23 +489,23 @@ async def launch_process(self, kernel_cmd, **kwargs): kwargs : optional Additional arguments used during the launch - primarily the env to use for the kernel. """ - env_dict = kwargs.get('env') + env_dict = kwargs.get("env") if env_dict is None: env_dict = dict(os.environ.copy()) - kwargs.update({'env': env_dict}) + kwargs.update({"env": env_dict}) # see if KERNEL_LAUNCH_TIMEOUT was included from user. 
If so, override default - if env_dict.get('KERNEL_LAUNCH_TIMEOUT'): - self.kernel_launch_timeout = float(env_dict.get('KERNEL_LAUNCH_TIMEOUT')) + if env_dict.get("KERNEL_LAUNCH_TIMEOUT"): + self.kernel_launch_timeout = float(env_dict.get("KERNEL_LAUNCH_TIMEOUT")) # add the applicable kernel_id and language to the env dict - env_dict['KERNEL_ID'] = self.kernel_id + env_dict["KERNEL_ID"] = self.kernel_id - kernel_language = 'unknown-kernel-language' + kernel_language = "unknown-kernel-language" if len(self.kernel_manager.kernel_spec.language) > 0: kernel_language = self.kernel_manager.kernel_spec.language.lower() # if already set in env: stanza, let that override. - env_dict['KERNEL_LANGUAGE'] = env_dict.get('KERNEL_LANGUAGE', kernel_language) + env_dict["KERNEL_LANGUAGE"] = env_dict.get("KERNEL_LANGUAGE", kernel_language) # Remove any potential sensitive (e.g., passwords) or annoying values (e.g., LG_COLORS) for k in env_pop_list: @@ -474,7 +513,7 @@ async def launch_process(self, kernel_cmd, **kwargs): self._enforce_authorization(**kwargs) - self.log.debug("BaseProcessProxy.launch_process() env: {}".format(kwargs.get('env'))) + self.log.debug("BaseProcessProxy.launch_process() env: {}".format(kwargs.get("env"))) def launch_kernel(self, cmd, **kwargs): """ @@ -485,7 +524,7 @@ def launch_kernel(self, cmd, **kwargs): """ # Remove kernel_headers - kwargs.pop('kernel_headers', None) + kwargs.pop("kernel_headers", None) return launch_kernel(cmd, **kwargs) def cleanup(self): @@ -520,8 +559,11 @@ def wait(self): else: break else: - self.log.warning("Wait timeout of {} seconds exhausted. Continuing...". - format(max_poll_attempts * poll_interval)) + self.log.warning( + "Wait timeout of {} seconds exhausted. 
Continuing...".format( + max_poll_attempts * poll_interval + ) + ) def send_signal(self, signum): """ @@ -568,14 +610,14 @@ def kill(self): if i > max_poll_attempts: # Send -9 signal if process is still alive if self.local_proc: result = self.local_proc.kill() - self.log.debug("BaseProcessProxy.kill(): {}".format(result)) + self.log.debug(f"BaseProcessProxy.kill(): {result}") else: if self.ip and self.pid > 0: if BaseProcessProxyABC.ip_is_local(self.ip): result = self.local_signal(signal.SIGKILL) else: result = self.remote_signal(signal.SIGKILL) - self.log.debug("SIGKILL signal sent to pid: {}".format(self.pid)) + self.log.debug(f"SIGKILL signal sent to pid: {self.pid}") return result def terminate(self): @@ -589,14 +631,14 @@ def terminate(self): result = None if self.local_proc: result = self.local_proc.terminate() - self.log.debug("BaseProcessProxy.terminate(): {}".format(result)) + self.log.debug(f"BaseProcessProxy.terminate(): {result}") else: if self.ip and self.pid > 0: if BaseProcessProxyABC.ip_is_local(self.ip): result = self.local_signal(signal.SIGTERM) else: result = self.remote_signal(signal.SIGTERM) - self.log.debug("SIGTERM signal sent to pid: {}".format(self.pid)) + self.log.debug(f"SIGTERM signal sent to pid: {self.pid}") return result @staticmethod @@ -627,9 +669,7 @@ def _get_ssh_client(self, host): else: ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) if self.remote_pwd: - self.log.debug( - "Connecting to remote host with username and password." 
- ) + self.log.debug("Connecting to remote host with username and password.") ssh.connect( host_ip, port=ssh_port, @@ -654,10 +694,8 @@ def _get_ssh_client(self, host): error_message = error_message_prefix + ( " provided" if self.remote_pwd else "-less SSH" ) - error_message = ( - error_message + "and EG_REMOTE_GSS_SSH={!r} ({})".format( - self._use_gss_raw, self.use_gss - ) + error_message = error_message + "and EG_REMOTE_GSS_SSH={!r} ({})".format( + self._use_gss_raw, self.use_gss ) self.log_and_raise(http_status_code=http_status_code, reason=error_message) @@ -701,21 +739,24 @@ def remote_signal(self, signum): """ val = None # if we have a process group, use that, else use the pid... - target = '-' + str(self.pgid) if self.pgid > 0 and signum > 0 else str(self.pid) - cmd = 'kill -{} {}; echo $?'.format(signum, target) + target = "-" + str(self.pgid) if self.pgid > 0 and signum > 0 else str(self.pid) + cmd = f"kill -{signum} {target}; echo $?" if signum > 0: # only log if meaningful signal (not for poll) - self.log.debug("Sending signal: {} to target: {} on host: {}".format(signum, target, self.ip)) + self.log.debug(f"Sending signal: {signum} to target: {target} on host: {self.ip}") try: result = self.rsh(self.ip, cmd) except Exception as e: - self.log.warning("Remote signal({}) to '{}' on host '{}' failed with exception '{}'.". - format(signum, target, self.ip, e)) + self.log.warning( + "Remote signal({}) to '{}' on host '{}' failed with exception '{}'.".format( + signum, target, self.ip, e + ) + ) return False for line in result: val = line.strip() - if val == '0': + if val == "0": return None return False @@ -725,13 +766,13 @@ def local_signal(self, signum): Sends signal `signum` to local process. """ # if we have a process group, use that, else use the pid... 
- target = '-' + str(self.pgid) if self.pgid > 0 and signum > 0 else str(self.pid) + target = "-" + str(self.pgid) if self.pgid > 0 and signum > 0 else str(self.pid) if signum > 0: # only log if meaningful signal (not for poll) - self.log.debug("Sending signal: {} to target: {}".format(signum, target)) + self.log.debug(f"Sending signal: {signum} to target: {target}") - cmd = ['kill', '-' + str(signum), target] + cmd = ["kill", "-" + str(signum), target] - with open(os.devnull, 'w') as devnull: + with open(os.devnull, "w") as devnull: result = subprocess.call(cmd, stderr=devnull) if result == 0: @@ -756,11 +797,11 @@ def _enforce_authorization(self, **kwargs): by KERNEL_USERNAME when impersonation_enabled is True. """ # Get the env - env_dict = kwargs.get('env') + env_dict = kwargs.get("env") # Although it may already be set in the env, just override in case it was only set via command line or config # Convert to string since execve() (called by Popen in base classes) wants string values. - env_dict['EG_IMPERSONATION_ENABLED'] = str(self.kernel_manager.impersonation_enabled) + env_dict["EG_IMPERSONATION_ENABLED"] = str(self.kernel_manager.impersonation_enabled) # Ensure KERNEL_USERNAME is set kernel_username = KernelSessionManager.get_kernel_username(**kwargs) @@ -772,17 +813,22 @@ def _enforce_authorization(self, **kwargs): # If authorized users are non-empty, ensure user is in that set. if self.authorized_users.__len__() > 0: if kernel_username not in self.authorized_users: - self._raise_authorization_error(kernel_username, "not in the set of users authorized") + self._raise_authorization_error( + kernel_username, "not in the set of users authorized" + ) def _raise_authorization_error(self, kernel_username, differentiator_clause): """ Raises a 403 status code after building the appropriate message. """ kernel_name = self.kernel_manager.kernel_spec.display_name - kernel_clause = " '{}'.".format(kernel_name) if kernel_name is not None else "s." 
- error_message = "User '{}' is {} to start kernel{} " \ - "Ensure KERNEL_USERNAME is set to an appropriate value and retry the request.". \ - format(kernel_username, differentiator_clause, kernel_clause) + kernel_clause = f" '{kernel_name}'." if kernel_name is not None else "s." + error_message = ( + "User '{}' is {} to start kernel{} " + "Ensure KERNEL_USERNAME is set to an appropriate value and retry the request.".format( + kernel_username, differentiator_clause, kernel_clause + ) + ) self.log_and_raise(http_status_code=403, reason=error_message) def get_process_info(self): @@ -792,7 +838,7 @@ def get_process_info(self): The superclass method must always be called first to ensure proper ordering. Since this is the most base class, no call to `super()` is necessary. """ - process_info = {'pid': self.pid, 'pgid': self.pgid, 'ip': self.ip} + process_info = {"pid": self.pid, "pgid": self.pgid, "ip": self.ip} return process_info def load_process_info(self, process_info): @@ -802,10 +848,10 @@ def load_process_info(self, process_info): The superclass method must always be called first to ensure proper ordering. Since this is the most base class, no call to `super()` is necessary. """ - self.pid = process_info['pid'] - self.pgid = process_info['pgid'] - self.ip = process_info['ip'] - self.kernel_manager.ip = process_info['ip'] + self.pid = process_info["pid"] + self.pgid = process_info["pgid"] + self.ip = process_info["ip"] + self.kernel_manager.ip = process_info["ip"] def _validate_port_range(self): """ @@ -813,8 +859,8 @@ def _validate_port_range(self): """ # Let port_range override global value - if set on kernelspec... 
port_range = self.kernel_manager.port_range - if self.proxy_config.get('port_range'): - port_range = self.proxy_config.get('port_range') + if self.proxy_config.get("port_range"): + port_range = self.proxy_config.get("port_range") try: port_ranges = port_range.split("..") @@ -825,9 +871,13 @@ def _validate_port_range(self): port_range_size = self.upper_port - self.lower_port if port_range_size != 0: if port_range_size < min_port_range_size: - self.log_and_raise(http_status_code=500, reason="Port range validation failed for range: '{}'. " - "Range size must be at least {} as specified by env EG_MIN_PORT_RANGE_SIZE". - format(port_range, min_port_range_size)) + self.log_and_raise( + http_status_code=500, + reason="Port range validation failed for range: '{}'. " + "Range size must be at least {} as specified by env EG_MIN_PORT_RANGE_SIZE".format( + port_range, min_port_range_size + ), + ) # According to RFC 793, port is a 16-bit unsigned int. Which means the port # numbers must be in the range (0, 65535). However, within that range, @@ -851,17 +901,29 @@ def _validate_port_range(self): # In case of JEG, we will accept ports in the range 1024 - 65535 as these days # admins use dedicated hosts for individual services. if self.lower_port < 1024 or self.lower_port > 65535: - self.log_and_raise(http_status_code=500, reason="Invalid port range '{}' specified. " - "Range for valid port numbers is (1024, 65535).".format(port_range)) + self.log_and_raise( + http_status_code=500, + reason="Invalid port range '{}' specified. " + "Range for valid port numbers is (1024, 65535).".format(port_range), + ) if self.upper_port < 1024 or self.upper_port > 65535: - self.log_and_raise(http_status_code=500, reason="Invalid port range '{}' specified. " - "Range for valid port numbers is (1024, 65535).".format(port_range)) + self.log_and_raise( + http_status_code=500, + reason="Invalid port range '{}' specified. 
" + "Range for valid port numbers is (1024, 65535).".format(port_range), + ) except ValueError as ve: - self.log_and_raise(http_status_code=500, reason="Port range validation failed for range: '{}'. " - "Error was: {}".format(port_range, ve)) + self.log_and_raise( + http_status_code=500, + reason="Port range validation failed for range: '{}'. " + "Error was: {}".format(port_range, ve), + ) except IndexError as ie: - self.log_and_raise(http_status_code=500, reason="Port range validation failed for range: '{}'. " - "Error was: {}".format(port_range, ie)) + self.log_and_raise( + http_status_code=500, + reason="Port range validation failed for range: '{}'. " + "Error was: {}".format(port_range, ie), + ) self.kernel_manager.port_range = port_range @@ -888,7 +950,7 @@ def select_ports(self, count): sock.close() return ports - def select_socket(self, ip=''): + def select_socket(self, ip=""): """ Creates and returns a socket whose port adheres to the configured port range, if applicable. @@ -911,8 +973,11 @@ def select_socket(self, ip=''): except Exception: retries = retries + 1 if retries > max_port_range_retries: - self.log_and_raise(http_status_code=500, reason="Failed to locate port within range {} after {} " - "retries!".format(self.kernel_manager.port_range, max_port_range_retries)) + self.log_and_raise( + http_status_code=500, + reason="Failed to locate port within range {} after {} " + "retries!".format(self.kernel_manager.port_range, max_port_range_retries), + ) return sock def _get_candidate_port(self): @@ -956,12 +1021,13 @@ class LocalProcessProxy(BaseProcessProxyABC): This process proxy is used when no other process proxy is configured. 
""" + def __init__(self, kernel_manager, proxy_config): - super(LocalProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) kernel_manager.ip = localinterfaces.LOCALHOST async def launch_process(self, kernel_cmd, **kwargs): - await super(LocalProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) # launch the local run.sh self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) @@ -972,8 +1038,11 @@ async def launch_process(self, kernel_cmd, **kwargs): except OSError: pass self.ip = local_ip - self.log.info("Local kernel launched on '{}', pid: {}, pgid: {}, KernelID: {}, cmd: '{}'" - .format(self.ip, self.pid, self.pgid, self.kernel_id, kernel_cmd)) + self.log.info( + "Local kernel launched on '{}', pid: {}, pgid: {}, KernelID: {}, cmd: '{}'".format( + self.ip, self.pid, self.pgid, self.kernel_id, kernel_cmd + ) + ) return self @@ -983,26 +1052,30 @@ class RemoteProcessProxy(BaseProcessProxyABC, metaclass=abc.ABCMeta): """ def __init__(self, kernel_manager, proxy_config): - super(RemoteProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) self.response_socket = None self.start_time = None self.assigned_ip = None - self.assigned_host = '' + self.assigned_host = "" self.comm_ip = None self.comm_port = 0 - self.tunneled_connect_info = None # Contains the destination connection info when tunneling in use + self.tunneled_connect_info = ( + None # Contains the destination connection info when tunneling in use + ) self.tunnel_processes = {} - self.response_manager = ResponseManager.instance() # This will create the key pair and socket on first use + self.response_manager = ( + ResponseManager.instance() + ) # This will create the key pair and socket on first use self.response_manager.register_event(self.kernel_id) self.kernel_manager.response_address = self.response_manager.response_address 
self.kernel_manager.public_key = self.response_manager.public_key async def launch_process(self, kernel_cmd, **kwargs): # Pass along port-range info to kernels... - kwargs['env']['EG_MIN_PORT_RANGE_SIZE'] = str(min_port_range_size) - kwargs['env']['EG_MAX_PORT_RANGE_RETRIES'] = str(max_port_range_retries) + kwargs["env"]["EG_MIN_PORT_RANGE_SIZE"] = str(min_port_range_size) + kwargs["env"]["EG_MAX_PORT_RANGE_RETRIES"] = str(max_port_range_retries) - await super(RemoteProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) # remove connection file because a) its not necessary any longer since launchers will return # the connection information which will (sufficiently) remain in memory and b) launchers # landing on this node may want to write to this file and be denied access. @@ -1033,8 +1106,10 @@ def detect_launch_failure(self): poll_result = self.local_proc.poll() if poll_result and poll_result > 0: self.local_proc.wait() - error_message = "Error occurred during launch of KernelID: {}. " \ - "Check Enterprise Gateway log for more information.".format(self.kernel_id) + error_message = ( + "Error occurred during launch of KernelID: {}. 
" + "Check Enterprise Gateway log for more information.".format(self.kernel_id) + ) self.local_proc = None self.log_and_raise(http_status_code=500, reason=error_message) @@ -1050,47 +1125,78 @@ def _tunnel_to_kernel(self, connection_info, server, port=ssh_port, key=None): lports = self.select_ports(5) - rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port'], cf['control_port'] + rports = ( + cf["shell_port"], + cf["iopub_port"], + cf["stdin_port"], + cf["hb_port"], + cf["control_port"], + ) - channels = KernelChannel.SHELL, KernelChannel.IOPUB, KernelChannel.STDIN, \ - KernelChannel.HEARTBEAT, KernelChannel.CONTROL + channels = ( + KernelChannel.SHELL, + KernelChannel.IOPUB, + KernelChannel.STDIN, + KernelChannel.HEARTBEAT, + KernelChannel.CONTROL, + ) - remote_ip = cf['ip'] + remote_ip = cf["ip"] if not tunnel.try_passwordless_ssh(server + ":" + str(port), key): - self.log_and_raise(http_status_code=403, reason="Must use password-less scheme by setting up the " - "SSH public key on the cluster nodes") + self.log_and_raise( + http_status_code=403, + reason="Must use password-less scheme by setting up the " + "SSH public key on the cluster nodes", + ) for lp, rp, kc in zip(lports, rports, channels): self._create_ssh_tunnel(kc, lp, rp, remote_ip, server, port, key) return tuple(lports) - def _tunnel_to_port(self, kernel_channel, remote_ip, remote_port, server, port=ssh_port, key=None): + def _tunnel_to_port( + self, kernel_channel, remote_ip, remote_port, server, port=ssh_port, key=None + ): """ Analogous to _tunnel_to_kernel, but deals with a single port. This will typically be called for any one-off ports that require tunnelling. Note - this method assumes that passwordless ssh is in use and has been previously validated. 
""" local_port = self.select_ports(1)[0] - self._create_ssh_tunnel(kernel_channel, local_port, remote_port, remote_ip, server, port, key) + self._create_ssh_tunnel( + kernel_channel, local_port, remote_port, remote_ip, server, port, key + ) return local_port - def _create_ssh_tunnel(self, kernel_channel, local_port, remote_port, remote_ip, server, port, key): + def _create_ssh_tunnel( + self, kernel_channel, local_port, remote_port, remote_ip, server, port, key + ): """ Creates an SSH tunnel between the local and remote port/server for the given kernel channel. """ channel_name = kernel_channel.value - self.log.debug("Creating SSH tunnel for '{}': 127.0.0.1:'{}' to '{}':'{}'" - .format(channel_name, local_port, remote_ip, remote_port)) + self.log.debug( + "Creating SSH tunnel for '{}': 127.0.0.1:'{}' to '{}':'{}'".format( + channel_name, local_port, remote_ip, remote_port + ) + ) try: - process = self._spawn_ssh_tunnel(kernel_channel, local_port, remote_port, remote_ip, server, port, key) + process = self._spawn_ssh_tunnel( + kernel_channel, local_port, remote_port, remote_ip, server, port, key + ) self.tunnel_processes[channel_name] = process except Exception as e: - self.log_and_raise(http_status_code=500, reason="Could not open SSH tunnel for port {}. Exception: '{}'" - .format(channel_name, e)) + self.log_and_raise( + http_status_code=500, + reason="Could not open SSH tunnel for port {}. Exception: '{}'".format( + channel_name, e + ), + ) - def _spawn_ssh_tunnel(self, kernel_channel, local_port, remote_port, remote_ip, server, port=ssh_port, key=None): + def _spawn_ssh_tunnel( + self, kernel_channel, local_port, remote_port, remote_ip, server, port=ssh_port, key=None + ): """ This method spawns a child process to create an SSH tunnel and returns the spawned process. 
ZMQ's implementation returns a pid on UNIX based platforms and a process handle/reference on @@ -1107,23 +1213,32 @@ def _spawn_ssh_tunnel(self, kernel_channel, local_port, remote_port, remote_ip, the spawned process to be owned by the parent process. This allows the parent process to control the lifecycle of it's child processes and do appropriate cleanup during termination. """ - if sys.platform == 'win32': + if sys.platform == "win32": ssh_server = server + ":" + str(port) return tunnel.paramiko_tunnel(local_port, remote_port, ssh_server, remote_ip, key) else: - ssh = "ssh -p %s -o ServerAliveInterval=%i" % \ - (port, self._get_keep_alive_interval(kernel_channel)) + ssh = "ssh -p %s -o ServerAliveInterval=%i" % ( + port, + self._get_keep_alive_interval(kernel_channel), + ) cmd = "%s -S none -L 127.0.0.1:%i:%s:%i %s" % ( - ssh, local_port, remote_ip, remote_port, server) - return pexpect.spawn(cmd, env=os.environ.copy().pop('SSH_ASKPASS', None)) + ssh, + local_port, + remote_ip, + remote_port, + server, + ) + return pexpect.spawn(cmd, env=os.environ.copy().pop("SSH_ASKPASS", None)) def _get_keep_alive_interval(self, kernel_channel): cull_idle_timeout = self.kernel_manager.cull_idle_timeout - if (kernel_channel == KernelChannel.COMMUNICATION or - kernel_channel == KernelChannel.CONTROL or - cull_idle_timeout <= 0 or - cull_idle_timeout > max_keep_alive_interval): + if ( + kernel_channel == KernelChannel.COMMUNICATION + or kernel_channel == KernelChannel.CONTROL + or cull_idle_timeout <= 0 + or cull_idle_timeout > max_keep_alive_interval + ): # For COMMUNICATION and CONTROL channels, keep-alive interval will be set to # max_keep_alive_interval to make sure that the SSH session does not timeout # or expire for a very long time. 
Also, if cull_idle_timeout is unspecified, @@ -1150,11 +1265,16 @@ async def receive_connection_info(self): ready_to_connect = True except Exception as e: if type(e) is timeout or type(e) is TimeoutError: - self.log.debug("Waiting for KernelID '{}' to send connection info from host '{}' - retrying..." - .format(self.kernel_id, self.assigned_host)) + self.log.debug( + "Waiting for KernelID '{}' to send connection info from host '{}' - retrying...".format( + self.kernel_id, self.assigned_host + ) + ) else: - error_message = "Exception occurred waiting for connection file response for KernelId '{}' "\ + error_message = ( + "Exception occurred waiting for connection file response for KernelId '{}' " "on host '{}': {}".format(self.kernel_id, self.assigned_host, e) + ) self.kill() self.log_and_raise(http_status_code=500, reason=error_message) @@ -1167,9 +1287,13 @@ def _setup_connection_info(self, connect_info): tunneling is enabled, these ports will be tunneled with the original port information recorded. 
""" - self.log.debug("Host assigned to the kernel is: '{}' '{}'".format(self.assigned_host, self.assigned_ip)) + self.log.debug( + f"Host assigned to the kernel is: '{self.assigned_host}' '{self.assigned_ip}'" + ) - connect_info['ip'] = self.assigned_ip # Set connection to IP address of system where the kernel was launched + connect_info[ + "ip" + ] = self.assigned_ip # Set connection to IP address of system where the kernel was launched if tunneling_enabled is True: # Capture the current(tunneled) connect_info relative to the IP and ports (including the @@ -1178,38 +1302,51 @@ def _setup_connection_info(self, connect_info): # Open tunnels to the 5 ZMQ kernel ports tunnel_ports = self._tunnel_to_kernel(connect_info, self.assigned_ip) - self.log.debug("Local ports used to create SSH tunnels: '{}'".format(tunnel_ports)) + self.log.debug(f"Local ports used to create SSH tunnels: '{tunnel_ports}'") # Replace the remote connection ports with the local ports used to create SSH tunnels. - connect_info['ip'] = '127.0.0.1' - connect_info['shell_port'] = tunnel_ports[0] - connect_info['iopub_port'] = tunnel_ports[1] - connect_info['stdin_port'] = tunnel_ports[2] - connect_info['hb_port'] = tunnel_ports[3] - connect_info['control_port'] = tunnel_ports[4] + connect_info["ip"] = "127.0.0.1" + connect_info["shell_port"] = tunnel_ports[0] + connect_info["iopub_port"] = tunnel_ports[1] + connect_info["stdin_port"] = tunnel_ports[2] + connect_info["hb_port"] = tunnel_ports[3] + connect_info["control_port"] = tunnel_ports[4] # If a communication port was provided, tunnel it - if 'comm_port' in connect_info: - self.comm_ip = connect_info['ip'] - tunneled_comm_port = int(connect_info['comm_port']) - self.comm_port = self._tunnel_to_port(KernelChannel.COMMUNICATION, self.assigned_ip, - tunneled_comm_port, self.assigned_ip) - connect_info['comm_port'] = self.comm_port - self.log.debug("Established gateway communication to: {}:{} for KernelID '{}' via tunneled port " - 
"127.0.0.1:{}".format(self.assigned_ip, tunneled_comm_port, - self.kernel_id, self.comm_port)) + if "comm_port" in connect_info: + self.comm_ip = connect_info["ip"] + tunneled_comm_port = int(connect_info["comm_port"]) + self.comm_port = self._tunnel_to_port( + KernelChannel.COMMUNICATION, + self.assigned_ip, + tunneled_comm_port, + self.assigned_ip, + ) + connect_info["comm_port"] = self.comm_port + self.log.debug( + "Established gateway communication to: {}:{} for KernelID '{}' via tunneled port " + "127.0.0.1:{}".format( + self.assigned_ip, tunneled_comm_port, self.kernel_id, self.comm_port + ) + ) else: # tunneling not enabled, still check for and record communication port - if 'comm_port' in connect_info: - self.comm_ip = connect_info['ip'] - self.comm_port = int(connect_info['comm_port']) - self.log.debug("Established gateway communication to: {}:{} for KernelID '{}'". - format(self.assigned_ip, self.comm_port, self.kernel_id)) + if "comm_port" in connect_info: + self.comm_ip = connect_info["ip"] + self.comm_port = int(connect_info["comm_port"]) + self.log.debug( + "Established gateway communication to: {}:{} for KernelID '{}'".format( + self.assigned_ip, self.comm_port, self.kernel_id + ) + ) # If no communication port was provided, record that fact as well since this is useful to know - if 'comm_port' not in connect_info: - self.log.debug("Gateway communication port has NOT been established for KernelID '{}' (optional).". - format(self.kernel_id)) + if "comm_port" not in connect_info: + self.log.debug( + "Gateway communication port has NOT been established for KernelID '{}' (optional).".format( + self.kernel_id + ) + ) self._update_connection(connect_info) @@ -1220,23 +1357,33 @@ def _update_connection(self, connect_info): Note: Do NOT update connect_info with IP and other such artifacts in this method/function. """ # Reset the ports to 0 so load can take place (which resets the members to value from file or json)... 
- self.kernel_manager.stdin_port = self.kernel_manager.iopub_port = self.kernel_manager.shell_port = \ - self.kernel_manager.hb_port = self.kernel_manager.control_port = 0 + self.kernel_manager.stdin_port = ( + self.kernel_manager.iopub_port + ) = ( + self.kernel_manager.shell_port + ) = self.kernel_manager.hb_port = self.kernel_manager.control_port = 0 if connect_info: # Load new connection information into memory. No need to write back out to a file or track loopback, etc. # The launcher may also be sending back process info, so check and extract self._extract_pid_info(connect_info) self.kernel_manager.load_connection_info(info=connect_info) - self.log.debug("Received connection info for KernelID '{}' from host '{}': {}..." - .format(self.kernel_id, self.assigned_host, connect_info)) + self.log.debug( + "Received connection info for KernelID '{}' from host '{}': {}...".format( + self.kernel_id, self.assigned_host, connect_info + ) + ) else: - error_message = "Unexpected runtime encountered for Kernel ID '{}' - " \ - "connection information is null!".format(self.kernel_id) + error_message = ( + "Unexpected runtime encountered for Kernel ID '{}' - " + "connection information is null!".format(self.kernel_id) + ) self.log_and_raise(http_status_code=500, reason=error_message) self._close_response_socket() - self.kernel_manager._connection_file_written = True # allows for cleanup of local files (as necessary) + self.kernel_manager._connection_file_written = ( + True # allows for cleanup of local files (as necessary) + ) def _close_response_socket(self): # If there's a response-socket, close it since its no longer needed. @@ -1253,23 +1400,35 @@ def _extract_pid_info(self, connect_info): """ Extracts any PID, PGID info from the payload received on the response socket. 
""" - pid = connect_info.pop('pid', None) + pid = connect_info.pop("pid", None) if pid: try: self.pid = int(pid) except ValueError: - self.log.warning("pid returned from kernel launcher is not an integer: {} - ignoring.".format(pid)) + self.log.warning( + "pid returned from kernel launcher is not an integer: {} - ignoring.".format( + pid + ) + ) pid = None - pgid = connect_info.pop('pgid', None) + pgid = connect_info.pop("pgid", None) if pgid: try: self.pgid = int(pgid) except ValueError: - self.log.warning("pgid returned from kernel launcher is not an integer: {} - ignoring.".format(pgid)) + self.log.warning( + "pgid returned from kernel launcher is not an integer: {} - ignoring.".format( + pgid + ) + ) pgid = None - if pid or pgid: # if either process ids were updated, update the ip as well and don't use local_proc + if ( + pid or pgid + ): # if either process ids were updated, update the ip as well and don't use local_proc self.ip = self.assigned_ip - if not BaseProcessProxyABC.ip_is_local(self.ip): # only unset local_proc if we're remote + if not BaseProcessProxyABC.ip_is_local( + self.ip + ): # only unset local_proc if we're remote self.local_proc = None async def handle_timeout(self): @@ -1277,12 +1436,18 @@ async def handle_timeout(self): Checks to see if the kernel launch timeout has been exceeded while awaiting connection info. 
""" await asyncio.sleep(poll_interval) - time_interval = RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time()) + time_interval = RemoteProcessProxy.get_time_diff( + self.start_time, RemoteProcessProxy.get_current_time() + ) if time_interval > self.kernel_launch_timeout: error_http_code = 500 - reason = "Waited too long ({}s) to get connection file".format(self.kernel_launch_timeout) - timeout_message = "KernelID: '{}' launch timeout due to: {}".format(self.kernel_id, reason) + reason = "Waited too long ({}s) to get connection file".format( + self.kernel_launch_timeout + ) + timeout_message = "KernelID: '{}' launch timeout due to: {}".format( + self.kernel_id, reason + ) await asyncio.get_event_loop().run_in_executor(None, self.kill) self.log_and_raise(http_status_code=error_http_code, reason=timeout_message) @@ -1293,11 +1458,11 @@ def cleanup(self): self.assigned_ip = None for kernel_channel, process in self.tunnel_processes.items(): - self.log.debug("cleanup: terminating {} tunnel process.".format(kernel_channel)) + self.log.debug(f"cleanup: terminating {kernel_channel} tunnel process.") process.terminate() self.tunnel_processes.clear() - super(RemoteProcessProxy, self).cleanup() + super().cleanup() def _send_listener_request(self, request, shutdown_socket=False): """ @@ -1310,7 +1475,7 @@ def _send_listener_request(self, request, shutdown_socket=False): try: sock.settimeout(socket_timeout) sock.connect((self.comm_ip, self.comm_port)) - sock.send(json.dumps(request).encode(encoding='utf-8')) + sock.send(json.dumps(request).encode(encoding="utf-8")) finally: if shutdown_socket: try: @@ -1324,9 +1489,12 @@ def _send_listener_request(self, request, shutdown_socket=False): request=request, ) else: - self.log.warning("Exception occurred attempting to shutdown communication socket to {}:{} " - "for KernelID '{}' (ignored): {}".format(self.comm_ip, self.comm_port, - self.kernel_id, str(e2))) + self.log.warning( + "Exception occurred 
attempting to shutdown communication socket to {}:{} " + "for KernelID '{}' (ignored): {}".format( + self.comm_ip, self.comm_port, self.kernel_id, str(e2) + ) + ) sock.close() else: self.log.debug( @@ -1352,21 +1520,22 @@ def send_signal(self, signum): self._send_listener_request({"signum": signum}) if signum > 0: # Polling (signum == 0) is too frequent - self.log.debug("Signal ({}) sent via gateway communication port.".format(signum)) + self.log.debug(f"Signal ({signum}) sent via gateway communication port.") return None except Exception as e: if ( isinstance(e, OSError) and e.errno == errno.ECONNREFUSED ): # Return False since there's no process. - self.log.debug( - "ERROR: ECONNREFUSED, no process listening, cannot send signal." - ) + self.log.debug("ERROR: ECONNREFUSED, no process listening, cannot send signal.") return False - self.log.warning("An unexpected exception occurred sending signal ({}) for KernelID '{}': {}" - .format(signum, self.kernel_id, str(e))) + self.log.warning( + "An unexpected exception occurred sending signal ({}) for KernelID '{}': {}".format( + signum, self.kernel_id, str(e) + ) + ) - return super(RemoteProcessProxy, self).send_signal(signum) + return super().send_signal(signum) def shutdown_listener(self): """ @@ -1378,16 +1547,19 @@ def shutdown_listener(self): if self.comm_port > 0: shutdown_request = dict() - shutdown_request['shutdown'] = 1 + shutdown_request["shutdown"] = 1 try: self._send_listener_request(shutdown_request, shutdown_socket=True) self.log.debug("Shutdown request sent to listener via gateway communication port.") except Exception as e: if not isinstance(e, OSError) or e.errno != errno.ECONNREFUSED: - self.log.warning("An unexpected exception occurred sending listener shutdown to {}:{} for " - "KernelID '{}': {}" - .format(self.comm_ip, self.comm_port, self.kernel_id, str(e))) + self.log.warning( + "An unexpected exception occurred sending listener shutdown to {}:{} for " + "KernelID '{}': {}".format( + self.comm_ip, 
self.comm_port, self.kernel_id, str(e) + ) + ) # Also terminate the tunnel process for the communication port - if in play. Failure to terminate # this process results in the kernel (launcher) appearing to remain alive following the shutdown @@ -1396,7 +1568,7 @@ def shutdown_listener(self): comm_port_name = KernelChannel.COMMUNICATION.value comm_port_tunnel = self.tunnel_processes.get(comm_port_name, None) if comm_port_tunnel: - self.log.debug("shutdown_listener: terminating {} tunnel process.".format(comm_port_name)) + self.log.debug(f"shutdown_listener: terminating {comm_port_name} tunnel process.") comm_port_tunnel.terminate() del self.tunnel_processes[comm_port_name] @@ -1404,27 +1576,34 @@ def get_process_info(self): """ Captures the base information necessary for kernel persistence relative to remote processes. """ - process_info = super(RemoteProcessProxy, self).get_process_info() - process_info.update({'assigned_ip': self.assigned_ip, - 'assigned_host': self.assigned_host, - 'comm_ip': self.comm_ip, - 'comm_port': self.comm_port, - 'tunneled_connect_info': self.tunneled_connect_info}) + process_info = super().get_process_info() + process_info.update( + { + "assigned_ip": self.assigned_ip, + "assigned_host": self.assigned_host, + "comm_ip": self.comm_ip, + "comm_port": self.comm_port, + "tunneled_connect_info": self.tunneled_connect_info, + } + ) return process_info def load_process_info(self, process_info): """ Captures the base information necessary for kernel persistence relative to remote processes. 
""" - super(RemoteProcessProxy, self).load_process_info(process_info) - self.assigned_ip = process_info['assigned_ip'] - self.assigned_host = process_info['assigned_host'] - self.comm_ip = process_info['comm_ip'] - self.comm_port = process_info['comm_port'] - if 'tunneled_connect_info' in process_info and process_info['tunneled_connect_info'] is not None: + super().load_process_info(process_info) + self.assigned_ip = process_info["assigned_ip"] + self.assigned_host = process_info["assigned_host"] + self.comm_ip = process_info["comm_ip"] + self.comm_port = process_info["comm_port"] + if ( + "tunneled_connect_info" in process_info + and process_info["tunneled_connect_info"] is not None + ): # If this was a tunneled connection, re-establish tunnels. Note, this will reset the # communication socket (comm_ip, comm_port) members as well. - self._setup_connection_info(process_info['tunneled_connect_info']) + self._setup_connection_info(process_info["tunneled_connect_info"]) def log_and_raise(self, http_status_code=None, reason=None): """ diff --git a/enterprise_gateway/services/processproxies/spark_operator.py b/enterprise_gateway/services/processproxies/spark_operator.py index a63d28f8a..eb588ce74 100644 --- a/enterprise_gateway/services/processproxies/spark_operator.py +++ b/enterprise_gateway/services/processproxies/spark_operator.py @@ -2,17 +2,18 @@ # Distributed under the terms of the Modified BSD License. 
import os + from .crd import CustomResourceProcessProxy, client -enterprise_gateway_namespace = os.environ.get('EG_NAMESPACE', 'default') +enterprise_gateway_namespace = os.environ.get("EG_NAMESPACE", "default") class SparkOperatorProcessProxy(CustomResourceProcessProxy): def __init__(self, kernel_manager, proxy_config): - super(SparkOperatorProcessProxy, self).__init__(kernel_manager, proxy_config) - self.group = 'sparkoperator.k8s.io' - self.version = 'v1beta2' - self.plural = 'sparkapplications' + super().__init__(kernel_manager, proxy_config) + self.group = "sparkoperator.k8s.io" + self.version = "v1beta2" + self.plural = "sparkapplications" def get_container_status(self, iteration): pod_status = pod_info = None @@ -23,18 +24,18 @@ def get_container_status(self, iteration): self.version, self.kernel_namespace, self.plural, - self.kernel_resource_name + self.kernel_resource_name, ) if custom_resource: - pod_name = custom_resource['status']['driverInfo']['podName'] + pod_name = custom_resource["status"]["driverInfo"]["podName"] pod_info = client.CoreV1Api().read_namespaced_pod(pod_name, self.kernel_namespace) except Exception: pass if pod_info and pod_info.status: pod_status = pod_info.status.phase - if pod_status == 'Running' and self.assigned_host == '': + if pod_status == "Running" and self.assigned_host == "": self.assigned_ip = pod_info.status.pod_ip self.assigned_host = pod_info.metadata.name self.assigned_node_ip = pod_info.status.host_ip diff --git a/enterprise_gateway/services/processproxies/yarn.py b/enterprise_gateway/services/processproxies/yarn.py index 818d3a87c..831d00e20 100644 --- a/enterprise_gateway/services/processproxies/yarn.py +++ b/enterprise_gateway/services/processproxies/yarn.py @@ -17,28 +17,31 @@ from .processproxy import RemoteProcessProxy # Default logging level of yarn-api and underlying connectionpool produce too much noise - raise to warning only. 
-logging.getLogger('yarn_api_client').setLevel(os.getenv('EG_YARN_LOG_LEVEL', logging.WARNING)) -logging.getLogger('urllib3.connectionpool').setLevel(os.environ.get('EG_YARN_LOG_LEVEL', logging.WARNING)) +logging.getLogger("yarn_api_client").setLevel(os.getenv("EG_YARN_LOG_LEVEL", logging.WARNING)) +logging.getLogger("urllib3.connectionpool").setLevel( + os.environ.get("EG_YARN_LOG_LEVEL", logging.WARNING) +) local_ip = localinterfaces.public_ips()[0] -poll_interval = float(os.getenv('EG_POLL_INTERVAL', '0.5')) -max_poll_attempts = int(os.getenv('EG_MAX_POLL_ATTEMPTS', '10')) -yarn_shutdown_wait_time = float(os.getenv('EG_YARN_SHUTDOWN_WAIT_TIME', '15.0')) +poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5")) +max_poll_attempts = int(os.getenv("EG_MAX_POLL_ATTEMPTS", "10")) +yarn_shutdown_wait_time = float(os.getenv("EG_YARN_SHUTDOWN_WAIT_TIME", "15.0")) # cert_path: Boolean, defaults to `True`, that controls # whether we verify the server's TLS certificate in yarn-api-client. # Or a string, in which case it must be a path to a CA bundle(.pem file) to use. -cert_path = os.getenv('EG_YARN_CERT_BUNDLE', True) +cert_path = os.getenv("EG_YARN_CERT_BUNDLE", True) class YarnClusterProcessProxy(RemoteProcessProxy): """ Kernel lifecycle management for YARN clusters. 
""" - initial_states = {'NEW', 'SUBMITTED', 'ACCEPTED', 'RUNNING'} - final_states = {'FINISHED', 'KILLED', 'FAILED'} + + initial_states = {"NEW", "SUBMITTED", "ACCEPTED", "RUNNING"} + final_states = {"FINISHED", "KILLED", "FAILED"} def __init__(self, kernel_manager, proxy_config): - super(YarnClusterProcessProxy, self).__init__(kernel_manager, proxy_config) + super().__init__(kernel_manager, proxy_config) self.application_id = None self.last_known_state = None self.candidate_queue = None @@ -47,16 +50,14 @@ def __init__(self, kernel_manager, proxy_config): self.pid = None self.ip = None - self.yarn_endpoint \ - = proxy_config.get('yarn_endpoint', - kernel_manager.yarn_endpoint) - self.alt_yarn_endpoint \ - = proxy_config.get('alt_yarn_endpoint', - kernel_manager.alt_yarn_endpoint) + self.yarn_endpoint = proxy_config.get("yarn_endpoint", kernel_manager.yarn_endpoint) + self.alt_yarn_endpoint = proxy_config.get( + "alt_yarn_endpoint", kernel_manager.alt_yarn_endpoint + ) - self.yarn_endpoint_security_enabled \ - = proxy_config.get('yarn_endpoint_security_enabled', - kernel_manager.yarn_endpoint_security_enabled) + self.yarn_endpoint_security_enabled = proxy_config.get( + "yarn_endpoint_security_enabled", kernel_manager.yarn_endpoint_security_enabled + ) # YARN applications tend to take longer than the default 5 second wait time. Rather than # require a command-line option for those using YARN, we'll adjust based on a local env that @@ -64,8 +65,11 @@ def __init__(self, kernel_manager, proxy_config): # the desired value. if kernel_manager.shutdown_wait_time < yarn_shutdown_wait_time: kernel_manager.shutdown_wait_time = yarn_shutdown_wait_time - self.log.debug("{class_name} shutdown wait time adjusted to {wait_time} seconds.". 
- format(class_name=type(self).__name__, wait_time=kernel_manager.shutdown_wait_time)) + self.log.debug( + "{class_name} shutdown wait time adjusted to {wait_time} seconds.".format( + class_name=type(self).__name__, wait_time=kernel_manager.shutdown_wait_time + ) + ) # If yarn resource check is enabled and it isn't available immediately, # 20% of kernel_launch_timeout is used to wait @@ -85,6 +89,7 @@ def _initialize_resource_manager(self, **kwargs): if self.yarn_endpoint_security_enabled: from requests_kerberos import HTTPKerberosAuth + auth = HTTPKerberosAuth() else: # If we have the appropriate version of yarn-api-client, use its SimpleAuth class. @@ -92,13 +97,18 @@ def _initialize_resource_manager(self, **kwargs): # access is not allowed. (Default is to allow anonymous access.) try: from yarn_api_client.auth import SimpleAuth + kernel_username = KernelSessionManager.get_kernel_username(**kwargs) auth = SimpleAuth(kernel_username) - self.log.debug(f"Using SimpleAuth with '{kernel_username}' against endpoints: {endpoints}") + self.log.debug( + f"Using SimpleAuth with '{kernel_username}' against endpoints: {endpoints}" + ) except ImportError: auth = None - self.resource_mgr = ResourceManager(service_endpoints=endpoints, auth=auth, verify=cert_path) + self.resource_mgr = ResourceManager( + service_endpoints=endpoints, auth=auth, verify=cert_path + ) self.rm_addr = self.resource_mgr.get_active_endpoint() @@ -113,15 +123,18 @@ async def launch_process(self, kernel_cmd, **kwargs): # if not available, kernel startup is not attempted self.confirm_yarn_queue_availability(**kwargs) - await super(YarnClusterProcessProxy, self).launch_process(kernel_cmd, **kwargs) + await super().launch_process(kernel_cmd, **kwargs) # launch the local run.sh - which is configured for yarn-cluster... 
self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) self.pid = self.local_proc.pid self.ip = local_ip - self.log.debug("Yarn cluster kernel launched using YARN RM address: {}, pid: {}, Kernel ID: {}, cmd: '{}'" - .format(self.rm_addr, self.local_proc.pid, self.kernel_id, kernel_cmd)) + self.log.debug( + "Yarn cluster kernel launched using YARN RM address: {}, pid: {}, Kernel ID: {}, cmd: '{}'".format( + self.rm_addr, self.local_proc.pid, self.kernel_id, kernel_cmd + ) + ) await self.confirm_remote_startup() return self @@ -158,20 +171,22 @@ def confirm_yarn_queue_availability(self, **kwargs): :param kwargs: :return: """ - env_dict = kwargs.get('env', {}) + env_dict = kwargs.get("env", {}) - executor_memory = int(env_dict.get('KERNEL_EXECUTOR_MEMORY', 0)) - driver_memory = int(env_dict.get('KERNEL_DRIVER_MEMORY', 0)) + executor_memory = int(env_dict.get("KERNEL_EXECUTOR_MEMORY", 0)) + driver_memory = int(env_dict.get("KERNEL_DRIVER_MEMORY", 0)) if executor_memory * driver_memory > 0: container_memory = self.resource_mgr.cluster_node_container_memory() if max(executor_memory, driver_memory) > container_memory: - self.log_and_raise(http_status_code=500, - reason="Container Memory not sufficient for a executor/driver allocation") + self.log_and_raise( + http_status_code=500, + reason="Container Memory not sufficient for a executor/driver allocation", + ) - candidate_queue_name = (env_dict.get('KERNEL_QUEUE', None)) - node_label = env_dict.get('KERNEL_NODE_LABEL', None) - partition_availability_threshold = float(env_dict.get('YARN_PARTITION_THRESHOLD', 95.0)) + candidate_queue_name = env_dict.get("KERNEL_QUEUE", None) + node_label = env_dict.get("KERNEL_NODE_LABEL", None) + partition_availability_threshold = float(env_dict.get("YARN_PARTITION_THRESHOLD", 95.0)) if candidate_queue_name is None or node_label is None: return @@ -183,43 +198,62 @@ def confirm_yarn_queue_availability(self, **kwargs): self.candidate_queue = 
self.resource_mgr.cluster_scheduler_queue(candidate_queue_name) if self.candidate_queue is None: - self.log.warning("Queue: {} not found in cluster." - "Availability check will not be performed".format(candidate_queue_name)) + self.log.warning( + "Queue: {} not found in cluster." + "Availability check will not be performed".format(candidate_queue_name) + ) return - self.candidate_partition = self.resource_mgr.cluster_queue_partition(self.candidate_queue, node_label) + self.candidate_partition = self.resource_mgr.cluster_queue_partition( + self.candidate_queue, node_label + ) if self.candidate_partition is None: - self.log.debug("Partition: {} not found in {} queue." - "Availability check will not be performed".format(node_label, candidate_queue_name)) + self.log.debug( + "Partition: {} not found in {} queue." + "Availability check will not be performed".format(node_label, candidate_queue_name) + ) return - self.log.debug("Checking endpoint: {} if partition: {} " - "has used capacity <= {}%".format(self.yarn_endpoint, - self.candidate_partition, partition_availability_threshold)) + self.log.debug( + "Checking endpoint: {} if partition: {} " + "has used capacity <= {}%".format( + self.yarn_endpoint, self.candidate_partition, partition_availability_threshold + ) + ) - yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(self.candidate_partition, - partition_availability_threshold) + yarn_available = self.resource_mgr.cluster_scheduler_queue_availability( + self.candidate_partition, partition_availability_threshold + ) if not yarn_available: self.log.debug( - "Retrying for {} ms since resources are not available".format(self.yarn_resource_check_wait_time)) + "Retrying for {} ms since resources are not available".format( + self.yarn_resource_check_wait_time + ) + ) while not yarn_available: self.handle_yarn_queue_timeout() yarn_available = self.resource_mgr.cluster_scheduler_queue_availability( - self.candidate_partition, 
partition_availability_threshold) + self.candidate_partition, partition_availability_threshold + ) # subtracting the total amount of time spent for polling for queue availability - self.kernel_launch_timeout -= RemoteProcessProxy.get_time_diff(self.start_time, - RemoteProcessProxy.get_current_time()) + self.kernel_launch_timeout -= RemoteProcessProxy.get_time_diff( + self.start_time, RemoteProcessProxy.get_current_time() + ) def handle_yarn_queue_timeout(self): time.sleep(poll_interval) - time_interval = RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time()) + time_interval = RemoteProcessProxy.get_time_diff( + self.start_time, RemoteProcessProxy.get_current_time() + ) if time_interval > self.yarn_resource_check_wait_time: error_http_code = 500 - reason = "Yarn Compute Resource is unavailable after {} seconds".format(self.yarn_resource_check_wait_time) + reason = "Yarn Compute Resource is unavailable after {} seconds".format( + self.yarn_resource_check_wait_time + ) self.log_and_raise(http_status_code=error_http_code, reason=reason) def poll(self): @@ -255,7 +289,7 @@ def send_signal(self, signum): # Yarn api doesn't support the equivalent to interrupts, so take our chances # via a remote signal. Note that this condition cannot check against the # signum value because altternate interrupt signals might be in play. - return super(YarnClusterProcessProxy, self).send_signal(signum) + return super().send_signal(signum) def kill(self): """Kill a kernel. 
@@ -277,10 +311,13 @@ def kill(self): result = None if result is False: # We couldn't terminate via Yarn, try remote signal - result = super(YarnClusterProcessProxy, self).kill() + result = super().kill() - self.log.debug("YarnClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}, result: {}" - .format(self.application_id, self.kernel_id, state, result)) + self.log.debug( + "YarnClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}, result: {}".format( + self.application_id, self.kernel_id, state, result + ) + ) return result def cleanup(self): @@ -288,23 +325,26 @@ def cleanup(self): # we might have a defunct process (if using waitAppCompletion = false) - so poll, kill, wait when we have # a local_proc. if self.local_proc: - self.log.debug("YarnClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...". - format(self.local_proc.pid)) - if super(YarnClusterProcessProxy, self).poll(): - super(YarnClusterProcessProxy, self).kill() - super(YarnClusterProcessProxy, self).wait() + self.log.debug( + "YarnClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...".format( + self.local_proc.pid + ) + ) + if super().poll(): + super().kill() + super().wait() self.local_proc = None # reset application id to force new query - handles kernel restarts/interrupts self.application_id = None # for cleanup, we should call the superclass last - super(YarnClusterProcessProxy, self).cleanup() + super().cleanup() async def confirm_remote_startup(self): - """ Confirms the yarn application is in a started state before returning. Should post-RUNNING states be - unexpectedly encountered (FINISHED, KILLED, FAILED) then we must throw, - otherwise the rest of the gateway will believe its talking to a valid kernel. + """Confirms the yarn application is in a started state before returning. 
Should post-RUNNING states be + unexpectedly encountered (FINISHED, KILLED, FAILED) then we must throw, + otherwise the rest of the gateway will believe its talking to a valid kernel. """ self.start_time = RemoteProcessProxy.get_current_time() i = 0 @@ -318,14 +358,21 @@ async def confirm_remote_startup(self): app_state = self._get_application_state() if app_state in YarnClusterProcessProxy.final_states: - error_message = "KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}'" \ - " during kernel startup!".format(self.kernel_id, self.application_id, app_state) + error_message = ( + "KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}'" + " during kernel startup!".format( + self.kernel_id, self.application_id, app_state + ) + ) self.log_and_raise(http_status_code=500, reason=error_message) - self.log.debug("{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'". - format(i, app_state, self.assigned_host, self.kernel_id, self.application_id)) + self.log.debug( + "{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'".format( + i, app_state, self.assigned_host, self.kernel_id, self.application_id + ) + ) - if self.assigned_host != '': + if self.assigned_host != "": ready_to_connect = await self.receive_connection_info() else: self.detect_launch_failure() @@ -333,37 +380,50 @@ async def confirm_remote_startup(self): async def handle_timeout(self): """Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.""" await asyncio.sleep(poll_interval) - time_interval = RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time()) + time_interval = RemoteProcessProxy.get_time_diff( + self.start_time, RemoteProcessProxy.get_current_time() + ) if time_interval > self.kernel_launch_timeout: - reason = "Application ID is None. Failed to submit a new application to YARN within {} seconds. " \ - "Check Enterprise Gateway log for more information.". 
\ - format(self.kernel_launch_timeout) + reason = ( + "Application ID is None. Failed to submit a new application to YARN within {} seconds. " + "Check Enterprise Gateway log for more information.".format( + self.kernel_launch_timeout + ) + ) error_http_code = 500 if self._get_application_id(True): if self._query_app_state_by_id(self.application_id) != "RUNNING": - reason = "YARN resources unavailable after {} seconds for app {}, launch timeout: {}! " \ - "Check YARN configuration.".format(time_interval, self.application_id, - self.kernel_launch_timeout) + reason = ( + "YARN resources unavailable after {} seconds for app {}, launch timeout: {}! " + "Check YARN configuration.".format( + time_interval, self.application_id, self.kernel_launch_timeout + ) + ) error_http_code = 503 else: - reason = "App {} is RUNNING, but waited too long ({} secs) to get connection file. " \ - "Check YARN logs for more information.".format(self.application_id, - self.kernel_launch_timeout) + reason = ( + "App {} is RUNNING, but waited too long ({} secs) to get connection file. 
" + "Check YARN logs for more information.".format( + self.application_id, self.kernel_launch_timeout + ) + ) await asyncio.get_event_loop().run_in_executor(None, self.kill) - timeout_message = "KernelID: '{}' launch timeout due to: {}".format(self.kernel_id, reason) + timeout_message = "KernelID: '{}' launch timeout due to: {}".format( + self.kernel_id, reason + ) self.log_and_raise(http_status_code=error_http_code, reason=timeout_message) def get_process_info(self): """Captures the base information necessary for kernel persistence relative to YARN clusters.""" - process_info = super(YarnClusterProcessProxy, self).get_process_info() - process_info.update({'application_id': self.application_id}) + process_info = super().get_process_info() + process_info.update({"application_id": self.application_id}) return process_info def load_process_info(self, process_info): """Loads the base information necessary for kernel persistence relative to YARN clusters.""" - super(YarnClusterProcessProxy, self).load_process_info(process_info) - self.application_id = process_info['application_id'] + super().load_process_info(process_info) + self.application_id = process_info["application_id"] def _get_application_state(self): # Gets the current application state using the application_id already obtained. Once the assigned host @@ -371,12 +431,12 @@ def _get_application_state(self): app_state = self.last_known_state app = self._query_app_by_id(self.application_id) if app: - if app.get('state'): - app_state = app.get('state') + if app.get("state"): + app_state = app.get("state") self.last_known_state = app_state - if self.assigned_host == '' and app.get('amHostHttpAddress'): - self.assigned_host = app.get('amHostHttpAddress').split(':')[0] + if self.assigned_host == "" and app.get("amHostHttpAddress"): + self.assigned_host = app.get("amHostHttpAddress").split(":")[0] # Set the kernel manager ip to the actual host where the application landed. 
self.assigned_ip = socket.gethostbyname(self.assigned_host) @@ -389,21 +449,29 @@ def _get_application_id(self, ignore_final_states=False): app = self._query_app_by_name(self.kernel_id) state_condition = True if type(app) is dict: - state = app.get('state') + state = app.get("state") self.last_known_state = state if ignore_final_states: state_condition = state not in YarnClusterProcessProxy.final_states - if len(app.get('id', '')) > 0 and state_condition: - self.application_id = app['id'] - time_interval = RemoteProcessProxy.get_time_diff(self.start_time, - RemoteProcessProxy.get_current_time()) - self.log.info("ApplicationID: '{}' assigned for KernelID: '{}', " - "state: {}, {} seconds after starting." - .format(app['id'], self.kernel_id, state, time_interval)) + if len(app.get("id", "")) > 0 and state_condition: + self.application_id = app["id"] + time_interval = RemoteProcessProxy.get_time_diff( + self.start_time, RemoteProcessProxy.get_current_time() + ) + self.log.info( + "ApplicationID: '{}' assigned for KernelID: '{}', " + "state: {}, {} seconds after starting.".format( + app["id"], self.kernel_id, state, time_interval + ) + ) if not self.application_id: - self.log.debug("ApplicationID not yet assigned for KernelID: '{}' - retrying...".format(self.kernel_id)) + self.log.debug( + "ApplicationID not yet assigned for KernelID: '{}' - retrying...".format( + self.kernel_id + ) + ) return self.application_id def _query_app_by_name(self, kernel_id): @@ -417,27 +485,38 @@ def _query_app_by_name(self, kernel_id): :param kernel_id: as the unique app name for query :return: The JSON object of an application. 
""" - top_most_app_id = '' + top_most_app_id = "" target_app = None try: - response = self.resource_mgr.cluster_applications(started_time_begin=str(self.start_time)) - except socket.error as sock_err: + response = self.resource_mgr.cluster_applications( + started_time_begin=str(self.start_time) + ) + except OSError as sock_err: if sock_err.errno == errno.ECONNREFUSED: - self.log.warning("YARN RM address: '{}' refused the connection. Is the resource manager running?". - format(self.rm_addr)) + self.log.warning( + "YARN RM address: '{}' refused the connection. Is the resource manager running?".format( + self.rm_addr + ) + ) else: - self.log.warning("Query for kernel ID '{}' failed with exception: {} - '{}'. Continuing...". - format(kernel_id, type(sock_err), sock_err)) + self.log.warning( + "Query for kernel ID '{}' failed with exception: {} - '{}'. Continuing...".format( + kernel_id, type(sock_err), sock_err + ) + ) except Exception as e: - self.log.warning("Query for kernel ID '{}' failed with exception: {} - '{}'. Continuing...". - format(kernel_id, type(e), e)) + self.log.warning( + "Query for kernel ID '{}' failed with exception: {} - '{}'. 
Continuing...".format( + kernel_id, type(e), e + ) + ) else: data = response.data - if type(data) is dict and type(data.get("apps")) is dict and 'app' in data.get("apps"): - for app in data['apps']['app']: - if app.get('name', '').find(kernel_id) >= 0 and app.get('id') > top_most_app_id: + if type(data) is dict and type(data.get("apps")) is dict and "app" in data.get("apps"): + for app in data["apps"]["app"]: + if app.get("name", "").find(kernel_id) >= 0 and app.get("id") > top_most_app_id: target_app = app - top_most_app_id = app.get('id') + top_most_app_id = app.get("id") return target_app def _query_app_by_id(self, app_id): @@ -450,12 +529,15 @@ def _query_app_by_id(self, app_id): try: response = self.resource_mgr.cluster_application(application_id=app_id) except Exception as e: - self.log.warning("Query for application ID '{}' failed with exception: '{}'. Continuing...". - format(app_id, e)) + self.log.warning( + "Query for application ID '{}' failed with exception: '{}'. Continuing...".format( + app_id, e + ) + ) else: data = response.data - if type(data) is dict and 'app' in data: - app = data['app'] + if type(data) is dict and "app" in data: + app = data["app"] return app @@ -469,11 +551,12 @@ def _query_app_state_by_id(self, app_id): try: response = self.resource_mgr.cluster_application_state(application_id=app_id) except Exception as e: - self.log.warning("Query for application '{}' state failed with exception: '{}'. " - "Continuing with last known state = '{}'...". - format(app_id, e, state)) + self.log.warning( + "Query for application '{}' state failed with exception: '{}'. 
" + "Continuing with last known state = '{}'...".format(app_id, e, state) + ) else: - state = response.data['state'] + state = response.data["state"] self.last_known_state = state return state @@ -489,7 +572,10 @@ def _kill_app_by_id(self, app_id): try: response = self.resource_mgr.cluster_application_kill(application_id=app_id) except Exception as e: - self.log.warning("Termination of application '{}' failed with exception: '{}'. Continuing...". - format(app_id, e)) + self.log.warning( + "Termination of application '{}' failed with exception: '{}'. Continuing...".format( + app_id, e + ) + ) return response diff --git a/enterprise_gateway/services/sessions/handlers.py b/enterprise_gateway/services/sessions/handlers.py index c368a0bd1..bec99cb2a 100644 --- a/enterprise_gateway/services/sessions/handlers.py +++ b/enterprise_gateway/services/sessions/handlers.py @@ -2,19 +2,20 @@ # Distributed under the terms of the Modified BSD License. """Tornado handlers for session CRUD.""" -import tornado import jupyter_server.services.sessions.handlers as jupyter_server_handlers +import tornado from jupyter_server.utils import ensure_async -from ...mixins import TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin +from ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin -class SessionRootHandler(TokenAuthorizationMixin, - CORSMixin, - JSONErrorsMixin, - jupyter_server_handlers.SessionRootHandler): + +class SessionRootHandler( + TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.SessionRootHandler +): """Extends the jupyter_server root session handler with token auth, CORS, and JSON errors. """ + async def get(self): """Overrides the super class method to honor the kernel listing configuration setting. 
@@ -24,10 +25,10 @@ async def get(self): tornado.web.HTTPError If eg_list_kernels is False, respond with 403 Forbidden """ - if 'eg_list_kernels' not in self.settings or not self.settings['eg_list_kernels']: - raise tornado.web.HTTPError(403, 'Forbidden') + if "eg_list_kernels" not in self.settings or not self.settings["eg_list_kernels"]: + raise tornado.web.HTTPError(403, "Forbidden") else: - await ensure_async(super(SessionRootHandler, self).get()) + await ensure_async(super().get()) default_handlers = [] diff --git a/enterprise_gateway/services/sessions/kernelsessionmanager.py b/enterprise_gateway/services/sessions/kernelsessionmanager.py index be99b889e..c5b206895 100644 --- a/enterprise_gateway/services/sessions/kernelsessionmanager.py +++ b/enterprise_gateway/services/sessions/kernelsessionmanager.py @@ -37,29 +37,38 @@ class KernelSessionManager(LoggingConfigurable): """ # Session Persistence - session_persistence_env = 'EG_KERNEL_SESSION_PERSISTENCE' + session_persistence_env = "EG_KERNEL_SESSION_PERSISTENCE" session_persistence_default_value = False - enable_persistence = Bool(session_persistence_default_value, config=True, - help="""Enable kernel session persistence (True or False). Default = False - (EG_KERNEL_SESSION_PERSISTENCE env var)""") - - @default('enable_persistence') + enable_persistence = Bool( + session_persistence_default_value, + config=True, + help="""Enable kernel session persistence (True or False). 
Default = False + (EG_KERNEL_SESSION_PERSISTENCE env var)""", + ) + + @default("enable_persistence") def session_persistence_default(self): - return bool(os.getenv(self.session_persistence_env, - str(self.session_persistence_default_value)).lower() == 'true') + return bool( + os.getenv( + self.session_persistence_env, str(self.session_persistence_default_value) + ).lower() + == "true" + ) # Persistence root - persistence_root_env = 'EG_PERSISTENCE_ROOT' - persistence_root = Unicode(config=True, - help="""Identifies the root 'directory' under which the 'kernel_sessions' node will - reside. This directory should exist. (EG_PERSISTENCE_ROOT env var)""") - - @default('persistence_root') + persistence_root_env = "EG_PERSISTENCE_ROOT" + persistence_root = Unicode( + config=True, + help="""Identifies the root 'directory' under which the 'kernel_sessions' node will + reside. This directory should exist. (EG_PERSISTENCE_ROOT env var)""", + ) + + @default("persistence_root") def persistence_root_default(self): return os.getenv(self.persistence_root_env, "/") def __init__(self, kernel_manager, **kwargs): - super(KernelSessionManager, self).__init__(**kwargs) + super().__init__(**kwargs) self.kernel_manager = kernel_manager self._sessions = dict() self._sessionsByUser = dict() @@ -83,29 +92,33 @@ def create_session(self, kernel_id, **kwargs): # Compose the kernel_session entry kernel_session = dict() - kernel_session['kernel_id'] = kernel_id - kernel_session['username'] = KernelSessionManager.get_kernel_username(**kwargs) - kernel_session['kernel_name'] = km.kernel_name + kernel_session["kernel_id"] = kernel_id + kernel_session["username"] = KernelSessionManager.get_kernel_username(**kwargs) + kernel_session["kernel_name"] = km.kernel_name # Build the inner dictionaries: connection_info, process_proxy and add to kernel_session - kernel_session['connection_info'] = km.get_connection_info() - kernel_session['launch_args'] = kwargs.copy() - kernel_session['process_info'] = 
km.process_proxy.get_process_info() if km.process_proxy else {} + kernel_session["connection_info"] = km.get_connection_info() + kernel_session["launch_args"] = kwargs.copy() + kernel_session["process_info"] = ( + km.process_proxy.get_process_info() if km.process_proxy else {} + ) self._save_session(kernel_id, kernel_session) def refresh_session(self, kernel_id): """ Refreshes the session from its persisted state. Called on kernel restarts. """ - self.log.debug("Refreshing kernel session for id: {}".format(kernel_id)) + self.log.debug(f"Refreshing kernel session for id: {kernel_id}") km = self.kernel_manager.get_kernel(kernel_id) # Compose the kernel_session entry kernel_session = self._sessions[kernel_id] # Build the inner dictionaries: connection_info, process_proxy and add to kernel_session - kernel_session['connection_info'] = km.get_connection_info() - kernel_session['process_info'] = km.process_proxy.get_process_info() if km.process_proxy else {} + kernel_session["connection_info"] = km.get_connection_info() + kernel_session["process_info"] = ( + km.process_proxy.get_process_info() if km.process_proxy else {} + ) self._save_session(kernel_id, kernel_session) def _save_session(self, kernel_id, kernel_session): @@ -113,7 +126,7 @@ def _save_session(self, kernel_id, kernel_session): kernels_lock.acquire() try: self._sessions[kernel_id] = kernel_session - username = kernel_session['username'] + username = kernel_session["username"] if username not in self._sessionsByUser: self._sessionsByUser[username] = [] self._sessionsByUser[username].append(kernel_id) @@ -141,27 +154,34 @@ def start_sessions(self): self.load_sessions() sessions_to_remove = [] for kernel_id, kernel_session in self._sessions.items(): - self.log.info("Attempting startup of persisted kernel session for id: %s..." % kernel_id) + self.log.info( + "Attempting startup of persisted kernel session for id: %s..." 
% kernel_id + ) if self._start_session(kernel_session): - self.log.info("Startup of persisted kernel session for id '{}' was successful. Client should " - "reconnect kernel.".format(kernel_id)) + self.log.info( + "Startup of persisted kernel session for id '{}' was successful. Client should " + "reconnect kernel.".format(kernel_id) + ) else: sessions_to_remove.append(kernel_id) - self.log.warn("Startup of persisted kernel session for id '{}' was not successful. Check if " - "client is still active and restart kernel.".format(kernel_id)) + self.log.warn( + "Startup of persisted kernel session for id '{}' was not successful. Check if " + "client is still active and restart kernel.".format(kernel_id) + ) self._delete_sessions(sessions_to_remove) def _start_session(self, kernel_session): # Attempt to start kernel from persisted state. if started, record kernel_session in dictionary # else delete session - kernel_id = kernel_session['kernel_id'] + kernel_id = kernel_session["kernel_id"] kernel_started = self.kernel_manager.start_kernel_from_session( kernel_id=kernel_id, - kernel_name=kernel_session['kernel_name'], - connection_info=kernel_session['connection_info'], - process_info=kernel_session['process_info'], - launch_args=kernel_session['launch_args']) + kernel_name=kernel_session["kernel_name"], + connection_info=kernel_session["connection_info"], + process_info=kernel_session["process_info"], + launch_args=kernel_session["launch_args"], + ) if not kernel_started: return False @@ -184,8 +204,11 @@ def _delete_sessions(self, kernel_ids): # Prior to removing session, update the per User list kernel_session = self._sessions.get(kernel_id, None) if kernel_session is not None: - username = kernel_session['username'] - if username in self._sessionsByUser and kernel_id in self._sessionsByUser[username]: + username = kernel_session["username"] + if ( + username in self._sessionsByUser + and kernel_id in self._sessionsByUser[username] + ): 
self._sessionsByUser[username].remove(kernel_id) self._sessions.pop(kernel_id, None) @@ -197,11 +220,11 @@ def _delete_sessions(self, kernel_ids): def pre_save_transformation(session): kernel_id = list(session.keys())[0] session_info = session[kernel_id] - if session_info.get('connection_info'): - info = session_info['connection_info'] - key = info.get('key') + if session_info.get("connection_info"): + info = session_info["connection_info"] + key = info.get("key") if key: - info['key'] = key.decode("utf8") + info["key"] = key.decode("utf8") return session @@ -209,11 +232,11 @@ def pre_save_transformation(session): def post_load_transformation(session): kernel_id = list(session.keys())[0] session_info = session[kernel_id] - if session_info.get('connection_info'): - info = session_info['connection_info'] - key = info.get('key') + if session_info.get("connection_info"): + info = session_info["connection_info"] + key = info.get("key") if key: - info['key'] = key.encode("utf8") + info["key"] = key.encode("utf8") return session @@ -222,7 +245,9 @@ def load_sessions(self): """ Load and initialize _sessions member from persistent storage. This method is called from start_sessions(). """ - raise NotImplementedError("KernelSessionManager.load_sessions() requires an implementation!") + raise NotImplementedError( + "KernelSessionManager.load_sessions() requires an implementation!" + ) # abstractmethod def load_session(self, kernel_id): @@ -230,20 +255,26 @@ def load_session(self, kernel_id): Load and initialize _sessions member from persistent storage for a single kernel. This method is called from refresh_sessions(). """ - raise NotImplementedError("KernelSessionManager.load_sessions() requires an implementation!") + raise NotImplementedError( + "KernelSessionManager.load_sessions() requires an implementation!" + ) # abstractmethod def delete_sessions(self, kernel_ids): """ Delete the sessions in persistent storage. Caller is responsible for synchronizing call. 
""" - raise NotImplementedError("KernelSessionManager.delete_sessions(kernel_ids) requires an implementation!") + raise NotImplementedError( + "KernelSessionManager.delete_sessions(kernel_ids) requires an implementation!" + ) def save_session(self, kernel_id): """ Saves the sessions dictionary to persistent store. Caller is responsible for synchronizing call. """ - raise NotImplementedError("KernelSessionManager.save_session(kernel_id) requires an implementation!") + raise NotImplementedError( + "KernelSessionManager.save_session(kernel_id) requires an implementation!" + ) def active_sessions(self, username): """ @@ -279,13 +310,13 @@ def get_kernel_username(**kwargs): str indicating kernel username """ # Get the env - env_dict = kwargs.get('env', {}) + env_dict = kwargs.get("env", {}) # Ensure KERNEL_USERNAME is set - kernel_username = env_dict.get('KERNEL_USERNAME') + kernel_username = env_dict.get("KERNEL_USERNAME") if kernel_username is None: kernel_username = getpass.getuser() - env_dict['KERNEL_USERNAME'] = kernel_username + env_dict["KERNEL_USERNAME"] = kernel_username return kernel_username @@ -297,19 +328,19 @@ class FileKernelSessionManager(KernelSessionManager): """ # Change the default to Jupyter Data Dir. 
- @default('persistence_root') + @default("persistence_root") def persistence_root_default(self): return os.getenv(self.persistence_root_env, jupyter_data_dir()) def __init__(self, kernel_manager, **kwargs): - super(FileKernelSessionManager, self).__init__(kernel_manager, **kwargs) + super().__init__(kernel_manager, **kwargs) if self.enable_persistence: - self.log.info("Kernel session persistence location: {}".format(self._get_sessions_loc())) + self.log.info(f"Kernel session persistence location: {self._get_sessions_loc()}") def delete_sessions(self, kernel_ids): if self.enable_persistence: for kernel_id in kernel_ids: - kernel_file_name = "".join([kernel_id, '.json']) + kernel_file_name = "".join([kernel_id, ".json"]) kernel_session_file_path = os.path.join(self._get_sessions_loc(), kernel_file_name) if os.path.exists(kernel_session_file_path): os.remove(kernel_session_file_path) @@ -317,31 +348,34 @@ def delete_sessions(self, kernel_ids): def save_session(self, kernel_id): if self.enable_persistence: if kernel_id is not None: - kernel_file_name = "".join([kernel_id, '.json']) + kernel_file_name = "".join([kernel_id, ".json"]) kernel_session_file_path = os.path.join(self._get_sessions_loc(), kernel_file_name) temp_session = dict() temp_session[kernel_id] = self._sessions[kernel_id] - with open(kernel_session_file_path, 'w') as fp: + with open(kernel_session_file_path, "w") as fp: json.dump(KernelSessionManager.pre_save_transformation(temp_session), fp) fp.close() def load_sessions(self): if self.enable_persistence: - kernel_session_files = [json_files for json_files in os.listdir(self._get_sessions_loc()) if - json_files.endswith('.json')] + kernel_session_files = [ + json_files + for json_files in os.listdir(self._get_sessions_loc()) + if json_files.endswith(".json") + ] for kernel_session_file in kernel_session_files: self._load_session_from_file(kernel_session_file) def load_session(self, kernel_id): if self.enable_persistence: if kernel_id is not None: - 
kernel_session_file = "".join([kernel_id, '.json']) + kernel_session_file = "".join([kernel_id, ".json"]) self._load_session_from_file(kernel_session_file) def _load_session_from_file(self, file_name): kernel_session_file_path = os.path.join(self._get_sessions_loc(), file_name) if os.path.exists(kernel_session_file_path): - self.log.debug("Loading saved session(s) from {}".format(kernel_session_file_path)) + self.log.debug(f"Loading saved session(s) from {kernel_session_file_path}") with open(kernel_session_file_path) as fp: self._sessions.update(KernelSessionManager.post_load_transformation(json.load(fp))) fp.close() diff --git a/enterprise_gateway/services/sessions/sessionmanager.py b/enterprise_gateway/services/sessions/sessionmanager.py index c06398705..95b3b44ff 100644 --- a/enterprise_gateway/services/sessions/sessionmanager.py +++ b/enterprise_gateway/services/sessions/sessionmanager.py @@ -3,6 +3,7 @@ """Session manager that keeps all its metadata in memory.""" import uuid + from tornado import web from traitlets.config.configurable import LoggingConfigurable @@ -25,11 +26,12 @@ class SessionManager(LoggingConfigurable): _columns : list Session metadata key names """ + def __init__(self, kernel_manager, *args, **kwargs): - super(SessionManager, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.kernel_manager = kernel_manager self._sessions = [] - self._columns = ['session_id', 'path', 'kernel_id'] + self._columns = ["session_id", "path", "kernel_id"] def session_exists(self, path, *args, **kwargs): """Checks to see if the session with the given path value exists. 
@@ -43,7 +45,7 @@ def session_exists(self, path, *args, **kwargs): ------- bool """ - return bool([item for item in self._sessions if item['path'] == path]) + return bool([item for item in self._sessions if item["path"] == path]) def new_session_id(self): """Creates a uuid for a new session.""" @@ -93,9 +95,7 @@ def save_session(self, session_id, path=None, kernel_id=None, *args, **kwargs): dict Session model with `session_id`, `path`, and `kernel_id` keys """ - self._sessions.append({'session_id': session_id, - 'path': path, - 'kernel_id': kernel_id}) + self._sessions.append({"session_id": session_id, "path": path, "kernel_id": kernel_id}) return self.get_session(session_id=session_id) @@ -154,7 +154,7 @@ def get_session(self, **kwargs): row = self.get_session_by_key(column, kwargs[column]) if not row: - raise web.HTTPError(404, u'Session not found: %s' % kwargs[column]) + raise web.HTTPError(404, "Session not found: %s" % kwargs[column]) return self.row_to_model(row) @@ -180,18 +180,18 @@ def update_session(self, session_id, *args, **kwargs): # no changes return - row = self.get_session_by_key('session_id', session_id) + row = self.get_session_by_key("session_id", session_id) if not row: raise KeyError self._sessions.remove(row) - if 'path' in kwargs: - row['path'] = kwargs['path'] + if "path" in kwargs: + row["path"] = kwargs["path"] - if 'kernel_id' in kwargs: - row['kernel_id'] = kwargs['kernel_id'] + if "kernel_id" in kwargs: + row["kernel_id"] = kwargs["kernel_id"] self._sessions.append(row) @@ -205,7 +205,7 @@ def row_to_model(self, row, *args, **kwargs): `path`, and `kernel` to the kernel model looked up using the `kernel_id` """ - if row['kernel_id'] not in self.kernel_manager: + if row["kernel_id"] not in self.kernel_manager: # The kernel was killed or died without deleting the session. # We can't use delete_session here because that tries to find # and shut down the kernel. 
@@ -213,11 +213,9 @@ def row_to_model(self, row, *args, **kwargs): raise KeyError model = { - 'id': row['session_id'], - 'notebook': { - 'path': row['path'] - }, - 'kernel': self.kernel_manager.kernel_model(row['kernel_id']) + "id": row["session_id"], + "notebook": {"path": row["path"]}, + "kernel": self.kernel_manager.kernel_model(row["kernel_id"]), } return model @@ -242,9 +240,9 @@ async def delete_session(self, session_id, *args, **kwargs): If the `session_id` is not in the store """ # Check that session exists before deleting - s = self.get_session_by_key('session_id', session_id) + s = self.get_session_by_key("session_id", session_id) if not s: raise KeyError - await self.kernel_manager.shutdown_kernel(s['kernel_id']) + await self.kernel_manager.shutdown_kernel(s["kernel_id"]) self._sessions.remove(s) diff --git a/enterprise_gateway/tests/__init__.py b/enterprise_gateway/tests/__init__.py index a7478206b..9e571ab0b 100644 --- a/enterprise_gateway/tests/__init__.py +++ b/enterprise_gateway/tests/__init__.py @@ -2,6 +2,7 @@ # Distributed under the terms of the Modified BSD License. from tornado import ioloop + def teardown(): """The test fixture appears to leak something on certain platforms that endlessly tries an async socket connect and fails after the tests end. @@ -12,4 +13,3 @@ def teardown(): # `Event loop is closed` exceptions. These didn't affect the test resutls, but produced output that # was otherwise misleading noise. 
# ioloop.IOLoop.current().close(True) - diff --git a/enterprise_gateway/tests/resources/kernels/kernel_defaults_test/kernel.json b/enterprise_gateway/tests/resources/kernels/kernel_defaults_test/kernel.json index 1810cde47..78fce3f55 100644 --- a/enterprise_gateway/tests/resources/kernels/kernel_defaults_test/kernel.json +++ b/enterprise_gateway/tests/resources/kernels/kernel_defaults_test/kernel.json @@ -9,11 +9,5 @@ "PROCESS_VAR1": "process_var1_default", "PROCESS_VAR2": "process_var2_default" }, - "argv": [ - "python", - "-m", - "ipykernel_launcher", - "-f", - "{connection_file}" - ] + "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"] } diff --git a/enterprise_gateway/tests/resources/public/index.html b/enterprise_gateway/tests/resources/public/index.html index 4c031950f..de7f01c23 100644 --- a/enterprise_gateway/tests/resources/public/index.html +++ b/enterprise_gateway/tests/resources/public/index.html @@ -1,9 +1,9 @@ - + - - Hello world! - - -

Hello world!

- + + Hello world! + + +

Hello world!

+ diff --git a/enterprise_gateway/tests/test_enterprise_gateway.py b/enterprise_gateway/tests/test_enterprise_gateway.py index 50e6deb91..100ae568e 100644 --- a/enterprise_gateway/tests/test_enterprise_gateway.py +++ b/enterprise_gateway/tests/test_enterprise_gateway.py @@ -7,8 +7,8 @@ import uuid from tempfile import TemporaryDirectory -from tornado.testing import gen_test from tornado.escape import json_decode, url_escape +from tornado.testing import gen_test from .test_handlers import TestHandlers @@ -16,9 +16,8 @@ class TestEnterpriseGateway(TestHandlers): - def setUp(self): - super(TestEnterpriseGateway, self).setUp() + super().setUp() # Enable debug logging if necessary # app = self.get_app() # app.settings['kernel_manager'].log.level = logging.DEBUG @@ -34,42 +33,39 @@ def test_max_kernels_per_user(self): # Request a kernel for bob bob_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{"env": {"KERNEL_USERNAME": "bob"} }' + self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "bob"} }' ) self.assertEqual(bob_response.code, 201) # Request a kernel for alice alice_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{"env": {"KERNEL_USERNAME": "alice"} }' + self.get_url("/api/kernels"), + method="POST", + body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) # Request another for alice - 403 expected failed_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', + self.get_url("/api/kernels"), + method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', - raise_error=False + raise_error=False, ) self.assertEqual(failed_response.code, 403) # Shut down the kernel for alice kernel = json_decode(alice_response.body) response = yield self.http_client.fetch( - self.get_url('/api/kernels/' + url_escape(kernel['id'])), - method='DELETE' + self.get_url("/api/kernels/" + 
url_escape(kernel["id"])), method="DELETE" ) self.assertEqual(response.code, 204) # Try again for alice - expect success alice_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{"env": {"KERNEL_USERNAME": "alice"} }' + self.get_url("/api/kernels"), + method="POST", + body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) @@ -80,23 +76,23 @@ def test_authorization(self): """ self.get_app() - self.app.authorized_users = {'bob', 'alice', 'bad_guy'} - self.app.unauthorized_users = {'bad_guy'} + self.app.authorized_users = {"bob", "alice", "bad_guy"} + self.app.unauthorized_users = {"bad_guy"} # Request a kernel for alice alice_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{"env": {"KERNEL_USERNAME": "alice"} }' + self.get_url("/api/kernels"), + method="POST", + body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) # Request a kernel for bad_guy - 403 expected failed_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', + self.get_url("/api/kernels"), + method="POST", body='{"env": {"KERNEL_USERNAME": "bad_guy"} }', - raise_error=False + raise_error=False, ) self.assertEqual(failed_response.code, 403) @@ -110,46 +106,46 @@ def test_port_range(self): self.app.port_range = "10000..10999" # range too small # Request a kernel for alice - 500 expected alice_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', + self.get_url("/api/kernels"), + method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', - raise_error=False + raise_error=False, ) self.assertEqual(alice_response.code, 500) self.app.port_range = "100..11099" # invalid lower port # Request a kernel for alice - 500 expected alice_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', + self.get_url("/api/kernels"), + method="POST", 
body='{"env": {"KERNEL_USERNAME": "alice"} }', - raise_error=False + raise_error=False, ) self.assertEqual(alice_response.code, 500) self.app.port_range = "10000..65537" # invalid upper port # Request a kernel for alice - 500 expected alice_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', + self.get_url("/api/kernels"), + method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', - raise_error=False + raise_error=False, ) self.assertEqual(alice_response.code, 500) self.app.port_range = "30000..31000" # valid range # Request a kernel for alice - 201 expected alice_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{"env": {"KERNEL_USERNAME": "alice"} }' + self.get_url("/api/kernels"), + method="POST", + body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) # validate ports are in range body = json_decode(alice_response.body) - kernel_id = body['id'] - port_list = app.settings['kernel_manager']._kernels.get(kernel_id).ports + kernel_id = body["id"] + port_list = app.settings["kernel_manager"]._kernels.get(kernel_id).ports for port in port_list: self.assertTrue(30000 <= port <= 31000) @@ -158,15 +154,17 @@ def test_port_range(self): def test_dynamic_updates(self): app = self.app # Get the actual EnterpriseGatewayApp instance s1 = time.time() - name = app.config_file_name + '.py' - with TemporaryDirectory('_1') as td1: - os.environ['JUPYTER_CONFIG_DIR'] = td1 + name = app.config_file_name + ".py" + with TemporaryDirectory("_1") as td1: + os.environ["JUPYTER_CONFIG_DIR"] = td1 config_file = pjoin(td1, name) - with open(config_file, 'w') as f: - f.writelines([ - "c.EnterpriseGatewayApp.impersonation_enabled = False\n", - "c.AsyncMappingKernelManager.cull_connected = False\n" - ]) + with open(config_file, "w") as f: + f.writelines( + [ + "c.EnterpriseGatewayApp.impersonation_enabled = False\n", + "c.AsyncMappingKernelManager.cull_connected = 
False\n", + ] + ) # app.jupyter_path.append(td1) app.load_config_file() app.add_dynamic_configurable("EnterpriseGatewayApp", app) @@ -184,11 +182,13 @@ def test_dynamic_updates(self): if s2 - s1 < 1.0: time.sleep(1.0 - (s2 - s1)) # update config file - with open(config_file, 'w') as f: - f.writelines([ - "c.EnterpriseGatewayApp.impersonation_enabled = True\n", - "c.AsyncMappingKernelManager.cull_connected = True\n" - ]) + with open(config_file, "w") as f: + f.writelines( + [ + "c.EnterpriseGatewayApp.impersonation_enabled = True\n", + "c.AsyncMappingKernelManager.cull_connected = True\n", + ] + ) # trigger reload and verify updates app.update_dynamic_configurables() @@ -207,11 +207,11 @@ def test_kernel_id_env_var(self): """ expected_kernel_id = str(uuid.uuid4()) kernel_response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', + self.get_url("/api/kernels"), + method="POST", body='{"env": {"KERNEL_ID": "%s"}}' % expected_kernel_id, - raise_error=False + raise_error=False, ) self.assertEqual(kernel_response.code, 201) kernel = json_decode(kernel_response.body) - self.assertEqual(expected_kernel_id, kernel['id']) + self.assertEqual(expected_kernel_id, kernel["id"]) diff --git a/enterprise_gateway/tests/test_gatewayapp.py b/enterprise_gateway/tests/test_gatewayapp.py index 339e5584e..b92c5672d 100644 --- a/enterprise_gateway/tests/test_gatewayapp.py +++ b/enterprise_gateway/tests/test_gatewayapp.py @@ -3,12 +3,14 @@ """Tests for basic gateway app behavior.""" import logging -import unittest import os -from enterprise_gateway.enterprisegatewayapp import EnterpriseGatewayApp +import unittest + from tornado.testing import AsyncHTTPTestCase, ExpectLog -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +from enterprise_gateway.enterprisegatewayapp import EnterpriseGatewayApp + +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") class TestGatewayAppConfig(unittest.TestCase): @@ -28,65 +30,65 @@ def 
_assert_envs_to_traitlets(self): self.assertEqual(app.port, 1234) self.assertEqual(app.port_retries, 4321) - self.assertEqual(app.ip, '1.1.1.1') - self.assertEqual(app.auth_token, 'fake-token') - self.assertEqual(app.allow_credentials, 'true') - self.assertEqual(app.allow_headers, 'Authorization') - self.assertEqual(app.allow_methods, 'GET') - self.assertEqual(app.allow_origin, '*') - self.assertEqual(app.expose_headers, 'X-Fake-Header') - self.assertEqual(app.max_age, '5') - self.assertEqual(app.base_url, '/fake/path') + self.assertEqual(app.ip, "1.1.1.1") + self.assertEqual(app.auth_token, "fake-token") + self.assertEqual(app.allow_credentials, "true") + self.assertEqual(app.allow_headers, "Authorization") + self.assertEqual(app.allow_methods, "GET") + self.assertEqual(app.allow_origin, "*") + self.assertEqual(app.expose_headers, "X-Fake-Header") + self.assertEqual(app.max_age, "5") + self.assertEqual(app.base_url, "/fake/path") self.assertEqual(app.max_kernels, 1) - self.assertEqual(app.default_kernel_name, 'fake_kernel') - self.assertEqual(app.keyfile, '/test/fake.key') - self.assertEqual(app.certfile, '/test/fake.crt') - self.assertEqual(app.client_ca, '/test/fake_ca.crt') + self.assertEqual(app.default_kernel_name, "fake_kernel") + self.assertEqual(app.keyfile, "/test/fake.key") + self.assertEqual(app.certfile, "/test/fake.crt") + self.assertEqual(app.client_ca, "/test/fake_ca.crt") self.assertEqual(app.ssl_version, 3) def test_config_env_vars_bc(self): """B/C env vars should be honored for traitlets.""" # Environment vars are always strings - os.environ['KG_PORT'] = '1234' - os.environ['KG_PORT_RETRIES'] = '4321' - os.environ['KG_IP'] = '1.1.1.1' - os.environ['KG_AUTH_TOKEN'] = 'fake-token' - os.environ['KG_ALLOW_CREDENTIALS'] = 'true' - os.environ['KG_ALLOW_HEADERS'] = 'Authorization' - os.environ['KG_ALLOW_METHODS'] = 'GET' - os.environ['KG_ALLOW_ORIGIN'] = '*' - os.environ['KG_EXPOSE_HEADERS'] = 'X-Fake-Header' - os.environ['KG_MAX_AGE'] = '5' - 
os.environ['KG_BASE_URL'] = '/fake/path' - os.environ['KG_MAX_KERNELS'] = '1' - os.environ['KG_DEFAULT_KERNEL_NAME'] = 'fake_kernel' - os.environ['KG_KEYFILE'] = '/test/fake.key' - os.environ['KG_CERTFILE'] = '/test/fake.crt' - os.environ['KG_CLIENT_CA'] = '/test/fake_ca.crt' - os.environ['KG_SSL_VERSION'] = '3' + os.environ["KG_PORT"] = "1234" + os.environ["KG_PORT_RETRIES"] = "4321" + os.environ["KG_IP"] = "1.1.1.1" + os.environ["KG_AUTH_TOKEN"] = "fake-token" + os.environ["KG_ALLOW_CREDENTIALS"] = "true" + os.environ["KG_ALLOW_HEADERS"] = "Authorization" + os.environ["KG_ALLOW_METHODS"] = "GET" + os.environ["KG_ALLOW_ORIGIN"] = "*" + os.environ["KG_EXPOSE_HEADERS"] = "X-Fake-Header" + os.environ["KG_MAX_AGE"] = "5" + os.environ["KG_BASE_URL"] = "/fake/path" + os.environ["KG_MAX_KERNELS"] = "1" + os.environ["KG_DEFAULT_KERNEL_NAME"] = "fake_kernel" + os.environ["KG_KEYFILE"] = "/test/fake.key" + os.environ["KG_CERTFILE"] = "/test/fake.crt" + os.environ["KG_CLIENT_CA"] = "/test/fake_ca.crt" + os.environ["KG_SSL_VERSION"] = "3" self._assert_envs_to_traitlets() def test_config_env_vars(self): """Env vars should be honored for traitlets.""" # Environment vars are always strings - os.environ['EG_PORT'] = '1234' - os.environ['EG_PORT_RETRIES'] = '4321' - os.environ['EG_IP'] = '1.1.1.1' - os.environ['EG_AUTH_TOKEN'] = 'fake-token' - os.environ['EG_ALLOW_CREDENTIALS'] = 'true' - os.environ['EG_ALLOW_HEADERS'] = 'Authorization' - os.environ['EG_ALLOW_METHODS'] = 'GET' - os.environ['EG_ALLOW_ORIGIN'] = '*' - os.environ['EG_EXPOSE_HEADERS'] = 'X-Fake-Header' - os.environ['EG_MAX_AGE'] = '5' - os.environ['EG_BASE_URL'] = '/fake/path' - os.environ['EG_MAX_KERNELS'] = '1' - os.environ['EG_DEFAULT_KERNEL_NAME'] = 'fake_kernel' - os.environ['EG_KEYFILE'] = '/test/fake.key' - os.environ['EG_CERTFILE'] = '/test/fake.crt' - os.environ['EG_CLIENT_CA'] = '/test/fake_ca.crt' - os.environ['EG_SSL_VERSION'] = '3' + os.environ["EG_PORT"] = "1234" + os.environ["EG_PORT_RETRIES"] = "4321" 
+ os.environ["EG_IP"] = "1.1.1.1" + os.environ["EG_AUTH_TOKEN"] = "fake-token" + os.environ["EG_ALLOW_CREDENTIALS"] = "true" + os.environ["EG_ALLOW_HEADERS"] = "Authorization" + os.environ["EG_ALLOW_METHODS"] = "GET" + os.environ["EG_ALLOW_ORIGIN"] = "*" + os.environ["EG_EXPOSE_HEADERS"] = "X-Fake-Header" + os.environ["EG_MAX_AGE"] = "5" + os.environ["EG_BASE_URL"] = "/fake/path" + os.environ["EG_MAX_KERNELS"] = "1" + os.environ["EG_DEFAULT_KERNEL_NAME"] = "fake_kernel" + os.environ["EG_KEYFILE"] = "/test/fake.key" + os.environ["EG_CERTFILE"] = "/test/fake.crt" + os.environ["EG_CLIENT_CA"] = "/test/fake_ca.crt" + os.environ["EG_SSL_VERSION"] = "3" self._assert_envs_to_traitlets() @@ -111,11 +113,11 @@ def tearDown(self): if self.app: self.app.shutdown() - super(TestGatewayAppBase, self).tearDown() + super().tearDown() def get_app(self): """Returns a tornado.web.Application for the Tornado test runner.""" - if hasattr(self, '_app'): + if hasattr(self, "_app"): return self._app self.app = EnterpriseGatewayApp(log_level=logging.CRITICAL) self.setup_app() @@ -131,6 +133,5 @@ def setup_app(self): pass def setup_configurables(self): - """Override to configure further settings, such as the personality. - """ + """Override to configure further settings, such as the personality.""" pass diff --git a/enterprise_gateway/tests/test_handlers.py b/enterprise_gateway/tests/test_handlers.py index 1c09ff153..45cd371e0 100644 --- a/enterprise_gateway/tests/test_handlers.py +++ b/enterprise_gateway/tests/test_handlers.py @@ -2,15 +2,16 @@ # Distributed under the terms of the Modified BSD License. 
"""Tests for jupyter-websocket mode.""" -import os import json +import os -from .test_gatewayapp import TestGatewayAppBase, RESOURCES -from tornado.gen import coroutine, Return -from tornado.websocket import websocket_connect +from tornado.escape import json_decode, json_encode, url_escape +from tornado.gen import Return, coroutine from tornado.httpclient import HTTPRequest from tornado.testing import gen_test -from tornado.escape import json_encode, json_decode, url_escape +from tornado.websocket import websocket_connect + +from .test_gatewayapp import RESOURCES, TestGatewayAppBase class TestHandlers(TestGatewayAppBase): @@ -19,31 +20,30 @@ class TestHandlers(TestGatewayAppBase): """ def setup_app(self): - """Configure JUPYTER_PATH so that we can use local kernelspec files for testing. - """ - os.environ['JUPYTER_PATH'] = RESOURCES + """Configure JUPYTER_PATH so that we can use local kernelspec files for testing.""" + os.environ["JUPYTER_PATH"] = RESOURCES # These are required for setup of test_kernel_defaults - os.environ['EG_ENV_PROCESS_WHITELIST'] = "PROCESS_VAR1,PROCESS_VAR2" - os.environ['PROCESS_VAR1'] = "process_var1_override" + os.environ["EG_ENV_PROCESS_WHITELIST"] = "PROCESS_VAR1,PROCESS_VAR2" + os.environ["PROCESS_VAR1"] = "process_var1_override" - self.app.env_whitelist = ['TEST_VAR', 'OTHER_VAR1', 'OTHER_VAR2'] + self.app.env_whitelist = ["TEST_VAR", "OTHER_VAR1", "OTHER_VAR2"] def tearDown(self): """Shuts down the app after test run.""" # Clean out items added to env - if 'JUPYTER_PATH' in os.environ: - os.environ.pop('JUPYTER_PATH') - if 'EG_ENV_PROCESS_WHITELIST' in os.environ: - os.environ.pop('EG_ENV_PROCESS_WHITELIST') - if 'PROCESS_VAR1' in os.environ: - os.environ.pop('PROCESS_VAR1') + if "JUPYTER_PATH" in os.environ: + os.environ.pop("JUPYTER_PATH") + if "EG_ENV_PROCESS_WHITELIST" in os.environ: + os.environ.pop("EG_ENV_PROCESS_WHITELIST") + if "PROCESS_VAR1" in os.environ: + os.environ.pop("PROCESS_VAR1") - super(TestHandlers, 
self).tearDown() + super().tearDown() @coroutine - def spawn_kernel(self, kernel_body='{}'): + def spawn_kernel(self, kernel_body="{}"): """Spawns a kernel using the gateway API and connects a websocket client to it. @@ -59,17 +59,14 @@ def spawn_kernel(self, kernel_body='{}'): """ # Request a kernel response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body=kernel_body + self.get_url("/api/kernels"), method="POST", body=kernel_body ) self.assertEqual(response.code, 201) # Connect to the kernel via websocket kernel = json_decode(response.body) - ws_url = 'ws://localhost:{}/api/kernels/{}/channels'.format( - self.get_http_port(), - url_escape(kernel['id']) + ws_url = "ws://localhost:{}/api/kernels/{}/channels".format( + self.get_http_port(), url_escape(kernel["id"]) ) ws = yield websocket_connect(ws_url) @@ -89,23 +86,23 @@ def execute_request(self, code): The message """ return { - 'header': { - 'username': '', - 'version': '5.0', - 'session': '', - 'msg_id': 'fake-msg-id', - 'msg_type': 'execute_request' + "header": { + "username": "", + "version": "5.0", + "session": "", + "msg_id": "fake-msg-id", + "msg_type": "execute_request", }, - 'parent_header': {}, - 'channel': 'shell', - 'content': { - 'code': code, - 'silent': False, - 'store_history': False, - 'user_expressions': {} + "parent_header": {}, + "channel": "shell", + "content": { + "code": code, + "silent": False, + "store_history": False, + "user_expressions": {}, }, - 'metadata': {}, - 'buffers': {} + "metadata": {}, + "buffers": {}, } @coroutine @@ -114,50 +111,51 @@ def await_stream(self, ws): while 1: msg = yield ws.read_message() msg = json_decode(msg) - msg_type = msg['msg_type'] - parent_msg_id = msg['parent_header']['msg_id'] - if msg_type == 'stream' and parent_msg_id == 'fake-msg-id': - raise Return(msg['content']) + msg_type = msg["msg_type"] + parent_msg_id = msg["parent_header"]["msg_id"] + if msg_type == "stream" and parent_msg_id == "fake-msg-id": + raise 
Return(msg["content"]) class TestDefaults(TestHandlers): """Tests gateway behavior.""" + @gen_test def test_startup(self): """Root of kernels resource should be OK.""" - self.app.web_app.settings['eg_list_kernels'] = True - response = yield self.http_client.fetch(self.get_url('/api/kernels')) + self.app.web_app.settings["eg_list_kernels"] = True + response = yield self.http_client.fetch(self.get_url("/api/kernels")) self.assertEqual(response.code, 200) @gen_test def test_headless(self): """Other notebook resources should not exist.""" - response = yield self.http_client.fetch(self.get_url('/api/contents'), raise_error=False) + response = yield self.http_client.fetch(self.get_url("/api/contents"), raise_error=False) self.assertEqual(response.code, 404) - response = yield self.http_client.fetch(self.get_url('/'), raise_error=False) + response = yield self.http_client.fetch(self.get_url("/"), raise_error=False) self.assertEqual(response.code, 404) - response = yield self.http_client.fetch(self.get_url('/tree'), raise_error=False) + response = yield self.http_client.fetch(self.get_url("/tree"), raise_error=False) self.assertEqual(response.code, 404) @gen_test def test_check_origin(self): """Allow origin setting should pass through to base handlers.""" response = yield self.http_client.fetch( - self.get_url('/api/kernelspecs'), - method='GET', - headers={'Origin': 'fake.com:8888'}, - raise_error=False + self.get_url("/api/kernelspecs"), + method="GET", + headers={"Origin": "fake.com:8888"}, + raise_error=False, ) self.assertEqual(response.code, 404) app = self.get_app() - app.settings['allow_origin'] = '*' + app.settings["allow_origin"] = "*" response = yield self.http_client.fetch( - self.get_url('/api/kernelspecs'), - method='GET', - headers={'Origin': 'fake.com:8888'}, - raise_error=False + self.get_url("/api/kernelspecs"), + method="GET", + headers={"Origin": "fake.com:8888"}, + raise_error=False, ) self.assertEqual(response.code, 200) @@ -166,83 +164,75 @@ def 
test_auth_token(self): """All server endpoints should check the configured auth token.""" # Set token requirement app = self.get_app() - app.settings['eg_auth_token'] = 'fake-token' + app.settings["eg_auth_token"] = "fake-token" # Requst API without the token response = yield self.http_client.fetch( - self.get_url('/api'), - method='GET', - raise_error=False + self.get_url("/api"), method="GET", raise_error=False ) self.assertEqual(response.code, 401) # Now with it response = yield self.http_client.fetch( - self.get_url('/api'), - method='GET', - headers={'Authorization': 'token fake-token'}, - raise_error=False + self.get_url("/api"), + method="GET", + headers={"Authorization": "token fake-token"}, + raise_error=False, ) self.assertEqual(response.code, 200) # Request kernelspecs without the token response = yield self.http_client.fetch( - self.get_url('/api/kernelspecs'), - method='GET', - raise_error=False + self.get_url("/api/kernelspecs"), method="GET", raise_error=False ) self.assertEqual(response.code, 401) # Now with it response = yield self.http_client.fetch( - self.get_url('/api/kernelspecs'), - method='GET', - headers={'Authorization': 'token fake-token'}, - raise_error=False + self.get_url("/api/kernelspecs"), + method="GET", + headers={"Authorization": "token fake-token"}, + raise_error=False, ) self.assertEqual(response.code, 200) # Request a kernel without the token response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{}', - raise_error=False + self.get_url("/api/kernels"), method="POST", body="{}", raise_error=False ) self.assertEqual(response.code, 401) # Request with the token now response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{}', - headers={'Authorization': 'token fake-token'}, - raise_error=False + self.get_url("/api/kernels"), + method="POST", + body="{}", + headers={"Authorization": "token fake-token"}, + raise_error=False, ) 
self.assertEqual(response.code, 201) kernel = json_decode(response.body) # Request kernel info without the token response = yield self.http_client.fetch( - self.get_url('/api/kernels/' + url_escape(kernel['id'])), - method='GET', - raise_error=False + self.get_url("/api/kernels/" + url_escape(kernel["id"])), + method="GET", + raise_error=False, ) self.assertEqual(response.code, 401) # Now with it response = yield self.http_client.fetch( - self.get_url('/api/kernels/' + url_escape(kernel['id'])), - method='GET', - headers={'Authorization': 'token fake-token'}, - raise_error=False + self.get_url("/api/kernels/" + url_escape(kernel["id"])), + method="GET", + headers={"Authorization": "token fake-token"}, + raise_error=False, ) self.assertEqual(response.code, 200) # Request websocket connection without the token - ws_url = 'ws://localhost:{}/api/kernels/{}/channels'.format( - self.get_http_port(), - url_escape(kernel['id']) + ws_url = "ws://localhost:{}/api/kernels/{}/channels".format( + self.get_http_port(), url_escape(kernel["id"]) ) # No option to ignore errors so try/except try: @@ -250,10 +240,10 @@ def test_auth_token(self): except Exception as ex: self.assertEqual(ex.code, 401) else: - self.assertTrue(False, 'no exception raised') + self.assertTrue(False, "no exception raised") # Now request the websocket with the token - ws_req = HTTPRequest(ws_url, headers={'Authorization': 'token fake-token'}) + ws_req = HTTPRequest(ws_url, headers={"Authorization": "token fake-token"}) ws = yield websocket_connect(ws_req) ws.close() @@ -261,117 +251,98 @@ def test_auth_token(self): def test_cors_headers(self): """All kernel endpoints should respond with configured CORS headers.""" app = self.get_app() - app.settings['eg_allow_credentials'] = 'false' - app.settings['eg_allow_headers'] = 'Authorization,Content-Type' - app.settings['eg_allow_methods'] = 'GET,POST' - app.settings['eg_allow_origin'] = 'https://jupyter.org' - app.settings['eg_expose_headers'] = 'X-My-Fake-Header' 
- app.settings['eg_max_age'] = '600' - app.settings['eg_list_kernels'] = True + app.settings["eg_allow_credentials"] = "false" + app.settings["eg_allow_headers"] = "Authorization,Content-Type" + app.settings["eg_allow_methods"] = "GET,POST" + app.settings["eg_allow_origin"] = "https://jupyter.org" + app.settings["eg_expose_headers"] = "X-My-Fake-Header" + app.settings["eg_max_age"] = "600" + app.settings["eg_list_kernels"] = True # Get kernels to check headers - response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='GET' - ) + response = yield self.http_client.fetch(self.get_url("/api/kernels"), method="GET") self.assertEqual(response.code, 200) - self.assertEqual(response.headers['Access-Control-Allow-Credentials'], 'false') - self.assertEqual(response.headers['Access-Control-Allow-Headers'], 'Authorization,Content-Type') - self.assertEqual(response.headers['Access-Control-Allow-Methods'], 'GET,POST') - self.assertEqual(response.headers['Access-Control-Allow-Origin'], 'https://jupyter.org') - self.assertEqual(response.headers['Access-Control-Expose-Headers'], 'X-My-Fake-Header') - self.assertEqual(response.headers['Access-Control-Max-Age'], '600') - self.assertEqual(response.headers.get('Content-Security-Policy'), None) + self.assertEqual(response.headers["Access-Control-Allow-Credentials"], "false") + self.assertEqual( + response.headers["Access-Control-Allow-Headers"], "Authorization,Content-Type" + ) + self.assertEqual(response.headers["Access-Control-Allow-Methods"], "GET,POST") + self.assertEqual(response.headers["Access-Control-Allow-Origin"], "https://jupyter.org") + self.assertEqual(response.headers["Access-Control-Expose-Headers"], "X-My-Fake-Header") + self.assertEqual(response.headers["Access-Control-Max-Age"], "600") + self.assertEqual(response.headers.get("Content-Security-Policy"), None) @gen_test def test_max_kernels(self): """Number of kernels should be limited.""" app = self.get_app() - app.settings['eg_max_kernels'] = 
1 + app.settings["eg_max_kernels"] = 1 # Request a kernel response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{}' + self.get_url("/api/kernels"), method="POST", body="{}" ) self.assertEqual(response.code, 201) # Request another response2 = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{}', - raise_error=False + self.get_url("/api/kernels"), method="POST", body="{}", raise_error=False ) self.assertEqual(response2.code, 403) # Shut down the kernel kernel = json_decode(response.body) response = yield self.http_client.fetch( - self.get_url('/api/kernels/' + url_escape(kernel['id'])), - method='DELETE' + self.get_url("/api/kernels/" + url_escape(kernel["id"])), method="DELETE" ) self.assertEqual(response.code, 204) # Try again response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{}' + self.get_url("/api/kernels"), method="POST", body="{}" ) self.assertEqual(response.code, 201) @gen_test def test_get_api(self): """Server should respond with the API version metadata.""" - response = yield self.http_client.fetch( - self.get_url('/api') - ) + response = yield self.http_client.fetch(self.get_url("/api")) self.assertEqual(response.code, 200) info = json_decode(response.body) - self.assertIn('version', info) - self.assertIn('gateway_version', info) + self.assertIn("version", info) + self.assertIn("gateway_version", info) @gen_test def test_get_kernelspecs(self): """Server should respond with kernel spec metadata.""" - response = yield self.http_client.fetch( - self.get_url('/api/kernelspecs') - ) + response = yield self.http_client.fetch(self.get_url("/api/kernelspecs")) self.assertEqual(response.code, 200) specs = json_decode(response.body) - self.assertIn('kernelspecs', specs) - self.assertIn('default', specs) + self.assertIn("kernelspecs", specs) + self.assertIn("default", specs) @gen_test def test_get_kernels(self): """Server should respond 
with running kernel information.""" - self.app.web_app.settings['eg_list_kernels'] = True - response = yield self.http_client.fetch( - self.get_url('/api/kernels') - ) + self.app.web_app.settings["eg_list_kernels"] = True + response = yield self.http_client.fetch(self.get_url("/api/kernels")) self.assertEqual(response.code, 200) kernels = json_decode(response.body) self.assertEqual(len(kernels), 0) # Launch a kernel response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='{}' + self.get_url("/api/kernels"), method="POST", body="{}" ) self.assertEqual(response.code, 201) kernel = json_decode(response.body) # Check the list again - response = yield self.http_client.fetch( - self.get_url('/api/kernels') - ) + response = yield self.http_client.fetch(self.get_url("/api/kernels")) self.assertEqual(response.code, 200) kernels = json_decode(response.body) self.assertEqual(len(kernels), 1) - self.assertEqual(kernels[0]['id'], kernel['id']) + self.assertEqual(kernels[0]["id"], kernel["id"]) @gen_test def test_kernel_comm(self): @@ -379,20 +350,24 @@ def test_kernel_comm(self): ws = yield self.spawn_kernel() # Send a request for kernel info - ws.write_message(json_encode({ - 'header': { - 'username': '', - 'version': '5.0', - 'session': '', - 'msg_id': 'fake-msg-id', - 'msg_type': 'kernel_info_request' - }, - 'parent_header': {}, - 'channel': 'shell', - 'content': {}, - 'metadata': {}, - 'buffers': {} - })) + ws.write_message( + json_encode( + { + "header": { + "username": "", + "version": "5.0", + "session": "", + "msg_id": "fake-msg-id", + "msg_type": "kernel_info_request", + }, + "parent_header": {}, + "channel": "shell", + "content": {}, + "metadata": {}, + "buffers": {}, + } + ) + ) # Assert the reply comes back. Test will timeout if this hangs. 
# Note that this range may be side-effected by upstream changes, @@ -401,70 +376,57 @@ def test_kernel_comm(self): msg = yield ws.read_message() msg = json_decode(msg) print(f"test_kernel_comm, msg_type: {msg['msg_type']}") - if(msg['msg_type'] == 'kernel_info_reply'): + if msg["msg_type"] == "kernel_info_reply": break else: - self.assertTrue(False, 'never received kernel_info_reply') + self.assertTrue(False, "never received kernel_info_reply") ws.close() @gen_test def test_no_discovery(self): """The list of kernels / sessions should be forbidden by default.""" - response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - raise_error=False - ) + response = yield self.http_client.fetch(self.get_url("/api/kernels"), raise_error=False) self.assertEqual(response.code, 403) - response = yield self.http_client.fetch( - self.get_url('/api/sessions'), - raise_error=False - ) + response = yield self.http_client.fetch(self.get_url("/api/sessions"), raise_error=False) self.assertEqual(response.code, 403) @gen_test def test_crud_sessions(self): """Server should create, list, and delete sessions.""" app = self.get_app() - app.settings['eg_list_kernels'] = True + app.settings["eg_list_kernels"] = True # Ensure no sessions by default - response = yield self.http_client.fetch( - self.get_url('/api/sessions') - ) + response = yield self.http_client.fetch(self.get_url("/api/sessions")) self.assertEqual(response.code, 200) sessions = json_decode(response.body) self.assertEqual(len(sessions), 0) # Launch a session response = yield self.http_client.fetch( - self.get_url('/api/sessions'), - method='POST', - body='{"id":"any","notebook":{"path":"anywhere"},"kernel":{"name":"python"}}' + self.get_url("/api/sessions"), + method="POST", + body='{"id":"any","notebook":{"path":"anywhere"},"kernel":{"name":"python"}}', ) self.assertEqual(response.code, 201) session = json_decode(response.body) # Check the list again - response = yield self.http_client.fetch( - 
self.get_url('/api/sessions') - ) + response = yield self.http_client.fetch(self.get_url("/api/sessions")) self.assertEqual(response.code, 200) sessions = json_decode(response.body) self.assertEqual(len(sessions), 1) - self.assertEqual(sessions[0]['id'], session['id']) + self.assertEqual(sessions[0]["id"], session["id"]) # Delete the session response = yield self.http_client.fetch( - self.get_url('/api/sessions/' + session['id']), - method='DELETE' + self.get_url("/api/sessions/" + session["id"]), method="DELETE" ) self.assertEqual(response.code, 204) # Make sure the list is empty - response = yield self.http_client.fetch( - self.get_url('/api/sessions') - ) + response = yield self.http_client.fetch(self.get_url("/api/sessions")) self.assertEqual(response.code, 200) sessions = json_decode(response.body) self.assertEqual(len(sessions), 0) @@ -473,61 +435,58 @@ def test_crud_sessions(self): def test_json_errors(self): """Handlers should always return JSON errors.""" # A handler that we override - response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - raise_error=False - ) + response = yield self.http_client.fetch(self.get_url("/api/kernels"), raise_error=False) body = json_decode(response.body) self.assertEqual(response.code, 403) - self.assertEqual(body['reason'], 'Forbidden') + self.assertEqual(body["reason"], "Forbidden") # A handler from the notebook base response = yield self.http_client.fetch( - self.get_url('/api/kernels/1-2-3-4-5'), - raise_error=False + self.get_url("/api/kernels/1-2-3-4-5"), raise_error=False ) body = json_decode(response.body) self.assertEqual(response.code, 404) # Base handler json_errors decorator does not capture reason properly # self.assertEqual(body['reason'], 'Not Found') - self.assertIn('1-2-3-4-5', body['message']) + self.assertIn("1-2-3-4-5", body["message"]) # The last resort not found handler - response = yield self.http_client.fetch( - self.get_url('/fake-endpoint'), - raise_error=False - ) + response = 
yield self.http_client.fetch(self.get_url("/fake-endpoint"), raise_error=False) body = json_decode(response.body) self.assertEqual(response.code, 404) - self.assertEqual(body['reason'], 'Not Found') + self.assertEqual(body["reason"], "Not Found") @gen_test def test_kernel_env(self): """Kernel should start with environment vars defined in the request.""" # Note: Only envs in request prefixed with KERNEL_ or in env_whitelist (TEST_VAR) # with the exception of KERNEL_GATEWAY - which is "system owned". - kernel_body = json.dumps({ - 'name': 'python', - 'env': { - 'KERNEL_FOO': 'kernel-foo-value', - 'NOT_KERNEL': 'ignored', - 'KERNEL_GATEWAY': 'overridden', - 'TEST_VAR': 'allowed' + kernel_body = json.dumps( + { + "name": "python", + "env": { + "KERNEL_FOO": "kernel-foo-value", + "NOT_KERNEL": "ignored", + "KERNEL_GATEWAY": "overridden", + "TEST_VAR": "allowed", + }, } - }) + ) ws = yield self.spawn_kernel(kernel_body) - req = self.execute_request('import os; ' - 'print(os.getenv("KERNEL_FOO"), ' - 'os.getenv("NOT_KERNEL"), ' - 'os.getenv("KERNEL_GATEWAY"), ' - 'os.getenv("TEST_VAR"))') + req = self.execute_request( + "import os; " + 'print(os.getenv("KERNEL_FOO"), ' + 'os.getenv("NOT_KERNEL"), ' + 'os.getenv("KERNEL_GATEWAY"), ' + 'os.getenv("TEST_VAR"))' + ) ws.write_message(json_encode(req)) content = yield self.await_stream(ws) - self.assertEqual(content['name'], 'stdout') - self.assertIn('kernel-foo-value', content['text']) - self.assertNotIn('ignored', content['text']) - self.assertNotIn('overridden', content['text']) - self.assertIn('allowed', content['text']) + self.assertEqual(content["name"], "stdout") + self.assertIn("kernel-foo-value", content["text"]) + self.assertNotIn("ignored", content["text"]) + self.assertNotIn("overridden", content["text"]) + self.assertIn("allowed", content["text"]) ws.close() @@ -540,81 +499,84 @@ def test_kernel_defaults(self): # Since OTHER_VAR1 is not in the request, its existing value in kernel.json will be used. 
# NOTE: This test requires use of the kernels/kernel_defaults_test/kernel.json file. - kernel_body = json.dumps({ - 'name': 'kernel_defaults_test', - 'env': { - 'KERNEL_VAR1': 'kernel_var1_override', # Ensure this value overrides that in kernel.json - 'KERNEL_VAR3': 'kernel_var3_value', # Any KERNEL_ flows to kernel - 'OTHER_VAR2': 'other_var2_override', # Ensure this value overrides that in kernel.json - 'KERNEL_GATEWAY': 'kernel_gateway_override' # Ensure KERNEL_GATEWAY is not overridden + kernel_body = json.dumps( + { + "name": "kernel_defaults_test", + "env": { + "KERNEL_VAR1": "kernel_var1_override", # Ensure this value overrides that in kernel.json + "KERNEL_VAR3": "kernel_var3_value", # Any KERNEL_ flows to kernel + "OTHER_VAR2": "other_var2_override", # Ensure this value overrides that in kernel.json + "KERNEL_GATEWAY": "kernel_gateway_override", # Ensure KERNEL_GATEWAY is not overridden + }, } - }) + ) ws = yield self.spawn_kernel(kernel_body) - req = self.execute_request('import os; print(os.getenv("KERNEL_VAR1"), os.getenv("KERNEL_VAR2"), ' - 'os.getenv("KERNEL_VAR3"), os.getenv("KERNEL_GATEWAY"), os.getenv("OTHER_VAR1"), ' - 'os.getenv("OTHER_VAR2"), os.getenv("PROCESS_VAR1"), os.getenv("PROCESS_VAR2"))') + req = self.execute_request( + 'import os; print(os.getenv("KERNEL_VAR1"), os.getenv("KERNEL_VAR2"), ' + 'os.getenv("KERNEL_VAR3"), os.getenv("KERNEL_GATEWAY"), os.getenv("OTHER_VAR1"), ' + 'os.getenv("OTHER_VAR2"), os.getenv("PROCESS_VAR1"), os.getenv("PROCESS_VAR2"))' + ) ws.write_message(json_encode(req)) content = yield self.await_stream(ws) - self.assertEqual(content['name'], 'stdout') - self.assertIn('kernel_var1_override', content['text']) - self.assertIn('kernel_var2_default', content['text']) - self.assertIn('kernel_var3_value', content['text']) - self.assertNotIn('kernel_gateway_override', content['text']) - self.assertIn('other_var1_default', content['text']) - self.assertIn('other_var2_override', content['text']) - 
self.assertIn('process_var1_override', content['text']) - self.assertIn('process_var2_default', content['text']) + self.assertEqual(content["name"], "stdout") + self.assertIn("kernel_var1_override", content["text"]) + self.assertIn("kernel_var2_default", content["text"]) + self.assertIn("kernel_var3_value", content["text"]) + self.assertNotIn("kernel_gateway_override", content["text"]) + self.assertIn("other_var1_default", content["text"]) + self.assertIn("other_var2_override", content["text"]) + self.assertIn("process_var1_override", content["text"]) + self.assertIn("process_var2_default", content["text"]) ws.close() @gen_test def test_get_swagger_yaml_spec(self): """Getting the swagger.yaml spec should be ok""" - response = yield self.http_client.fetch(self.get_url('/api/swagger.yaml')) + response = yield self.http_client.fetch(self.get_url("/api/swagger.yaml")) self.assertEqual(response.code, 200) @gen_test def test_get_swagger_json_spec(self): """Getting the swagger.json spec should be ok""" - response = yield self.http_client.fetch(self.get_url('/api/swagger.json')) + response = yield self.http_client.fetch(self.get_url("/api/swagger.json")) self.assertEqual(response.code, 200) @gen_test def test_kernel_env_auth_token(self): """Kernel should not have EG_AUTH_TOKEN in its environment.""" - os.environ['EG_AUTH_TOKEN'] = 'fake-secret' + os.environ["EG_AUTH_TOKEN"] = "fake-secret" try: ws = yield self.spawn_kernel() req = self.execute_request('import os; print(os.getenv("EG_AUTH_TOKEN"))') ws.write_message(json_encode(req)) content = yield self.await_stream(ws) - self.assertNotIn('fake-secret', content['text']) + self.assertNotIn("fake-secret", content["text"]) finally: - del os.environ['EG_AUTH_TOKEN'] + del os.environ["EG_AUTH_TOKEN"] ws.close() class TestCustomDefaultKernel(TestHandlers): """Tests gateway behavior when setting a custom default kernelspec.""" + def setup_app(self): - self.app.default_kernel_name = 'fake-kernel' + self.app.default_kernel_name = 
"fake-kernel" @gen_test def test_default_kernel_name(self): """The default kernel name should be used on empty requests.""" # Request without an explicit kernel name response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='POST', - body='', - raise_error=False + self.get_url("/api/kernels"), method="POST", body="", raise_error=False ) self.assertEqual(response.code, 500) - self.assertTrue('raise NoSuchKernel' in str(response.body)) + self.assertTrue("raise NoSuchKernel" in str(response.body)) class TestEnableDiscovery(TestHandlers): """Tests gateway behavior with kernel listing enabled.""" + def setup_configurables(self): """Enables kernel listing for all tests.""" self.app.list_kernels = True @@ -623,22 +585,23 @@ def setup_configurables(self): def test_enable_kernel_list(self): """The list of kernels, sessions, and activities should be available.""" response = yield self.http_client.fetch( - self.get_url('/api/kernels'), + self.get_url("/api/kernels"), ) self.assertEqual(response.code, 200) - self.assertTrue('[]' in str(response.body)) + self.assertTrue("[]" in str(response.body)) response = yield self.http_client.fetch( - self.get_url('/api/sessions'), + self.get_url("/api/sessions"), ) self.assertEqual(response.code, 200) - self.assertTrue('[]' in str(response.body)) + self.assertTrue("[]" in str(response.body)) class TestBaseURL(TestHandlers): """Tests gateway behavior when a custom base URL is configured.""" + def setup_app(self): """Sets the custom base URL and enables kernel listing.""" - self.app.base_url = '/fake/path' + self.app.base_url = "/fake/path" def setup_configurables(self): """Enables kernel listing for all tests.""" @@ -649,35 +612,32 @@ def test_base_url(self): """Server should mount resources under configured base.""" # Should not exist at root response = yield self.http_client.fetch( - self.get_url('/api/kernels'), - method='GET', - raise_error=False + self.get_url("/api/kernels"), method="GET", raise_error=False ) 
self.assertEqual(response.code, 404) # Should exist under path response = yield self.http_client.fetch( - self.get_url('/fake/path/api/kernels'), - method='GET' + self.get_url("/fake/path/api/kernels"), method="GET" ) self.assertEqual(response.code, 200) class TestRelativeBaseURL(TestHandlers): """Tests gateway behavior when a relative base URL is configured.""" + def setup_app(self): """Sets the custom base URL as a relative path.""" - self.app.base_url = 'fake/path' + self.app.base_url = "fake/path" @gen_test def test_base_url(self): """Server should mount resources under fixed base.""" - self.app.web_app.settings['eg_list_kernels'] = True + self.app.web_app.settings["eg_list_kernels"] = True # Should exist under path response = yield self.http_client.fetch( - self.get_url('/fake/path/api/kernels'), - method='GET' + self.get_url("/fake/path/api/kernels"), method="GET" ) self.assertEqual(response.code, 200) @@ -686,37 +646,40 @@ class TestWildcardEnvs(TestHandlers): """Base class for jupyter-websocket mode tests that spawn kernels.""" def setup_app(self): - """Configure JUPYTER_PATH so that we can use local kernelspec files for testing. - """ + """Configure JUPYTER_PATH so that we can use local kernelspec files for testing.""" super().setup_app() # overwrite env_whitelist - self.app.env_whitelist = ['*'] + self.app.env_whitelist = ["*"] @gen_test def test_kernel_wildcard_env(self): """Kernel should start with environment vars defined in the request.""" # Note: Since env_whitelist == '*', all values should be present. 
- kernel_body = json.dumps({ - 'name': 'python', - 'env': { - 'KERNEL_FOO': 'kernel-foo-value', - 'OTHER_VAR1': 'other-var1-value', - 'OTHER_VAR2': 'other-var2-value', - 'TEST_VAR': 'test-var-value' + kernel_body = json.dumps( + { + "name": "python", + "env": { + "KERNEL_FOO": "kernel-foo-value", + "OTHER_VAR1": "other-var1-value", + "OTHER_VAR2": "other-var2-value", + "TEST_VAR": "test-var-value", + }, } - }) + ) ws = yield self.spawn_kernel(kernel_body) - req = self.execute_request('import os; ' - 'print(os.getenv("KERNEL_FOO"), ' - 'os.getenv("OTHER_VAR1"), ' - 'os.getenv("OTHER_VAR2"), ' - 'os.getenv("TEST_VAR"))') + req = self.execute_request( + "import os; " + 'print(os.getenv("KERNEL_FOO"), ' + 'os.getenv("OTHER_VAR1"), ' + 'os.getenv("OTHER_VAR2"), ' + 'os.getenv("TEST_VAR"))' + ) ws.write_message(json_encode(req)) content = yield self.await_stream(ws) - self.assertEqual(content['name'], 'stdout') - self.assertIn('kernel-foo-value', content['text']) - self.assertIn('other-var1-value', content['text']) - self.assertIn('other-var2-value', content['text']) - self.assertIn('test-var-value', content['text']) + self.assertEqual(content["name"], "stdout") + self.assertIn("kernel-foo-value", content["text"]) + self.assertIn("other-var1-value", content["text"]) + self.assertIn("other-var2-value", content["text"]) + self.assertIn("test-var-value", content["text"]) ws.close() diff --git a/enterprise_gateway/tests/test_kernelspec_cache.py b/enterprise_gateway/tests/test_kernelspec_cache.py index 28671a08b..46a409c20 100644 --- a/enterprise_gateway/tests/test_kernelspec_cache.py +++ b/enterprise_gateway/tests/test_kernelspec_cache.py @@ -4,13 +4,14 @@ import asyncio import json -import jupyter_core.paths import os -import pytest import shutil import sys +import jupyter_core.paths +import pytest from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel + from enterprise_gateway.services.kernelspecs import KernelSpecCache @@ -56,42 +57,48 @@ def environ( 
monkeypatch.setattr(jupyter_core.paths, "SYSTEM_CONFIG_PATH", [str(system_config_path)]) monkeypatch.setattr(jupyter_core.paths, "ENV_CONFIG_PATH", [str(env_config_path)]) + # END - Remove once transition to jupyter_server occurs -kernelspec_json = {'argv': ['cat', '{connection_file}'], 'display_name': 'Test kernel: {kernel_name}', } +kernelspec_json = { + "argv": ["cat", "{connection_file}"], + "display_name": "Test kernel: {kernel_name}", +} def _install_kernelspec(kernels_dir, kernel_name): """install a sample kernel in a kernels directory""" kernelspec_dir = os.path.join(kernels_dir, kernel_name) os.makedirs(kernelspec_dir) - json_file = os.path.join(kernelspec_dir, 'kernel.json') + json_file = os.path.join(kernelspec_dir, "kernel.json") named_json = kernelspec_json.copy() - named_json['display_name'] = named_json['display_name'].format(kernel_name=kernel_name) - with open(json_file, 'w') as f: + named_json["display_name"] = named_json["display_name"].format(kernel_name=kernel_name) + with open(json_file, "w") as f: json.dump(named_json, f) return kernelspec_dir def _modify_kernelspec(kernelspec_dir, kernel_name): - json_file = os.path.join(kernelspec_dir, 'kernel.json') + json_file = os.path.join(kernelspec_dir, "kernel.json") kernel_json = kernelspec_json.copy() - kernel_json['display_name'] = "{kernel_name} modified!".format(kernel_name=kernel_name) - with open(json_file, 'w') as f: + kernel_json["display_name"] = f"{kernel_name} modified!" 
+ with open(json_file, "w") as f: json.dump(kernel_json, f) kernelspec_location = pytest.fixture(lambda data_dir: mkdir(data_dir, "kernels")) -other_kernelspec_location = pytest.fixture(lambda env_jupyter_path: mkdir(env_jupyter_path, "kernels")) +other_kernelspec_location = pytest.fixture( + lambda env_jupyter_path: mkdir(env_jupyter_path, "kernels") +) @pytest.fixture def setup_kernelspecs(environ, kernelspec_location): # Only populate factory info - _install_kernelspec(str(kernelspec_location), 'test1') - _install_kernelspec(str(kernelspec_location), 'test2') - _install_kernelspec(str(kernelspec_location), 'test3') + _install_kernelspec(str(kernelspec_location), "test1") + _install_kernelspec(str(kernelspec_location), "test2") + _install_kernelspec(str(kernelspec_location), "test3") @pytest.fixture @@ -101,7 +108,9 @@ def kernel_spec_manager(environ, setup_kernelspecs): @pytest.fixture def kernel_spec_cache(is_enabled, kernel_spec_manager): - kspec_cache = KernelSpecCache.instance(kernel_spec_manager=kernel_spec_manager, cache_enabled=is_enabled) + kspec_cache = KernelSpecCache.instance( + kernel_spec_manager=kernel_spec_manager, cache_enabled=is_enabled + ) yield kspec_cache kspec_cache = None KernelSpecCache.clear_instance() @@ -118,59 +127,67 @@ async def tests_get_all_specs(kernel_spec_cache): async def tests_get_named_spec(kernel_spec_cache): - kspec = await kernel_spec_cache.get_kernel_spec('test2') - assert kspec.display_name == 'Test kernel: test2' + kspec = await kernel_spec_cache.get_kernel_spec("test2") + assert kspec.display_name == "Test kernel: test2" async def tests_get_modified_spec(kernel_spec_cache): - kspec = await kernel_spec_cache.get_kernel_spec('test2') - assert kspec.display_name == 'Test kernel: test2' + kspec = await kernel_spec_cache.get_kernel_spec("test2") + assert kspec.display_name == "Test kernel: test2" # Modify entry - _modify_kernelspec(kspec.resource_dir, 'test2') - kspec = await kernel_spec_cache.get_kernel_spec('test2') - 
assert kspec.display_name == 'test2 modified!' + _modify_kernelspec(kspec.resource_dir, "test2") + kspec = await kernel_spec_cache.get_kernel_spec("test2") + assert kspec.display_name == "test2 modified!" async def tests_add_spec(kernel_spec_cache, kernelspec_location, other_kernelspec_location): assert len(kernel_spec_cache.observed_dirs) == (1 if kernel_spec_cache.cache_enabled else 0) - assert str(kernelspec_location) in kernel_spec_cache.observed_dirs \ - if kernel_spec_cache.cache_enabled else True + assert ( + str(kernelspec_location) in kernel_spec_cache.observed_dirs + if kernel_spec_cache.cache_enabled + else True + ) - _install_kernelspec(str(other_kernelspec_location), 'added') - kspec = await kernel_spec_cache.get_kernel_spec('added') + _install_kernelspec(str(other_kernelspec_location), "added") + kspec = await kernel_spec_cache.get_kernel_spec("added") # Ensure new location has been added to observed_dirs assert len(kernel_spec_cache.observed_dirs) == (2 if kernel_spec_cache.cache_enabled else 0) - assert str(other_kernelspec_location) in kernel_spec_cache.observed_dirs \ - if kernel_spec_cache.cache_enabled else True + assert ( + str(other_kernelspec_location) in kernel_spec_cache.observed_dirs + if kernel_spec_cache.cache_enabled + else True + ) - assert kspec.display_name == 'Test kernel: added' + assert kspec.display_name == "Test kernel: added" assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) # Add another to an existing observed directory, no cache miss here - _install_kernelspec(str(kernelspec_location), 'added2') - await asyncio.sleep(0.5) # sleep for a half-second to allow cache to add item (no cache miss in this case) - kspec = await kernel_spec_cache.get_kernel_spec('added2') + _install_kernelspec(str(kernelspec_location), "added2") + await asyncio.sleep( + 0.5 + ) # sleep for a half-second to allow cache to add item (no cache miss in this case) + kspec = await 
kernel_spec_cache.get_kernel_spec("added2") - assert kspec.display_name == 'Test kernel: added2' + assert kspec.display_name == "Test kernel: added2" assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) async def tests_remove_spec(kernel_spec_cache): - kspec = await kernel_spec_cache.get_kernel_spec('test2') - assert kspec.display_name == 'Test kernel: test2' + kspec = await kernel_spec_cache.get_kernel_spec("test2") + assert kspec.display_name == "Test kernel: test2" assert kernel_spec_cache.cache_misses == 0 shutil.rmtree(kspec.resource_dir) with pytest.raises(NoSuchKernel): - await kernel_spec_cache.get_kernel_spec('test2') + await kernel_spec_cache.get_kernel_spec("test2") assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) async def tests_get_missing(kernel_spec_cache): with pytest.raises(NoSuchKernel): - await kernel_spec_cache.get_kernel_spec('missing') + await kernel_spec_cache.get_kernel_spec("missing") assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) diff --git a/enterprise_gateway/tests/test_mixins.py b/enterprise_gateway/tests/test_mixins.py index 6b73a3f0f..a9bc19fdd 100644 --- a/enterprise_gateway/tests/test_mixins.py +++ b/enterprise_gateway/tests/test_mixins.py @@ -9,14 +9,16 @@ from unittest.mock import Mock except ImportError: # Python 2.7: use backport - from mock import Mock + from unittest.mock import Mock from tornado import web -from enterprise_gateway.mixins import TokenAuthorizationMixin, JSONErrorsMixin + +from enterprise_gateway.mixins import JSONErrorsMixin, TokenAuthorizationMixin -class SuperTokenAuthHandler(object): +class SuperTokenAuthHandler: """Super class for the handler using TokenAuthorizationMixin.""" + is_prepared = False def prepare(self): @@ -26,10 +28,11 @@ def prepare(self): class TestableTokenAuthHandler(TokenAuthorizationMixin, SuperTokenAuthHandler): """Implementation that uses the TokenAuthorizationMixin for 
testing.""" + __test__ = False - def __init__(self, token=''): - self.settings = {'eg_auth_token': token} + def __init__(self, token=""): + self.settings = {"eg_auth_token": token} self.arguments = {} self.response = None self.status_code = None @@ -37,26 +40,27 @@ def __init__(self, token=''): def send_error(self, status_code): self.status_code = status_code - def get_argument(self, name, default=''): + def get_argument(self, name, default=""): return self.arguments.get(name, default) class TestTokenAuthMixin(unittest.TestCase): """Unit tests the Token authorization mixin.""" + def setUp(self): """Creates a handler that uses the mixin.""" - self.mixin = TestableTokenAuthHandler('YouKnowMe') + self.mixin = TestableTokenAuthHandler("YouKnowMe") def test_no_token_required(self): """No token required - status should be None.""" - self.mixin.settings['eg_auth_token'] = '' + self.mixin.settings["eg_auth_token"] = "" self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) self.assertEqual(self.mixin.status_code, None) def test_missing_token(self): """Missing token - tatus should be 'unauthorized'.""" - attrs = {'headers': {}} + attrs = {"headers": {}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, False) @@ -64,7 +68,7 @@ def test_missing_token(self): def test_valid_header_token(self): """Valid header token - status should be None.""" - attrs = {'headers': {'Authorization': 'token YouKnowMe'}} + attrs = {"headers": {"Authorization": "token YouKnowMe"}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) @@ -72,7 +76,7 @@ def test_valid_header_token(self): def test_wrong_header_token(self): """Wrong header token - status should be 'unauthorized'.""" - attrs = {'headers': {'Authorization': 'token NeverHeardOf'}} + attrs = {"headers": {"Authorization": "token NeverHeardOf"}} self.mixin.request = Mock(**attrs) self.mixin.prepare() 
self.assertEqual(self.mixin.is_prepared, False) @@ -80,8 +84,8 @@ def test_wrong_header_token(self): def test_valid_url_token(self): """Valid url token - status should be None.""" - self.mixin.arguments['token'] = 'YouKnowMe' - attrs = {'headers': {}} + self.mixin.arguments["token"] = "YouKnowMe" + attrs = {"headers": {}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) @@ -89,8 +93,8 @@ def test_valid_url_token(self): def test_wrong_url_token(self): """Wrong url token - tatus should be 'unauthorized'.""" - self.mixin.arguments['token'] = 'NeverHeardOf' - attrs = {'headers': {}} + self.mixin.arguments["token"] = "NeverHeardOf" + attrs = {"headers": {}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, False) @@ -98,8 +102,8 @@ def test_wrong_url_token(self): def test_differing_tokens_valid_url(self): """Differing tokens - status should be None, URL token takes precedence""" - self.mixin.arguments['token'] = 'YouKnowMe' - attrs = {'headers': {'Authorization': 'token NeverHeardOf'}} + self.mixin.arguments["token"] = "YouKnowMe" + attrs = {"headers": {"Authorization": "token NeverHeardOf"}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) @@ -107,9 +111,9 @@ def test_differing_tokens_valid_url(self): def test_differing_tokens_wrong_url(self): """Differing token w/ wrong url - status should be 'unauthorized', URL token takes precedence""" - attrs = {'headers': {'Authorization': 'token YouKnowMe'}} + attrs = {"headers": {"Authorization": "token YouKnowMe"}} self.mixin.request = Mock(**attrs) - self.mixin.arguments['token'] = 'NeverHeardOf' + self.mixin.arguments["token"] = "NeverHeardOf" self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, False) self.assertEqual(self.mixin.status_code, 401) @@ -117,6 +121,7 @@ def test_differing_tokens_wrong_url(self): class TestableJSONErrorsHandler(JSONErrorsMixin): 
"""Implementation that uses the JSONErrorsMixin for testing.""" + __test__ = False def __init__(self): @@ -138,6 +143,7 @@ def set_header(self, name, value): class TestJSONErrorsMixin(unittest.TestCase): """Unit tests the JSON errors mixin.""" + def setUp(self): """Creates a handler that uses the mixin.""" self.mixin = TestableJSONErrorsHandler() @@ -147,25 +153,25 @@ def test_status(self): self.mixin.write_error(404) response = json.loads(self.mixin.response) self.assertEqual(self.mixin.status_code, 404) - self.assertEqual(response['reason'], 'Not Found') - self.assertEqual(response['message'], '') + self.assertEqual(response["reason"], "Not Found") + self.assertEqual(response["message"], "") def test_custom_status(self): """Custom reason from exeception should be set in the response.""" - exc = web.HTTPError(500, reason='fake-reason') + exc = web.HTTPError(500, reason="fake-reason") self.mixin.write_error(500, exc_info=[None, exc]) response = json.loads(self.mixin.response) self.assertEqual(self.mixin.status_code, 500) - self.assertEqual(response['reason'], 'fake-reason') - self.assertEqual(response['message'], '') + self.assertEqual(response["reason"], "fake-reason") + self.assertEqual(response["message"], "") def test_log_message(self): """Custom message from exeception should be set in the response.""" - exc = web.HTTPError(410, log_message='fake-message') + exc = web.HTTPError(410, log_message="fake-message") self.mixin.write_error(410, exc_info=[None, exc]) response = json.loads(self.mixin.response) self.assertEqual(self.mixin.status_code, 410) - self.assertEqual(response['reason'], 'Gone') - self.assertEqual(response['message'], 'fake-message') + self.assertEqual(response["reason"], "Gone") + self.assertEqual(response["message"], "fake-message") diff --git a/etc/docker/demo-base/README.md b/etc/docker/demo-base/README.md index d10bdf5a6..e1fd531b9 100644 --- a/etc/docker/demo-base/README.md +++ b/etc/docker/demo-base/README.md @@ -1,17 +1,19 @@ # What this 
image Gives You -* Ubuntu base image : bionic -* Hadoop 2.7.7 -* Apache Spark 2.4.6 -* Java 1.8 runtime -* Mini-conda latest (python 3.7) with R packages -* Toree 0.4.0-incubating -* `jovyan` service user, with system users `elyra`, `bob`, and `alice`. The jovyan uid is `1000` to match other jupyter - images. -* Password-less ssh for service user -* Users have HDFS folder setup at startup + +- Ubuntu base image : bionic +- Hadoop 2.7.7 +- Apache Spark 2.4.6 +- Java 1.8 runtime +- Mini-conda latest (python 3.7) with R packages +- Toree 0.4.0-incubating +- `jovyan` service user, with system users `elyra`, `bob`, and `alice`. The jovyan uid is `1000` to match other jupyter + images. +- Password-less ssh for service user +- Users have HDFS folder setup at startup # Basic Use + As of the 0.9.0 release of [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway/releases) -this image can be started as a separate YARN cluster to better demonstrate remote kernel capabilities. See section +this image can be started as a separate YARN cluster to better demonstrate remote kernel capabilities. See section [Dual Mode](https://hub.docker.com/r/elyra/enterprise-gateway/#dual_mode) on the enterprise-gateway page for command usage. diff --git a/etc/docker/docker-compose.yml b/etc/docker/docker-compose.yml index bc938cb4c..6a96148db 100644 --- a/etc/docker/docker-compose.yml +++ b/etc/docker/docker-compose.yml @@ -9,7 +9,6 @@ version: "3.5" # also required mode=host on any published ports. :-( # 3. 
We only use one replica since session affinity is another point of investigation in Swarm services: - enterprise-gateway: image: elyra/enterprise-gateway:dev user: root diff --git a/etc/docker/enterprise-gateway-demo/README.md b/etc/docker/enterprise-gateway-demo/README.md index ba79922ab..5376b070a 100644 --- a/etc/docker/enterprise-gateway-demo/README.md +++ b/etc/docker/enterprise-gateway-demo/README.md @@ -1,23 +1,24 @@ -Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), this image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) to better demonstrate running Python, R and Scala kernels in YARN-cluster mode. +Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), this image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) to better demonstrate running Python, R and Scala kernels in YARN-cluster mode. # What it Gives You -* [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) base functionality -* [Jupyter Enterprise Gateway](https://github.com/jupyter-incubator/enterprise_gateway) -* Python/R/Toree kernels that target YARN-cluster mode + +- [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) base functionality +- [Jupyter Enterprise Gateway](https://github.com/jupyter-incubator/enterprise_gateway) +- Python/R/Toree kernels that target YARN-cluster mode # Basic Use **elyra/enterprise-gateway-demo** can be used as a combined YARN cluster in which the kernels run locally in YARN-cluster mode, or combined with a different instance of itself or an [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) instance to more easily view that kernels are running remotely. -Prior to using either mode, we recommend you create a local docker network. 
This better isolates the container(s) and avoids port collisions that might come into play if you're using a gateway-enabled Notebook image on the same host. Here's a simple way to create a docker network... +Prior to using either mode, we recommend you create a local docker network. This better isolates the container(s) and avoids port collisions that might come into play if you're using a gateway-enabled Notebook image on the same host. Here's a simple way to create a docker network... `docker network create -d bridge jeg` -Once created, you just add `--net jeg` to the enterprise gateway run commands. Using `--net jeg` when creating instances of the gateway-enabled Notebook image are not necessary. +Once created, you just add `--net jeg` to the enterprise gateway run commands. Using `--net jeg` when creating instances of the gateway-enabled Notebook image are not necessary. ### Combined Mode -To run the image as a combined YARN/Enterprise Gateway instance, use the following command: +To run the image as a combined YARN/Enterprise Gateway instance, use the following command: `docker run -itd --rm -p 8888:8888 -p 8088:8088 -p 8042:8042 --net=jeg elyra/enterprise-gateway-demo --elyra` @@ -50,7 +51,8 @@ Then, invoke elyra/enterprise-gateway-demo as purely an Enterprise Gateway host **Tip:** YARN logs can be accessed via host system's public IP on port `8042` rather than using container's `hostname:8042`, while YARN Resource manager can be accessed via container's `hostname:8088` port. #### Bring Your Own Kernels -elyra/enterprise-gateway-demo sets up `JUPYTER_PATH` to point to `/tmp/byok`. This enables the ability to use docker volumes to mount your own set of kernelspec files. The kernelspecs must reside in a `kernels` directory. You can mount to the appropriate point in one of two ways via the docker `-v` option: + +elyra/enterprise-gateway-demo sets up `JUPYTER_PATH` to point to `/tmp/byok`. 
This enables the ability to use docker volumes to mount your own set of kernelspec files. The kernelspecs must reside in a `kernels` directory. You can mount to the appropriate point in one of two ways via the docker `-v` option: `-v :/tmp/byok` @@ -58,24 +60,26 @@ or `-v :/tmp/byok/kernels` -To confirm Enterprise Gateway is detecting the new kernelspecs, monitor the log (`docker logs -f `) and issue a refresh from the gateway-enabled Notebook instance. Each refresh of the notebook's tree view triggers a refresh of the set of kernelspecs in Enterprise Gateway. +To confirm Enterprise Gateway is detecting the new kernelspecs, monitor the log (`docker logs -f `) and issue a refresh from the gateway-enabled Notebook instance. Each refresh of the notebook's tree view triggers a refresh of the set of kernelspecs in Enterprise Gateway. # Connecting a client notebook -You can use any gateway-enabled notebook server to hit the running docker container. + +You can use any gateway-enabled notebook server to hit the running docker container. Note: Given the size of the enterprise-gateway-demo when combined with a YARN/Spark installation, it is recommended that you have at least 4GB of memory allocated for your docker image in order to run kernels (particularly the Toree/Scala kernel). # Recognized Environment Variables -The following environment variables are recognized during startup of the container and can be specified via docker's `-e` option. These will rarely need to be modified. -`KG_IP`: specifies the IP address of enterprise gateway. This should be a public IP. Default = 0.0.0.0 -`KG_PORT`: specifies the port that enterprise gateway is listening on. This port should be mapped to a host port via `-p`. Default = 8888 -`KG_PORT_RETRIES`: specifies the number of retries due to port conflicts that will be attempted. Default = 0 +The following environment variables are recognized during startup of the container and can be specified via docker's `-e` option. 
These will rarely need to be modified. + +`KG_IP`: specifies the IP address of enterprise gateway. This should be a public IP. Default = 0.0.0.0 +`KG_PORT`: specifies the port that enterprise gateway is listening on. This port should be mapped to a host port via `-p`. Default = 8888 +`KG_PORT_RETRIES`: specifies the number of retries due to port conflicts that will be attempted. Default = 0 -`EG_REMOTE_HOSTS`: specifies a comma-separated lists of hostnames which can be used to run YARN-client kernels. Default = -`EG_YARN_ENDPOINT`: specifies the HTTP endpoint of the YARN Resource Manager. Default = http://:8088/ws/v1/cluster} -`EG_SSH_PORT=`: specifies the port of the SSH server. This container is setup to use port `2122`. This value should not be changed. Default = 2122 +`EG_REMOTE_HOSTS`: specifies a comma-separated lists of hostnames which can be used to run YARN-client kernels. Default = +`EG_YARN_ENDPOINT`: specifies the HTTP endpoint of the YARN Resource Manager. Default = http://:8088/ws/v1/cluster} +`EG_SSH_PORT=`: specifies the port of the SSH server. This container is setup to use port `2122`. This value should not be changed. Default = 2122 -`EG_ENABLE_TUNNELING`: specifies whether port tunneling will be used. This value is currently `False` because ssh tunneling is not working unless Enterprise Gateway is run as the root user. This can be accomplished by starting the container with `bash` as the command and running `start-enterprise-gateway.sh` directly (sans `sudo`). +`EG_ENABLE_TUNNELING`: specifies whether port tunneling will be used. This value is currently `False` because ssh tunneling is not working unless Enterprise Gateway is run as the root user. This can be accomplished by starting the container with `bash` as the command and running `start-enterprise-gateway.sh` directly (sans `sudo`). 
NOTE: Dual Mode functionality is only available in tags 0.9.0+ diff --git a/etc/docker/enterprise-gateway-demo/bootstrap-enterprise-gateway.sh b/etc/docker/enterprise-gateway-demo/bootstrap-enterprise-gateway.sh index ed6f83004..891f869be 100755 --- a/etc/docker/enterprise-gateway-demo/bootstrap-enterprise-gateway.sh +++ b/etc/docker/enterprise-gateway-demo/bootstrap-enterprise-gateway.sh @@ -39,4 +39,4 @@ then /usr/local/bin/start-enterprise-gateway.sh fi -exit 0 \ No newline at end of file +exit 0 diff --git a/etc/docker/enterprise-gateway-demo/start-enterprise-gateway.sh.template b/etc/docker/enterprise-gateway-demo/start-enterprise-gateway.sh.template index b7126bf21..2fb7d1e3d 100644 --- a/etc/docker/enterprise-gateway-demo/start-enterprise-gateway.sh.template +++ b/etc/docker/enterprise-gateway-demo/start-enterprise-gateway.sh.template @@ -24,5 +24,3 @@ jupyter enterprisegateway \ --MappingKernelManager.cull_idle_timeout=${EG_CULL_IDLE_TIMEOUT} \ --MappingKernelManager.cull_interval=30 \ --MappingKernelManager.cull_connected=${EG_CULL_CONNECTED} 2>&1 | tee /usr/local/share/jupyter/enterprise-gateway.log - - diff --git a/etc/docker/enterprise-gateway/README.md b/etc/docker/enterprise-gateway/README.md index 1f845e114..8db867068 100644 --- a/etc/docker/enterprise-gateway/README.md +++ b/etc/docker/enterprise-gateway/README.md @@ -1,23 +1,28 @@ -This image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on jupyter/minimal-notebook as a base with Apache Spark 2.4.6 installed on top. +This image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on jupyter/minimal-notebook as a base with Apache Spark 2.4.6 installed on top. 
**Note: If you're looking for the YARN-based image of this name, it has been moved to [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/).** # What it Gives You -* [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway) -* Python/R/Toree kernels that can be launched and distributed across a managed cluster. + +- [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway) +- Python/R/Toree kernels that can be launched and distributed across a managed cluster. # Basic Use -Pull this image, along with all of the elyra/kernel-* images to each of your managed nodes. Although manual seeding of images across the cluster is not required, it is highly recommended since kernel startup times can timeout and image downloads can seriously undermine that window. + +Pull this image, along with all of the elyra/kernel-\* images to each of your managed nodes. Although manual seeding of images across the cluster is not required, it is highly recommended since kernel startup times can timeout and image downloads can seriously undermine that window. ## Kubernetes -Download the [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) file and make any necessary changes for your configuration. We recommend that a persistent volume be used so that the kernelspec files can be accessed outside of the container since we've found those to require post-deployment modifications from time to time. + +Download the [enterprise-gateway.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/kubernetes/enterprise-gateway.yaml) file and make any necessary changes for your configuration. We recommend that a persistent volume be used so that the kernelspec files can be accessed outside of the container since we've found those to require post-deployment modifications from time to time. 
Deploy Jupyter Enterprise Gateway using `kubectl apply -f enterprise-gateway.yaml` ## Docker Swarm -Download the [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml) file and make any necessary changes for your configuration. The compose file consists of three pieces, the Enterprise Gateway container itself, a proxy layer container, and a Docker network. We recommend that a volume be used so that the kernelspec files can be accessed outside of the container since we've found those to require post-deployment modifications from time to time. + +Download the [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml) file and make any necessary changes for your configuration. The compose file consists of three pieces, the Enterprise Gateway container itself, a proxy layer container, and a Docker network. We recommend that a volume be used so that the kernelspec files can be accessed outside of the container since we've found those to require post-deployment modifications from time to time. ## Docker (Traditional) -Same instructions as for Docker Swarm using [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml). Please note that you can still run Enterprise Gateway as a traditional docker container within a Docker Swarm cluster, yet have the kernel containers launched as Docker Swarm services since how the kernels are launched is a function of their configured process proxy class. + +Same instructions as for Docker Swarm using [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/master/etc/docker/docker-compose.yml). 
Please note that you can still run Enterprise Gateway as a traditional docker container within a Docker Swarm cluster, yet have the kernel containers launched as Docker Swarm services since how the kernels are launched is a function of their configured process proxy class. For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). diff --git a/etc/docker/enterprise-gateway/start-enterprise-gateway.sh b/etc/docker/enterprise-gateway/start-enterprise-gateway.sh index e7c65f9c4..cd5950246 100755 --- a/etc/docker/enterprise-gateway/start-enterprise-gateway.sh +++ b/etc/docker/enterprise-gateway/start-enterprise-gateway.sh @@ -40,5 +40,3 @@ exec jupyter enterprisegateway \ --RemoteMappingKernelManager.cull_interval=${EG_CULL_INTERVAL} \ --RemoteMappingKernelManager.cull_connected=${EG_CULL_CONNECTED} \ --RemoteMappingKernelManager.default_kernel_name=${EG_DEFAULT_KERNEL_NAME} - - diff --git a/etc/docker/kernel-image-puller/README.md b/etc/docker/kernel-image-puller/README.md index a08faeafc..49f763d31 100644 --- a/etc/docker/kernel-image-puller/README.md +++ b/etc/docker/kernel-image-puller/README.md @@ -1,20 +1,23 @@ This image is responsible for contacting the configured [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) instance within a Kubernetes or Docker Swarm cluster and pulling the set of kernel-based images to the node on which it is running. # What it Gives You -* The ability to add new nodes and have kernel images on those nodes automatically populated. -* The ability to configure new kernelspecs that use different images and have those images pulled to all cluster nodes. + +- The ability to add new nodes and have kernel images on those nodes automatically populated. +- The ability to configure new kernelspecs that use different images and have those images pulled to all cluster nodes. 
# Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. -As part of that deployment, Kernel Image Puller (KIP) will be launched on each node. On Kubernetes, this will be accomplished via a DaemonSet. On Docker Swarm, it will be via a global service. KIP will then contact the configured Enterprise Gateway instance, fetch the set of in-use kernelspecs, parse out the image names and pull those images. +As part of that deployment, Kernel Image Puller (KIP) will be launched on each node. On Kubernetes, this will be accomplished via a DaemonSet. On Docker Swarm, it will be via a global service. KIP will then contact the configured Enterprise Gateway instance, fetch the set of in-use kernelspecs, parse out the image names and pull those images. There are a few points of configuration listed below - all of which are environment variables (defaults in parenthesis). -* `KIP_GATEWAY_HOST` (`http://localhost:8888`) -* `KIP_INTERVAL` (`300`) -* `KIP_LOG_LEVEL` (`INFO`) -* `KIP_NUM_PULLERS` (`2`) -* `KIP_NUM_RETRIES` (`3`) -* `KIP_PULL_POLICY` (`IfNotPresent`) + +- `KIP_GATEWAY_HOST` (`http://localhost:8888`) +- `KIP_INTERVAL` (`300`) +- `KIP_LOG_LEVEL` (`INFO`) +- `KIP_NUM_PULLERS` (`2`) +- `KIP_NUM_RETRIES` (`3`) +- `KIP_PULL_POLICY` (`IfNotPresent`) For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). 
diff --git a/etc/docker/kernel-image-puller/kernel_image_puller.py b/etc/docker/kernel-image-puller/kernel_image_puller.py index 22985ca96..6d3b3b41f 100644 --- a/etc/docker/kernel-image-puller/kernel_image_puller.py +++ b/etc/docker/kernel-image-puller/kernel_image_puller.py @@ -1,16 +1,14 @@ import logging import os import queue -import requests import time +from subprocess import CalledProcessError, run +from threading import Thread +from typing import List, Optional +import requests from docker.client import DockerClient from docker.errors import NotFound -from subprocess import run -from subprocess import CalledProcessError -from threading import Thread -from typing import List -from typing import Optional gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888") num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2")) @@ -33,7 +31,7 @@ CONTAINERD_CLIENT = "containerd" supported_container_runtimes = (DOCKER_CLIENT, CONTAINERD_CLIENT) -logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s') +logging.basicConfig(format="[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s") def get_kernelspecs(): @@ -58,9 +56,11 @@ def fetch_image_names(): kspecs = None try: kspecs_response = get_kernelspecs() - kspecs = kspecs_response.get('kernelspecs') + kspecs = kspecs_response.get("kernelspecs") except Exception as ex: - logger.error(f"Got exception attempting to retrieve kernelspecs - retrying. Exception was: {ex}") + logger.error( + f"Got exception attempting to retrieve kernelspecs - retrying. 
Exception was: {ex}" + ) finally: if kspecs is None: return False @@ -68,16 +68,16 @@ def fetch_image_names(): # Locate the configured images within the kernelspecs and add to set for duplicate management images = set() for key in kspecs.keys(): - metadata = kspecs.get(key).get('spec').get('metadata') + metadata = kspecs.get(key).get("spec").get("metadata") if metadata is not None: - process_proxy = metadata.get('process_proxy') + process_proxy = metadata.get("process_proxy") if process_proxy is not None: - config = process_proxy.get('config') + config = process_proxy.get("config") if config is not None: - image_name = config.get('image_name') + image_name = config.get("image_name") if image_name is not None: images.add(image_name) - executor_image_name = config.get('executor_image_name') + executor_image_name = config.get("executor_image_name") if executor_image_name is not None: images.add(executor_image_name) @@ -101,11 +101,15 @@ def pull_image(image_name): if image_name in pulled_images: # Image has been pulled, but make sure it still exists. If it doesn't exist # let this drop through to actual pull - logger.info(f"Image '{image_name}' already pulled and policy is '{policy}'. Checking existence.") + logger.info( + f"Image '{image_name}' already pulled and policy is '{policy}'. Checking existence." + ) if image_exists(image_name): return pulled_images.remove(image_name) - logger.warning(f"Previously pulled image '{image_name}' was not found - attempting pull...") + logger.warning( + f"Previously pulled image '{image_name}' was not found - attempting pull..." + ) logger.info(f"Pulling image '{image_name}'...") if download_image(image_name): @@ -115,11 +119,11 @@ def pull_image(image_name): def get_absolute_image_name(image_name: str) -> str: - """Ensures the image name is prefixed with a "registry". 
""" + """Ensures the image name is prefixed with a "registry".""" # We will check for the form 'registry/repo/image:tag' if the 'registry/' prefix # is missing (based on the absence of two slashes), then we'll prefix the image # name with the KIP_DEFAULT_CONTAINER_REGISTRY env value. - image_pieces = image_name.split('/') + image_pieces = image_name.split("/") if len(image_pieces) < 3: # we're missing a registry specifier, use env return f"{default_container_registry}/{image_name}" return image_name # take our chances @@ -136,13 +140,15 @@ def image_exists(image_name: str) -> bool: except NotFound: result = False elif container_runtime == CONTAINERD_CLIENT: - argv = ['crictl', '-r', runtime_endpoint, 'inspecti', '-q', absolute_image_name] + argv = ["crictl", "-r", runtime_endpoint, "inspecti", "-q", absolute_image_name] result = execute_cmd(argv) else: # invalid container runtime logger.error(f"Invalid container runtime detected: '{container_runtime}'!") result = False t1 = time.time() - logger.debug(f"Checked existence of image '{image_name}' in {(t1 - t0):.3f} secs. exists = {result}") + logger.debug( + f"Checked existence of image '{image_name}' in {(t1 - t0):.3f} secs. exists = {result}" + ) return result @@ -157,7 +163,7 @@ def download_image(image_name: str) -> bool: except NotFound: result = False elif container_runtime == CONTAINERD_CLIENT: - argv = ['crictl', '-r', runtime_endpoint, 'pull', absolute_image_name] + argv = ["crictl", "-r", runtime_endpoint, "pull", absolute_image_name] result = execute_cmd(argv) else: # invalid container runtime logger.error(f"Invalid container runtime detected: '{container_runtime}'!") @@ -206,10 +212,14 @@ def puller(): except Exception as ex: i += 1 if i < num_retries: - logger.warning(f"Attempt {i} to pull image '{image_name}' encountered exception - retrying. " - f"Exception was: {ex}.") + logger.warning( + f"Attempt {i} to pull image '{image_name}' encountered exception - retrying. " + f"Exception was: {ex}." 
+ ) else: - logger.error(f"Attempt {i} to pull image '{image_name}' failed with exception: {ex}") + logger.error( + f"Attempt {i} to pull image '{image_name}' failed with exception: {ex}" + ) name_queue.task_done() @@ -228,7 +238,7 @@ def get_container_runtime() -> Optional[str]: if __name__ == "__main__": - logger = logging.getLogger('kernel_image_puller') + logger = logging.getLogger("kernel_image_puller") logger.setLevel(log_level) container_runtime = get_container_runtime() @@ -236,8 +246,10 @@ def get_container_runtime() -> Optional[str]: # Determine pull policy. pulled_images = set() if policy not in policies: - logger.warning(f"Invalid pull policy detected in KIP_PULL_POLICY: '{policy}'. " - f"Using policy '{POLICY_IF_NOT_PRESENT}'.") + logger.warning( + f"Invalid pull policy detected in KIP_PULL_POLICY: '{policy}'. " + f"Using policy '{POLICY_IF_NOT_PRESENT}'." + ) policy = POLICY_IF_NOT_PRESENT logger.info("Starting Kernel Image Puller with the following parameters:") @@ -253,8 +265,10 @@ def get_container_runtime() -> Optional[str]: if is_runtime_endpoint_recognized(): logger.info(f"Detected container runtime: {container_runtime}") else: - logger.warning(f"This node's container runtime interface could not be detected from " - f"endpoint: {runtime_endpoint}, proceeding with {container_runtime} client...") + logger.warning( + f"This node's container runtime interface could not be detected from " + f"endpoint: {runtime_endpoint}, proceeding with {container_runtime} client..." + ) # Create an empty queue and start the puller threads. The number of puller threads is configurable. 
name_queue = queue.Queue() diff --git a/etc/docker/kernel-image-puller/requirements.txt b/etc/docker/kernel-image-puller/requirements.txt index 131b0dfa5..6ec46bb3c 100644 --- a/etc/docker/kernel-image-puller/requirements.txt +++ b/etc/docker/kernel-image-puller/requirements.txt @@ -1,3 +1,3 @@ docker>=3.7.2 -requests>=2.7,<3.0 kubernetes>=17.17.0 +requests>=2.7,<3.0 diff --git a/etc/docker/kernel-py/README.md b/etc/docker/kernel-py/README.md index 42aa2939a..427d46c09 100644 --- a/etc/docker/kernel-py/README.md +++ b/etc/docker/kernel-py/README.md @@ -1,12 +1,14 @@ -This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook/). +This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook/). # What it Gives You -* IPython kernel support (with debugger) -* [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook) + +- IPython kernel support (with debugger) +- [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook) # Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. -Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. +Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. 
For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). diff --git a/etc/docker/kernel-r/Dockerfile b/etc/docker/kernel-r/Dockerfile index 520cb9b53..16d558755 100644 --- a/etc/docker/kernel-r/Dockerfile +++ b/etc/docker/kernel-r/Dockerfile @@ -26,4 +26,3 @@ RUN chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \ USER jovyan ENV KERNEL_LANGUAGE R CMD /usr/local/bin/bootstrap-kernel.sh - diff --git a/etc/docker/kernel-r/README.md b/etc/docker/kernel-r/README.md index 2fe3f0bdc..0d485a7fa 100644 --- a/etc/docker/kernel-r/README.md +++ b/etc/docker/kernel-r/README.md @@ -1,11 +1,13 @@ -This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on [jupyter/r-notebook](https://hub.docker.com/r/jupyter/r-notebook/). +This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on [jupyter/r-notebook](https://hub.docker.com/r/jupyter/r-notebook/). # What it Gives You -* IRKernel kernel support + +- IRKernel kernel support # Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. -Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. +Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). 
diff --git a/etc/docker/kernel-scala/README.md b/etc/docker/kernel-scala/README.md index 5db5ddde0..825acd13a 100644 --- a/etc/docker/kernel-scala/README.md +++ b/etc/docker/kernel-scala/README.md @@ -1,12 +1,14 @@ -This image enables the use of a Scala ([Apache Toree](https://toree.apache.org/)) kernel launched from [Jupyter Enterprise Gateway](http://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [elyra/spark:v2.4.6](https://hub.docker.com/r/elyra/spark/) deriving from the [Apache Spark 2.4.6 release](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. +This image enables the use of a Scala ([Apache Toree](https://toree.apache.org/)) kernel launched from [Jupyter Enterprise Gateway](http://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [elyra/spark:v2.4.6](https://hub.docker.com/r/elyra/spark/) deriving from the [Apache Spark 2.4.6 release](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. # What it Gives You -* Scala (Toree) kernel support -* Spark on kubernetes support from within a Jupyter Notebook + +- Scala (Toree) kernel support +- Spark on kubernetes support from within a Jupyter Notebook # Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. -Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. +Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. 
For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). diff --git a/etc/docker/kernel-spark-py/README.md b/etc/docker/kernel-spark-py/README.md index 5018b7ead..08becd8e5 100644 --- a/etc/docker/kernel-spark-py/README.md +++ b/etc/docker/kernel-spark-py/README.md @@ -1,11 +1,13 @@ -This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. +This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. # What it Gives You -* IPython kernel support (with debugger) -* [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook) -* Spark on kubernetes support from within a Jupyter Notebook + +- IPython kernel support (with debugger) +- [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook) +- Spark on kubernetes support from within a Jupyter Notebook # Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. 
Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. diff --git a/etc/docker/kernel-spark-r/README.md b/etc/docker/kernel-spark-r/README.md index 909a8a166..6a5f965ee 100644 --- a/etc/docker/kernel-spark-r/README.md +++ b/etc/docker/kernel-spark-r/README.md @@ -1,10 +1,12 @@ -This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. +This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. # What it Gives You -* IRkernel kernel support -* Spark on kubernetes support from within a Jupyter Notebook + +- IRkernel kernel support +- Spark on kubernetes support from within a Jupyter Notebook # Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. 
diff --git a/etc/docker/kernel-tf-gpu-py/README.md b/etc/docker/kernel-tf-gpu-py/README.md index 84f9d59e9..050759ec0 100644 --- a/etc/docker/kernel-tf-gpu-py/README.md +++ b/etc/docker/kernel-tf-gpu-py/README.md @@ -1,9 +1,11 @@ -This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on [tensorflow/tensorflow:2.7.0-gpu-jupyter](https://hub.docker.com/r/tensorflow/tensorflow/) deriving from the [tensorflow](https://github.com/tensorflow/tensorflow) project. +This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on [tensorflow/tensorflow:2.7.0-gpu-jupyter](https://hub.docker.com/r/tensorflow/tensorflow/) deriving from the [tensorflow](https://github.com/tensorflow/tensorflow) project. # What it Gives You -* IPython kernel support supplemented with Tensorflow functionality (and debugger) + +- IPython kernel support supplemented with Tensorflow functionality (and debugger) # Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. 
diff --git a/etc/docker/kernel-tf-py/README.md b/etc/docker/kernel-tf-py/README.md index 30387defc..ba509c10f 100644 --- a/etc/docker/kernel-tf-py/README.md +++ b/etc/docker/kernel-tf-py/README.md @@ -1,11 +1,13 @@ -This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image deriving from the [jupyter/tensorflow-notebook](https://github.com/jupyter/docker-stacks/tree/master/tensorflow-notebook) project. +This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image deriving from the [jupyter/tensorflow-notebook](https://github.com/jupyter/docker-stacks/tree/master/tensorflow-notebook) project. # What it Gives You -* IPython kernel support supplemented with Tensorflow functionality (and debugger) + +- IPython kernel support supplemented with Tensorflow functionality (and debugger) # Basic Use + Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. -For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). 
+For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). diff --git a/etc/kernel-launchers/R/scripts/server_listener.py b/etc/kernel-launchers/R/scripts/server_listener.py index fe61fa08b..301a245fa 100644 --- a/etc/kernel-launchers/R/scripts/server_listener.py +++ b/etc/kernel-launchers/R/scripts/server_listener.py @@ -1,36 +1,38 @@ import base64 -import os import json -import uuid -import random import logging +import os +import random import socket +import uuid +from threading import Thread -from Cryptodome.Cipher import PKCS1_v1_5, AES +from Cryptodome.Cipher import AES, PKCS1_v1_5 from Cryptodome.PublicKey import RSA from Cryptodome.Random import get_random_bytes from Cryptodome.Util.Padding import pad from jupyter_client.connect import write_connection_file -from threading import Thread LAUNCHER_VERSION = 1 # Indicate to server the version of this launcher (payloads may vary) -max_port_range_retries = int(os.getenv('MAX_PORT_RANGE_RETRIES', os.getenv('EG_MAX_PORT_RANGE_RETRIES', '5'))) +max_port_range_retries = int( + os.getenv("MAX_PORT_RANGE_RETRIES", os.getenv("EG_MAX_PORT_RANGE_RETRIES", "5")) +) -log_level = os.getenv('LOG_LEVEL', os.getenv('EG_LOG_LEVEL', '10')) +log_level = os.getenv("LOG_LEVEL", os.getenv("EG_LOG_LEVEL", "10")) log_level = int(log_level) if log_level.isdigit() else log_level -logging.basicConfig(format='[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s') +logging.basicConfig(format="[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s") -logger = logging.getLogger('server_listener for R launcher') +logger = logging.getLogger("server_listener for R launcher") logger.setLevel(log_level) def _encrypt(connection_info_str, public_key): """Encrypt the connection information using a generated AES key that is then encrypted using - the public key passed from the server. 
Both are then returned in an encoded JSON payload. + the public key passed from the server. Both are then returned in an encoded JSON payload. - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. + This code also exists in the Python kernel-launcher's launch_ipykernel.py script. """ aes_key = get_random_bytes(16) cipher = AES.new(aes_key, mode=AES.MODE_ECB) @@ -45,28 +47,38 @@ def _encrypt(connection_info_str, public_key): encrypted_key = base64.b64encode(cipher.encrypt(aes_key)) # Compose the payload and Base64 encode it - payload = {"version": LAUNCHER_VERSION, "key": encrypted_key.decode(), "conn_info": b64_connection_info.decode()} - b64_payload = base64.b64encode(json.dumps(payload).encode(encoding='utf-8')) + payload = { + "version": LAUNCHER_VERSION, + "key": encrypted_key.decode(), + "conn_info": b64_connection_info.decode(), + } + b64_payload = base64.b64encode(json.dumps(payload).encode(encoding="utf-8")) return b64_payload -def return_connection_info(connection_file, response_addr, lower_port, upper_port, kernel_id, public_key, parent_pid): +def return_connection_info( + connection_file, response_addr, lower_port, upper_port, kernel_id, public_key, parent_pid +): """Returns the connection information corresponding to this kernel. - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. + This code also exists in the Python kernel-launcher's launch_ipykernel.py script. """ response_parts = response_addr.split(":") if len(response_parts) != 2: - logger.error("Invalid format for response address '{}'. " - "Assuming 'pull' mode...".format(response_addr)) + logger.error( + "Invalid format for response address '{}'. " + "Assuming 'pull' mode...".format(response_addr) + ) return response_ip = response_parts[0] try: response_port = int(response_parts[1]) except ValueError: - logger.error("Invalid port component found in response address '{}'. 
" - "Assuming 'pull' mode...".format(response_addr)) + logger.error( + "Invalid port component found in response address '{}'. " + "Assuming 'pull' mode...".format(response_addr) + ) return with open(connection_file) as fp: @@ -74,21 +86,21 @@ def return_connection_info(connection_file, response_addr, lower_port, upper_por fp.close() # add process and process group ids into connection info - cf_json['pid'] = parent_pid - cf_json['pgid'] = os.getpgid(parent_pid) + cf_json["pid"] = parent_pid + cf_json["pgid"] = os.getpgid(parent_pid) # prepare socket address for handling signals comm_sock = prepare_comm_socket(lower_port, upper_port) - cf_json['comm_port'] = comm_sock.getsockname()[1] - cf_json['kernel_id'] = kernel_id + cf_json["comm_port"] = comm_sock.getsockname()[1] + cf_json["kernel_id"] = kernel_id s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((response_ip, response_port)) - json_content = json.dumps(cf_json).encode(encoding='utf-8') - logger.debug("JSON Payload '{}".format(json_content)) + json_content = json.dumps(cf_json).encode(encoding="utf-8") + logger.debug(f"JSON Payload '{json_content}") payload = _encrypt(json_content, public_key) - logger.debug("Encrypted Payload '{}".format(payload)) + logger.debug(f"Encrypted Payload '{payload}") s.send(payload) finally: s.close() @@ -99,10 +111,14 @@ def return_connection_info(connection_file, response_addr, lower_port, upper_por def prepare_comm_socket(lower_port, upper_port): """Prepares the socket to which the server will send signal and shutdown requests. - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. + This code also exists in the Python kernel-launcher's launch_ipykernel.py script. 
""" sock = _select_socket(lower_port, upper_port) - logger.info("Signal socket bound to host: {}, port: {}".format(sock.getsockname()[0], sock.getsockname()[1])) + logger.info( + "Signal socket bound to host: {}, port: {}".format( + sock.getsockname()[0], sock.getsockname()[1] + ) + ) sock.listen(1) sock.settimeout(5) return sock @@ -111,7 +127,7 @@ def prepare_comm_socket(lower_port, upper_port): def _select_ports(count, lower_port, upper_port): """Select and return n random ports that are available and adhere to the given port range, if applicable. - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. + This code also exists in the Python kernel-launcher's launch_ipykernel.py script. """ ports = [] sockets = [] @@ -127,28 +143,30 @@ def _select_ports(count, lower_port, upper_port): def _select_socket(lower_port, upper_port): """Create and return a socket whose port is available and adheres to the given port range, if applicable. - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. + This code also exists in the Python kernel-launcher's launch_ipykernel.py script. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) found_port = False retries = 0 while not found_port: try: - sock.bind(('0.0.0.0', _get_candidate_port(lower_port, upper_port))) + sock.bind(("0.0.0.0", _get_candidate_port(lower_port, upper_port))) found_port = True except Exception: retries = retries + 1 if retries > max_port_range_retries: raise RuntimeError( - "Failed to locate port within range {}..{} after {} retries!". - format(lower_port, upper_port, max_port_range_retries)) + "Failed to locate port within range {}..{} after {} retries!".format( + lower_port, upper_port, max_port_range_retries + ) + ) return sock def _get_candidate_port(lower_port, upper_port): """Returns a port within the given range. If the range is zero, the zero is returned. - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. 
+ This code also exists in the Python kernel-launcher's launch_ipykernel.py script. """ range_size = upper_port - lower_port if range_size == 0: @@ -159,15 +177,15 @@ def _get_candidate_port(lower_port, upper_port): def get_server_request(sock): """Gets a request from the server and returns the corresponding dictionary. - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. + This code also exists in the Python kernel-launcher's launch_ipykernel.py script. """ conn = None - data = '' + data = "" request_info = None try: conn, addr = sock.accept() while True: - buffer = conn.recv(1024).decode('utf-8') + buffer = conn.recv(1024).decode("utf-8") if not buffer: # send is complete request_info = json.loads(data) break @@ -184,43 +202,66 @@ def get_server_request(sock): def server_listener(sock, parent_pid): """Waits for requests from the server and processes each when received. Currently, - these will be one of a sending a signal to the corresponding kernel process (signum) or - stopping the listener and exiting the kernel (shutdown). + these will be one of a sending a signal to the corresponding kernel process (signum) or + stopping the listener and exiting the kernel (shutdown). - This code also exists in the Python kernel-launcher's launch_ipykernel.py script. + This code also exists in the Python kernel-launcher's launch_ipykernel.py script. 
""" shutdown = False while not shutdown: request = get_server_request(sock) if request: signum = -1 # prevent logging poll requests since that occurs every 3 seconds - if request.get('signum') is not None: - signum = int(request.get('signum')) + if request.get("signum") is not None: + signum = int(request.get("signum")) os.kill(parent_pid, signum) - if request.get('shutdown') is not None: - shutdown = bool(request.get('shutdown')) + if request.get("shutdown") is not None: + shutdown = bool(request.get("shutdown")) if signum != 0: - logger.info("server_listener got request: {}".format(request)) + logger.info(f"server_listener got request: {request}") -def setup_server_listener(conn_filename, parent_pid, lower_port, upper_port, response_addr, kernel_id, public_key): +def setup_server_listener( + conn_filename, parent_pid, lower_port, upper_port, response_addr, kernel_id, public_key +): ip = "0.0.0.0" key = str(uuid.uuid4()).encode() # convert to bytes ports = _select_ports(5, lower_port, upper_port) - write_connection_file(fname=conn_filename, ip=ip, key=key, shell_port=ports[0], iopub_port=ports[1], - stdin_port=ports[2], hb_port=ports[3], control_port=ports[4]) + write_connection_file( + fname=conn_filename, + ip=ip, + key=key, + shell_port=ports[0], + iopub_port=ports[1], + stdin_port=ports[2], + hb_port=ports[3], + control_port=ports[4], + ) if response_addr: - comm_socket = return_connection_info(conn_filename, response_addr, int(lower_port), int(upper_port), - kernel_id, public_key, int(parent_pid)) + comm_socket = return_connection_info( + conn_filename, + response_addr, + int(lower_port), + int(upper_port), + kernel_id, + public_key, + int(parent_pid), + ) if comm_socket: # socket in use, start server listener thread - server_listener_thread = Thread(target=server_listener, args=(comm_socket, int(parent_pid),)) + server_listener_thread = Thread( + target=server_listener, + args=( + comm_socket, + int(parent_pid), + ), + ) server_listener_thread.start() return 
__all__ = [ - 'setup_server_listener', + "setup_server_listener", ] diff --git a/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh b/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh index 5228215ff..0d133dcd7 100755 --- a/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh +++ b/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh @@ -94,4 +94,3 @@ else exit 1 fi exit 0 - diff --git a/etc/kernel-launchers/docker/scripts/launch_docker.py b/etc/kernel-launchers/docker/scripts/launch_docker.py index 3b73cc5e7..0665a9526 100644 --- a/etc/kernel-launchers/docker/scripts/launch_docker.py +++ b/etc/kernel-launchers/docker/scripts/launch_docker.py @@ -1,58 +1,63 @@ +import argparse import os import sys -import argparse + +import urllib3 from docker.client import DockerClient from docker.types import EndpointSpec, RestartPolicy -import urllib3 urllib3.disable_warnings() # Set env to False if the container should be left around for debug purposes, etc. -remove_container = bool(os.getenv('REMOVE_CONTAINER', os.getenv('EG_REMOVE_CONTAINER', 'True')).lower() == 'true') -swarm_mode = bool(os.getenv('DOCKER_MODE', os.getenv('EG_DOCKER_MODE', 'swarm')).lower() == 'swarm') +remove_container = bool( + os.getenv("REMOVE_CONTAINER", os.getenv("EG_REMOVE_CONTAINER", "True")).lower() == "true" +) +swarm_mode = bool(os.getenv("DOCKER_MODE", os.getenv("EG_DOCKER_MODE", "swarm")).lower() == "swarm") def launch_docker_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode): # Launches a containerized kernel. # Can't proceed if no image was specified. 
- image_name = os.environ.get('KERNEL_IMAGE', None) + image_name = os.environ.get("KERNEL_IMAGE", None) if image_name is None: sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!") # Container name is composed of KERNEL_USERNAME and KERNEL_ID - container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id + container_name = os.environ.get("KERNEL_USERNAME", "") + "-" + kernel_id # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'... - docker_network = os.environ.get('DOCKER_NETWORK', os.environ.get('EG_DOCKER_NETWORK', 'bridge')) + docker_network = os.environ.get("DOCKER_NETWORK", os.environ.get("EG_DOCKER_NETWORK", "bridge")) # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ... labels = dict() - labels['kernel_id'] = kernel_id - labels['component'] = 'kernel' - labels['app'] = 'enterprise-gateway' + labels["kernel_id"] = kernel_id + labels["component"] = "kernel" + labels["app"] = "enterprise-gateway" # Capture env parameters... param_env = dict() - param_env['PORT_RANGE'] = port_range - param_env['PUBLIC_KEY'] = public_key - param_env['RESPONSE_ADDRESS'] = response_addr - param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode + param_env["PORT_RANGE"] = port_range + param_env["PUBLIC_KEY"] = public_key + param_env["RESPONSE_ADDRESS"] = response_addr + param_env["KERNEL_SPARK_CONTEXT_INIT_MODE"] = spark_context_init_mode # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST) # just add the env here. param_env.update(os.environ) - param_env.pop('PATH') # Let the image PATH be used. Since this is relative to images, we're probably safe. + param_env.pop( + "PATH" + ) # Let the image PATH be used. Since this is relative to images, we're probably safe. 
- user = param_env.get('KERNEL_UID') - group = param_env.get('KERNEL_GID') + user = param_env.get("KERNEL_UID") + group = param_env.get("KERNEL_GID") # setup common args kwargs = dict() - kwargs['name'] = container_name - kwargs['hostname'] = container_name - kwargs['user'] = user - kwargs['labels'] = labels + kwargs["name"] = container_name + kwargs["hostname"] = container_name + kwargs["user"] = user + kwargs["labels"] = labels client = DockerClient.from_env() if swarm_mode: @@ -60,71 +65,121 @@ def launch_docker_kernel(kernel_id, port_range, response_addr, public_key, spark networks.append(docker_network) mounts = list() mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro") - endpoint_spec = EndpointSpec(mode='dnsrr') - restart_policy = RestartPolicy(condition='none') + endpoint_spec = EndpointSpec(mode="dnsrr") + restart_policy = RestartPolicy(condition="none") # finish args setup - kwargs['env'] = param_env - kwargs['endpoint_spec'] = endpoint_spec - kwargs['restart_policy'] = restart_policy - kwargs['container_labels'] = labels - kwargs['networks'] = networks - kwargs['groups'] = [group, '100'] - if param_env.get('KERNEL_WORKING_DIR'): - kwargs['workdir'] = param_env.get('KERNEL_WORKING_DIR') + kwargs["env"] = param_env + kwargs["endpoint_spec"] = endpoint_spec + kwargs["restart_policy"] = restart_policy + kwargs["container_labels"] = labels + kwargs["networks"] = networks + kwargs["groups"] = [group, "100"] + if param_env.get("KERNEL_WORKING_DIR"): + kwargs["workdir"] = param_env.get("KERNEL_WORKING_DIR") # kwargs['mounts'] = mounts # Enable if necessary # print("service args: {}".format(kwargs)) # useful for debug kernel_service = client.services.create(image_name, **kwargs) else: - volumes = {'/usr/local/share/jupyter/kernels': {'bind': '/usr/local/share/jupyter/kernels', 'mode': 'ro'}} + volumes = { + "/usr/local/share/jupyter/kernels": { + "bind": "/usr/local/share/jupyter/kernels", + "mode": "ro", + } + } # finish args 
setup - kwargs['environment'] = param_env - kwargs['remove'] = remove_container - kwargs['network'] = docker_network - kwargs['group_add'] = [group, '100'] - kwargs['detach'] = True - if param_env.get('KERNEL_WORKING_DIR'): - kwargs['working_dir'] = param_env.get('KERNEL_WORKING_DIR') + kwargs["environment"] = param_env + kwargs["remove"] = remove_container + kwargs["network"] = docker_network + kwargs["group_add"] = [group, "100"] + kwargs["detach"] = True + if param_env.get("KERNEL_WORKING_DIR"): + kwargs["working_dir"] = param_env.get("KERNEL_WORKING_DIR") # kwargs['volumes'] = volumes # Enable if necessary # print("container args: {}".format(kwargs)) # useful for debug kernel_container = client.containers.run(image_name, **kwargs) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--kernel-id', dest='kernel_id', nargs='?', - help='Indicates the id associated with the launched kernel.') - parser.add_argument('--port-range', dest='port_range', nargs='?', - metavar='..', help='Port range to impose for kernel ports') - parser.add_argument('--response-address', dest='response_address', nargs='?', - metavar=':', help='Connection address (:) for returning connection file') - parser.add_argument('--public-key', dest='public_key', nargs='?', - help='Public key used to encrypt connection information') - parser.add_argument('--spark-context-initialization-mode', dest='spark_context_init_mode', - nargs='?', help='Indicates whether or how a spark context should be created') + parser.add_argument( + "--kernel-id", + dest="kernel_id", + nargs="?", + help="Indicates the id associated with the launched kernel.", + ) + parser.add_argument( + "--port-range", + dest="port_range", + nargs="?", + metavar="..", + help="Port range to impose for kernel ports", + ) + parser.add_argument( + "--response-address", + dest="response_address", + nargs="?", + metavar=":", + help="Connection address (:) for returning connection 
file", + ) + parser.add_argument( + "--public-key", + dest="public_key", + nargs="?", + help="Public key used to encrypt connection information", + ) + parser.add_argument( + "--spark-context-initialization-mode", + dest="spark_context_init_mode", + nargs="?", + help="Indicates whether or how a spark context should be created", + ) # The following arguments are deprecated and will be used only if their mirroring arguments have no value. # This means that the default value for --spark-context-initialization-mode (none) will need to come from # the mirrored args' default until deprecated item has been removed. - parser.add_argument('--RemoteProcessProxy.kernel-id', dest='rpp_kernel_id', nargs='?', - help='Indicates the id associated with the launched kernel. (deprecated)') - parser.add_argument('--RemoteProcessProxy.port-range', dest='rpp_port_range', nargs='?', - metavar='..', help='Port range to impose for kernel ports (deprecated)') - parser.add_argument('--RemoteProcessProxy.response-address', dest='rpp_response_address', nargs='?', - metavar=':', - help='Connection address (:) for returning connection file (deprecated)') - parser.add_argument('--RemoteProcessProxy.public-key', dest='rpp_public_key', nargs='?', - help='Public key used to encrypt connection information (deprecated)') - parser.add_argument('--RemoteProcessProxy.spark-context-initialization-mode', dest='rpp_spark_context_init_mode', - nargs='?', help='Indicates whether or how a spark context should be created (deprecated)', - default='none') + parser.add_argument( + "--RemoteProcessProxy.kernel-id", + dest="rpp_kernel_id", + nargs="?", + help="Indicates the id associated with the launched kernel. 
(deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.port-range", + dest="rpp_port_range", + nargs="?", + metavar="..", + help="Port range to impose for kernel ports (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.response-address", + dest="rpp_response_address", + nargs="?", + metavar=":", + help="Connection address (:) for returning connection file (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.public-key", + dest="rpp_public_key", + nargs="?", + help="Public key used to encrypt connection information (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.spark-context-initialization-mode", + dest="rpp_spark_context_init_mode", + nargs="?", + help="Indicates whether or how a spark context should be created (deprecated)", + default="none", + ) arguments = vars(parser.parse_args()) - kernel_id = arguments['kernel_id'] or arguments['rpp_kernel_id'] - port_range = arguments['port_range'] or arguments['rpp_port_range'] - response_addr = arguments['response_address'] or arguments['rpp_response_address'] - public_key = arguments['public_key'] or arguments['rpp_public_key'] - spark_context_init_mode = arguments['spark_context_init_mode'] or arguments['rpp_spark_context_init_mode'] + kernel_id = arguments["kernel_id"] or arguments["rpp_kernel_id"] + port_range = arguments["port_range"] or arguments["rpp_port_range"] + response_addr = arguments["response_address"] or arguments["rpp_response_address"] + public_key = arguments["public_key"] or arguments["rpp_public_key"] + spark_context_init_mode = ( + arguments["spark_context_init_mode"] or arguments["rpp_spark_context_init_mode"] + ) launch_docker_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode) diff --git a/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py b/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py index 0bb9f4dc6..b5ca6c985 100644 --- a/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py 
+++ b/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py @@ -1,16 +1,16 @@ #!/opt/conda/bin/python +import argparse import os import sys + +import urllib3 import yaml -import argparse +from jinja2 import Environment, FileSystemLoader from kubernetes import client, config -import urllib3 - -from jinja2 import FileSystemLoader, Environment urllib3.disable_warnings() -KERNEL_POD_TEMPLATE_PATH = '/kernel-pod.yaml.j2' +KERNEL_POD_TEMPLATE_PATH = "/kernel-pod.yaml.j2" def generate_kernel_pod_yaml(keywords): @@ -19,7 +19,9 @@ def generate_kernel_pod_yaml(keywords): - load jinja2 template from this file directory. - substitute template variables with keywords items. """ - j_env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True) + j_env = Environment( + loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True + ) # jinja2 template substitutes template variables with None though keywords doesn't # contain corresponding item. Therefore, no need to check if any are left unsubstituted. # Kubernetes API server will validate the pod spec instead. @@ -28,7 +30,9 @@ def generate_kernel_pod_yaml(keywords): return k8s_yaml -def launch_kubernetes_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode): +def launch_kubernetes_kernel( + kernel_id, port_range, response_addr, public_key, spark_context_init_mode +): # Launches a containerized kernel as a kubernetes pod. config.load_incluster_config() @@ -39,17 +43,19 @@ def launch_kubernetes_kernel(kernel_id, port_range, response_addr, public_key, s # Factory values... # Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive # value since this is used to locate the kernel launch script within the image. 
- keywords['port_range'] = port_range - keywords['public_key'] = public_key - keywords['response_address'] = response_addr - keywords['kernel_id'] = kernel_id - keywords['kernel_name'] = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - keywords['kernel_spark_context_init_mode'] = spark_context_init_mode + keywords["port_range"] = port_range + keywords["public_key"] = public_key + keywords["response_address"] = response_addr + keywords["kernel_id"] = kernel_id + keywords["kernel_name"] = os.path.basename( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + keywords["kernel_spark_context_init_mode"] = spark_context_init_mode # Walk env variables looking for names prefixed with KERNEL_. When found, set corresponding keyword value # with name in lower case. for name, value in os.environ.items(): - if name.startswith('KERNEL_'): + if name.startswith("KERNEL_"): keywords[name.lower()] = yaml.safe_load(value) # Substitute all template variable (wrapped with {{ }}) and generate `yaml` string. @@ -62,62 +68,120 @@ def launch_kubernetes_kernel(kernel_id, port_range, response_addr, public_key, s # https://github.com/kubernetes-client/python for API signatures. 
Other examples can be found in # https://github.com/jupyter-incubator/enterprise_gateway/blob/master/enterprise_gateway/services/processproxies/k8s.py # - kernel_namespace = keywords['kernel_namespace'] + kernel_namespace = keywords["kernel_namespace"] k8s_objs = yaml.safe_load_all(k8s_yaml) for k8s_obj in k8s_objs: - if k8s_obj.get('kind'): - if k8s_obj['kind'] == 'Pod': + if k8s_obj.get("kind"): + if k8s_obj["kind"] == "Pod": # print("{}".format(k8s_obj)) # useful for debug - client.CoreV1Api(client.ApiClient()).create_namespaced_pod(body=k8s_obj, namespace=kernel_namespace) - elif k8s_obj['kind'] == 'Secret': - client.CoreV1Api(client.ApiClient()).create_namespaced_secret(body=k8s_obj, namespace=kernel_namespace) - elif k8s_obj['kind'] == 'PersistentVolumeClaim': + client.CoreV1Api(client.ApiClient()).create_namespaced_pod( + body=k8s_obj, namespace=kernel_namespace + ) + elif k8s_obj["kind"] == "Secret": + client.CoreV1Api(client.ApiClient()).create_namespaced_secret( + body=k8s_obj, namespace=kernel_namespace + ) + elif k8s_obj["kind"] == "PersistentVolumeClaim": client.CoreV1Api(client.ApiClient()).create_namespaced_persistent_volume_claim( - body=k8s_obj, namespace=kernel_namespace) - elif k8s_obj['kind'] == 'PersistentVolume': + body=k8s_obj, namespace=kernel_namespace + ) + elif k8s_obj["kind"] == "PersistentVolume": client.CoreV1Api(client.ApiClient()).create_persistent_volume(body=k8s_obj) else: - sys.exit("ERROR - Unhandled Kubernetes object kind '{}' found in yaml file - kernel launch terminating!". - format(k8s_obj['kind'])) + sys.exit( + "ERROR - Unhandled Kubernetes object kind '{}' found in yaml file - kernel launch terminating!".format( + k8s_obj["kind"] + ) + ) else: - sys.exit("ERROR - Unknown Kubernetes object '{}' found in yaml file - kernel launch terminating!". 
- format(k8s_obj)) + sys.exit( + "ERROR - Unknown Kubernetes object '{}' found in yaml file - kernel launch terminating!".format( + k8s_obj + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--kernel-id', dest='kernel_id', nargs='?', - help='Indicates the id associated with the launched kernel.') - parser.add_argument('--port-range', dest='port_range', nargs='?', - metavar='..', help='Port range to impose for kernel ports') - parser.add_argument('--response-address', dest='response_address', nargs='?', - metavar=':', help='Connection address (:) for returning connection file') - parser.add_argument('--public-key', dest='public_key', nargs='?', - help='Public key used to encrypt connection information') - parser.add_argument('--spark-context-initialization-mode', dest='spark_context_init_mode', - nargs='?', help='Indicates whether or how a spark context should be created') + parser.add_argument( + "--kernel-id", + dest="kernel_id", + nargs="?", + help="Indicates the id associated with the launched kernel.", + ) + parser.add_argument( + "--port-range", + dest="port_range", + nargs="?", + metavar="..", + help="Port range to impose for kernel ports", + ) + parser.add_argument( + "--response-address", + dest="response_address", + nargs="?", + metavar=":", + help="Connection address (:) for returning connection file", + ) + parser.add_argument( + "--public-key", + dest="public_key", + nargs="?", + help="Public key used to encrypt connection information", + ) + parser.add_argument( + "--spark-context-initialization-mode", + dest="spark_context_init_mode", + nargs="?", + help="Indicates whether or how a spark context should be created", + ) # The following arguments are deprecated and will be used only if their mirroring arguments have no value. 
# This means that the default value for --spark-context-initialization-mode (none) will need to come from # the mirrored args' default until deprecated item has been removed. - parser.add_argument('--RemoteProcessProxy.kernel-id', dest='rpp_kernel_id', nargs='?', - help='Indicates the id associated with the launched kernel. (deprecated)') - parser.add_argument('--RemoteProcessProxy.port-range', dest='rpp_port_range', nargs='?', - metavar='..', help='Port range to impose for kernel ports (deprecated)') - parser.add_argument('--RemoteProcessProxy.response-address', dest='rpp_response_address', nargs='?', - metavar=':', - help='Connection address (:) for returning connection file (deprecated)') - parser.add_argument('--RemoteProcessProxy.public-key', dest='rpp_public_key', nargs='?', - help='Public key used to encrypt connection information (deprecated)') - parser.add_argument('--RemoteProcessProxy.spark-context-initialization-mode', dest='rpp_spark_context_init_mode', - nargs='?', help='Indicates whether or how a spark context should be created (deprecated)', - default='none') + parser.add_argument( + "--RemoteProcessProxy.kernel-id", + dest="rpp_kernel_id", + nargs="?", + help="Indicates the id associated with the launched kernel. 
(deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.port-range", + dest="rpp_port_range", + nargs="?", + metavar="..", + help="Port range to impose for kernel ports (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.response-address", + dest="rpp_response_address", + nargs="?", + metavar=":", + help="Connection address (:) for returning connection file (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.public-key", + dest="rpp_public_key", + nargs="?", + help="Public key used to encrypt connection information (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.spark-context-initialization-mode", + dest="rpp_spark_context_init_mode", + nargs="?", + help="Indicates whether or how a spark context should be created (deprecated)", + default="none", + ) arguments = vars(parser.parse_args()) - kernel_id = arguments['kernel_id'] or arguments['rpp_kernel_id'] - port_range = arguments['port_range'] or arguments['rpp_port_range'] - response_addr = arguments['response_address'] or arguments['rpp_response_address'] - public_key = arguments['public_key'] or arguments['rpp_public_key'] - spark_context_init_mode = arguments['spark_context_init_mode'] or arguments['rpp_spark_context_init_mode'] - - launch_kubernetes_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode) + kernel_id = arguments["kernel_id"] or arguments["rpp_kernel_id"] + port_range = arguments["port_range"] or arguments["rpp_port_range"] + response_addr = arguments["response_address"] or arguments["rpp_response_address"] + public_key = arguments["public_key"] or arguments["rpp_public_key"] + spark_context_init_mode = ( + arguments["spark_context_init_mode"] or arguments["rpp_spark_context_init_mode"] + ) + + launch_kubernetes_kernel( + kernel_id, port_range, response_addr, public_key, spark_context_init_mode + ) diff --git a/etc/kernel-launchers/operators/scripts/launch_custom_resource.py 
b/etc/kernel-launchers/operators/scripts/launch_custom_resource.py index 761796ddf..ce30ebbfe 100644 --- a/etc/kernel-launchers/operators/scripts/launch_custom_resource.py +++ b/etc/kernel-launchers/operators/scripts/launch_custom_resource.py @@ -1,70 +1,106 @@ #!/opt/conda/bin/python +import argparse import os + +import urllib3 import yaml -import argparse +from jinja2 import Environment, FileSystemLoader from kubernetes import client, config -import urllib3 - -from jinja2 import FileSystemLoader, Environment urllib3.disable_warnings() def generate_kernel_custom_resource_yaml(kernel_crd_template, keywords): - j_env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True) - k8s_yaml = j_env.get_template('/' + kernel_crd_template + '.yaml.j2').render(**keywords) + j_env = Environment( + loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True + ) + k8s_yaml = j_env.get_template("/" + kernel_crd_template + ".yaml.j2").render(**keywords) return k8s_yaml -def launch_custom_resource_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode): +def launch_custom_resource_kernel( + kernel_id, port_range, response_addr, public_key, spark_context_init_mode +): config.load_incluster_config() keywords = dict() - keywords['eg_port_range'] = port_range - keywords['eg_public_key'] = public_key - keywords['eg_response_address'] = response_addr - keywords['kernel_id'] = kernel_id - keywords['kernel_name'] = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - keywords['spark_context_initialization_mode'] = spark_context_init_mode + keywords["eg_port_range"] = port_range + keywords["eg_public_key"] = public_key + keywords["eg_response_address"] = response_addr + keywords["kernel_id"] = kernel_id + keywords["kernel_name"] = os.path.basename( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + keywords["spark_context_initialization_mode"] = 
spark_context_init_mode for name, value in os.environ.items(): - if name.startswith('KERNEL_'): + if name.startswith("KERNEL_"): keywords[name.lower()] = yaml.safe_load(value) - kernel_crd_template = keywords['kernel_crd_group'] + '-' + keywords['kernel_crd_version'] + kernel_crd_template = keywords["kernel_crd_group"] + "-" + keywords["kernel_crd_version"] custom_resource_yaml = generate_kernel_custom_resource_yaml(kernel_crd_template, keywords) - kernel_namespace = keywords['kernel_namespace'] - group = keywords['kernel_crd_group'] - version = keywords['kernel_crd_version'] - plural = keywords['kernel_crd_plural'] + kernel_namespace = keywords["kernel_namespace"] + group = keywords["kernel_crd_group"] + version = keywords["kernel_crd_version"] + plural = keywords["kernel_crd_plural"] custom_resource_object = yaml.safe_load(custom_resource_yaml) - client.CustomObjectsApi().create_namespaced_custom_object(group, version, kernel_namespace, plural, - custom_resource_object) + client.CustomObjectsApi().create_namespaced_custom_object( + group, version, kernel_namespace, plural, custom_resource_object + ) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--kernel-id', '--RemoteProcessProxy.kernel-id', dest='kernel_id', nargs='?', - help='Indicates the id associated with the launched kernel.') - parser.add_argument('--port-range', '--RemoteProcessProxy.port-range', dest='port_range', nargs='?', - metavar='..', help='Port range to impose for kernel ports') - parser.add_argument('--response-address', '--RemoteProcessProxy.response-address', dest='response_address', nargs='?', - metavar=':', help='Connection address (:) for returning connection file') - parser.add_argument('--public-key', '--RemoteProcessProxy.public-key', dest='public_key', nargs='?', - help='Public key used to encrypt connection information') - parser.add_argument('--spark-context-initialization-mode', 
'--RemoteProcessProxy.spark-context-initialization-mode', dest='spark_context_init_mode', - nargs='?', help='Indicates whether or how a spark context should be created', - default='none') + parser.add_argument( + "--kernel-id", + "--RemoteProcessProxy.kernel-id", + dest="kernel_id", + nargs="?", + help="Indicates the id associated with the launched kernel.", + ) + parser.add_argument( + "--port-range", + "--RemoteProcessProxy.port-range", + dest="port_range", + nargs="?", + metavar="..", + help="Port range to impose for kernel ports", + ) + parser.add_argument( + "--response-address", + "--RemoteProcessProxy.response-address", + dest="response_address", + nargs="?", + metavar=":", + help="Connection address (:) for returning connection file", + ) + parser.add_argument( + "--public-key", + "--RemoteProcessProxy.public-key", + dest="public_key", + nargs="?", + help="Public key used to encrypt connection information", + ) + parser.add_argument( + "--spark-context-initialization-mode", + "--RemoteProcessProxy.spark-context-initialization-mode", + dest="spark_context_init_mode", + nargs="?", + help="Indicates whether or how a spark context should be created", + default="none", + ) arguments = vars(parser.parse_args()) - kernel_id = arguments['kernel_id'] - port_range = arguments['port_range'] - response_addr = arguments['response_address'] - public_key = arguments['public_key'] - spark_context_init_mode = arguments['spark_context_init_mode'] - - launch_custom_resource_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode) + kernel_id = arguments["kernel_id"] + port_range = arguments["port_range"] + response_addr = arguments["response_address"] + public_key = arguments["public_key"] + spark_context_init_mode = arguments["spark_context_init_mode"] + + launch_custom_resource_kernel( + kernel_id, port_range, response_addr, public_key, spark_context_init_mode + ) diff --git a/etc/kernel-launchers/python/scripts/launch_ipykernel.py 
b/etc/kernel-launchers/python/scripts/launch_ipykernel.py index 1c02e4570..b37b7bc77 100644 --- a/etc/kernel-launchers/python/scripts/launch_ipykernel.py +++ b/etc/kernel-launchers/python/scripts/launch_ipykernel.py @@ -3,32 +3,36 @@ import json import logging import os +import random import socket import tempfile import uuid -from future.utils import raise_from from multiprocessing import Process -import random from threading import Thread -from Cryptodome.Cipher import PKCS1_v1_5, AES +from Cryptodome.Cipher import AES, PKCS1_v1_5 from Cryptodome.PublicKey import RSA from Cryptodome.Random import get_random_bytes from Cryptodome.Util.Padding import pad +from future.utils import raise_from from jupyter_client.connect import write_connection_file LAUNCHER_VERSION = 1 # Indicate to server the version of this launcher (payloads may vary) # Minimum port range size and max retries, let EG_ env values act as the default for b/c purposes -min_port_range_size = int(os.getenv('MIN_PORT_RANGE_SIZE', os.getenv('EG_MIN_PORT_RANGE_SIZE', '1000'))) -max_port_range_retries = int(os.getenv('MAX_PORT_RANGE_RETRIES', os.getenv('EG_MAX_PORT_RANGE_RETRIES', '5'))) - -log_level = os.getenv('LOG_LEVEL', os.getenv('EG_LOG_LEVEL', '10')) +min_port_range_size = int( + os.getenv("MIN_PORT_RANGE_SIZE", os.getenv("EG_MIN_PORT_RANGE_SIZE", "1000")) +) +max_port_range_retries = int( + os.getenv("MAX_PORT_RANGE_RETRIES", os.getenv("EG_MAX_PORT_RANGE_RETRIES", "5")) +) + +log_level = os.getenv("LOG_LEVEL", os.getenv("EG_LOG_LEVEL", "10")) log_level = int(log_level) if log_level.isdigit() else log_level -logging.basicConfig(format='[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s') +logging.basicConfig(format="[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s") -logger = logging.getLogger('launch_ipykernel') +logger = logging.getLogger("launch_ipykernel") logger.setLevel(log_level) @@ -46,7 +50,7 @@ def run(self): self.exc = exc -def initialize_namespace(namespace, 
cluster_type='spark'): +def initialize_namespace(namespace, cluster_type="spark"): """Initialize the kernel namespace. Parameters @@ -55,12 +59,14 @@ def initialize_namespace(namespace, cluster_type='spark'): The cluster type to initialize. ``'none'`` results in no variables in the initial namespace. """ - if cluster_type == 'spark': + if cluster_type == "spark": try: from pyspark.sql import SparkSession except ImportError: - logger.info("A spark context was desired but the pyspark distribution is not present. " - "Spark context creation will not occur.") + logger.info( + "A spark context was desired but the pyspark distribution is not present. " + "Spark context creation will not occur." + ) return {} def initialize_spark_session(): @@ -77,39 +83,42 @@ def initialize_spark_session(): # Stop the spark session on exit atexit.register(lambda: spark.stop()) - namespace.update({'spark': spark, - 'sc': spark.sparkContext, - 'sql': spark.sql, - 'sqlContext': spark._wrapped, - 'sqlCtx': spark._wrapped}) + namespace.update( + { + "spark": spark, + "sc": spark.sparkContext, + "sql": spark.sql, + "sqlContext": spark._wrapped, + "sqlCtx": spark._wrapped, + } + ) init_thread = ExceptionThread(target=initialize_spark_session) - spark = WaitingForSparkSessionToBeInitialized('spark', init_thread, namespace) - sc = WaitingForSparkSessionToBeInitialized('sc', init_thread, namespace) - sqlContext = WaitingForSparkSessionToBeInitialized('sqlContext', init_thread, namespace) + spark = WaitingForSparkSessionToBeInitialized("spark", init_thread, namespace) + sc = WaitingForSparkSessionToBeInitialized("sc", init_thread, namespace) + sqlContext = WaitingForSparkSessionToBeInitialized("sqlContext", init_thread, namespace) def sql(query): """Placeholder function. 
When called will wait for Spark session to be initialized and call ``spark.sql(query)``""" return spark.sql(query) - namespace.update({'spark': spark, - 'sc': sc, - 'sql': sql, - 'sqlContext': sqlContext, - 'sqlCtx': sqlContext}) + namespace.update( + {"spark": spark, "sc": sc, "sql": sql, "sqlContext": sqlContext, "sqlCtx": sqlContext} + ) init_thread.start() - elif cluster_type == 'dask': + elif cluster_type == "dask": import dask_yarn + cluster = dask_yarn.YarnCluster.from_current() - namespace.update({'cluster': cluster}) - elif cluster_type != 'none': + namespace.update({"cluster": cluster}) + elif cluster_type != "none": raise RuntimeError("Unknown cluster_type: %r" % cluster_type) -class WaitingForSparkSessionToBeInitialized(object): +class WaitingForSparkSessionToBeInitialized: """Wrapper object for SparkContext and other Spark session variables while the real Spark session is being initialized in a background thread. The class name is intentionally worded verbosely explicit as it will show up when executing a cell that contains only a Spark session variable like ``sc`` or ``sqlContext``. @@ -117,8 +126,8 @@ class WaitingForSparkSessionToBeInitialized(object): # private and public attributes that show up for tab completion, # to indicate pending initialization of Spark session - _WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = 'Spark Session not yet initialized ...' - WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = 'Spark Session not yet initialized ...' + _WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = "Spark Session not yet initialized ..." + WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = "Spark Session not yet initialized ..." 
# the same wrapper class is used for all Spark session variables, so we need to record the name of the variable def __init__(self, global_variable_name, init_thread, namespace): @@ -142,7 +151,14 @@ def __getattr__(self, name): self._init_thread.join(timeout=None) exc = self._init_thread.exc if exc: - raise_from(RuntimeError("Variable: {} was not initialized properly.".format(self._spark_session_variable)), exc) + raise_from( + RuntimeError( + "Variable: {} was not initialized properly.".format( + self._spark_session_variable + ) + ), + exc, + ) # now return attribute/function reference from actual Spark object return getattr(self._namespace[self._spark_session_variable], name) @@ -162,11 +178,16 @@ def _validate_port_range(port_range): if port_range_size < min_port_range_size: raise RuntimeError( "Port range validation failed for range: '{}'. Range size must be at least {} as specified by" - " env EG_MIN_PORT_RANGE_SIZE".format(port_range, min_port_range_size)) + " env EG_MIN_PORT_RANGE_SIZE".format(port_range, min_port_range_size) + ) except ValueError as ve: - raise RuntimeError("Port range validation failed for range: '{}'. Error was: {}".format(port_range, ve)) + raise RuntimeError( + f"Port range validation failed for range: '{port_range}'. Error was: {ve}" + ) except IndexError as ie: - raise RuntimeError("Port range validation failed for range: '{}'. Error was: {}".format(port_range, ie)) + raise RuntimeError( + f"Port range validation failed for range: '{port_range}'. Error was: {ie}" + ) return lower_port, upper_port @@ -175,21 +196,21 @@ def determine_connection_file(conn_file, kid): # If the directory exists, use the original file, else create a temporary file. 
if conn_file is None or not os.path.exists(os.path.dirname(conn_file)): if kid is not None: - basename = 'kernel-' + kid + basename = "kernel-" + kid else: basename = os.path.splitext(os.path.basename(conn_file))[0] fd, conn_file = tempfile.mkstemp(suffix=".json", prefix=basename + "_") os.close(fd) - logger.debug("Using connection file '{}'.".format(conn_file)) + logger.debug(f"Using connection file '{conn_file}'.") return conn_file def _encrypt(connection_info_str, public_key): """Encrypt the connection information using a generated AES key that is then encrypted using - the public key passed from the server. Both are then returned in an encoded JSON payload. + the public key passed from the server. Both are then returned in an encoded JSON payload. - This code also exists in the R kernel-launcher's server_listener.py script. + This code also exists in the R kernel-launcher's server_listener.py script. """ aes_key = get_random_bytes(16) cipher = AES.new(aes_key, mode=AES.MODE_ECB) @@ -204,28 +225,38 @@ def _encrypt(connection_info_str, public_key): encrypted_key = base64.b64encode(cipher.encrypt(aes_key)) # Compose the payload and Base64 encode it - payload = {"version": LAUNCHER_VERSION, "key": encrypted_key.decode(), "conn_info": b64_connection_info.decode()} - b64_payload = base64.b64encode(json.dumps(payload).encode(encoding='utf-8')) + payload = { + "version": LAUNCHER_VERSION, + "key": encrypted_key.decode(), + "conn_info": b64_connection_info.decode(), + } + b64_payload = base64.b64encode(json.dumps(payload).encode(encoding="utf-8")) return b64_payload -def return_connection_info(connection_file, response_addr, lower_port, upper_port, kernel_id, public_key): +def return_connection_info( + connection_file, response_addr, lower_port, upper_port, kernel_id, public_key +): """Returns the connection information corresponding to this kernel. - This code also exists in the R kernel-launcher's server_listener.py script. 
+ This code also exists in the R kernel-launcher's server_listener.py script. """ response_parts = response_addr.split(":") if len(response_parts) != 2: - logger.error("Invalid format for response address '{}'. " - "Assuming 'pull' mode...".format(response_addr)) + logger.error( + "Invalid format for response address '{}'. " + "Assuming 'pull' mode...".format(response_addr) + ) return response_ip = response_parts[0] try: response_port = int(response_parts[1]) except ValueError: - logger.error("Invalid port component found in response address '{}'. " - "Assuming 'pull' mode...".format(response_addr)) + logger.error( + "Invalid port component found in response address '{}'. " + "Assuming 'pull' mode...".format(response_addr) + ) return with open(connection_file) as fp: @@ -234,21 +265,21 @@ def return_connection_info(connection_file, response_addr, lower_port, upper_por # add process and process group ids into connection info pid = os.getpid() - cf_json['pid'] = pid - cf_json['pgid'] = os.getpgid(pid) + cf_json["pid"] = pid + cf_json["pgid"] = os.getpgid(pid) # prepare socket address for handling signals comm_sock = prepare_comm_socket(lower_port, upper_port) - cf_json['comm_port'] = comm_sock.getsockname()[1] - cf_json['kernel_id'] = kernel_id + cf_json["comm_port"] = comm_sock.getsockname()[1] + cf_json["kernel_id"] = kernel_id s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((response_ip, response_port)) - json_content = json.dumps(cf_json).encode(encoding='utf-8') - logger.debug("JSON Payload '{}".format(json_content)) + json_content = json.dumps(cf_json).encode(encoding="utf-8") + logger.debug(f"JSON Payload '{json_content}") payload = _encrypt(json_content, public_key) - logger.debug("Encrypted Payload '{}".format(payload)) + logger.debug(f"Encrypted Payload '{payload}") s.send(payload) finally: s.close() @@ -259,10 +290,14 @@ def return_connection_info(connection_file, response_addr, lower_port, upper_por def prepare_comm_socket(lower_port, 
upper_port): """Prepares the socket to which the server will send signal and shutdown requests. - This code also exists in the R kernel-launcher's server_listener.py script. + This code also exists in the R kernel-launcher's server_listener.py script. """ sock = _select_socket(lower_port, upper_port) - logger.info("Signal socket bound to host: {}, port: {}".format(sock.getsockname()[0], sock.getsockname()[1])) + logger.info( + "Signal socket bound to host: {}, port: {}".format( + sock.getsockname()[0], sock.getsockname()[1] + ) + ) sock.listen(1) sock.settimeout(5) return sock @@ -271,7 +306,7 @@ def prepare_comm_socket(lower_port, upper_port): def _select_ports(count, lower_port, upper_port): """Select and return n random ports that are available and adhere to the given port range, if applicable. - This code also exists in the R kernel-launcher's server_listener.py script. + This code also exists in the R kernel-launcher's server_listener.py script. """ ports = [] sockets = [] @@ -287,28 +322,30 @@ def _select_ports(count, lower_port, upper_port): def _select_socket(lower_port, upper_port): """Create and return a socket whose port is available and adheres to the given port range, if applicable. - This code also exists in the R kernel-launcher's server_listener.py script. + This code also exists in the R kernel-launcher's server_listener.py script. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) found_port = False retries = 0 while not found_port: try: - sock.bind(('0.0.0.0', _get_candidate_port(lower_port, upper_port))) + sock.bind(("0.0.0.0", _get_candidate_port(lower_port, upper_port))) found_port = True except Exception: retries = retries + 1 if retries > max_port_range_retries: raise RuntimeError( - "Failed to locate port within range {}..{} after {} retries!". 
- format(lower_port, upper_port, max_port_range_retries)) + "Failed to locate port within range {}..{} after {} retries!".format( + lower_port, upper_port, max_port_range_retries + ) + ) return sock def _get_candidate_port(lower_port, upper_port): """Returns a port within the given range. If the range is zero, the zero is returned. - This code also exists in the R kernel-launcher's server_listener.py script. + This code also exists in the R kernel-launcher's server_listener.py script. """ range_size = upper_port - lower_port if range_size == 0: @@ -319,15 +356,15 @@ def _get_candidate_port(lower_port, upper_port): def get_server_request(sock): """Gets a request from the server and returns the corresponding dictionary. - This code also exists in the R kernel-launcher's server_listener.py script. + This code also exists in the R kernel-launcher's server_listener.py script. """ conn = None - data = '' + data = "" request_info = None try: conn, addr = sock.accept() while True: - buffer = conn.recv(1024).decode('utf-8') + buffer = conn.recv(1024).decode("utf-8") if not buffer: # send is complete request_info = json.loads(data) break @@ -344,23 +381,23 @@ def get_server_request(sock): def server_listener(sock, parent_pid): """Waits for requests from the server and processes each when received. Currently, - these will be one of a sending a signal to the corresponding kernel process (signum) or - stopping the listener and exiting the kernel (shutdown). + these will be one of a sending a signal to the corresponding kernel process (signum) or + stopping the listener and exiting the kernel (shutdown). - This code also exists in the R kernel-launcher's server_listener.py script. + This code also exists in the R kernel-launcher's server_listener.py script. 
""" shutdown = False while not shutdown: request = get_server_request(sock) if request: signum = -1 # prevent logging poll requests since that occurs every 3 seconds - if request.get('signum') is not None: - signum = int(request.get('signum')) + if request.get("signum") is not None: + signum = int(request.get("signum")) os.kill(parent_pid, signum) - if request.get('shutdown') is not None: - shutdown = bool(request.get('shutdown')) + if request.get("shutdown") is not None: + shutdown = bool(request.get("shutdown")) if signum != 0: - logger.info("server_listener got request: {}".format(request)) + logger.info(f"server_listener got request: {request}") def start_ipython(namespace, cluster_type="spark", **kwargs): @@ -369,7 +406,7 @@ def start_ipython(namespace, cluster_type="spark", **kwargs): # create an initial list of variables to clear # we do this without deleting to preserve the locals so that # initialize_namespace isn't affected by this mutation - to_delete = [k for k in namespace if not k.startswith('__')] + to_delete = [k for k in namespace if not k.startswith("__")] # initialize the namespace with the proper variables initialize_namespace(namespace, cluster_type=cluster_type) @@ -385,49 +422,107 @@ def start_ipython(namespace, cluster_type="spark", **kwargs): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--response-address', dest='response_address', nargs='?', metavar=':', - help='Connection address (:) for returning connection file') - parser.add_argument('--kernel-id', dest='kernel_id', nargs='?', - help='Indicates the id associated with the launched kernel.') - parser.add_argument('--public-key', dest='public_key', nargs='?', - help='Public key used to encrypt connection information') - parser.add_argument('--port-range', dest='port_range', nargs='?', metavar='..', - help='Port range to impose for kernel ports') - parser.add_argument('--spark-context-initialization-mode', dest='init_mode', nargs='?', - help='the 
initialization mode of the spark context: lazy, eager or none') - parser.add_argument('--cluster-type', dest='cluster_type', nargs='?', - help='the kind of cluster to initialize: spark, dask, or none') + parser.add_argument( + "--response-address", + dest="response_address", + nargs="?", + metavar=":", + help="Connection address (:) for returning connection file", + ) + parser.add_argument( + "--kernel-id", + dest="kernel_id", + nargs="?", + help="Indicates the id associated with the launched kernel.", + ) + parser.add_argument( + "--public-key", + dest="public_key", + nargs="?", + help="Public key used to encrypt connection information", + ) + parser.add_argument( + "--port-range", + dest="port_range", + nargs="?", + metavar="..", + help="Port range to impose for kernel ports", + ) + parser.add_argument( + "--spark-context-initialization-mode", + dest="init_mode", + nargs="?", + help="the initialization mode of the spark context: lazy, eager or none", + ) + parser.add_argument( + "--cluster-type", + dest="cluster_type", + nargs="?", + help="the kind of cluster to initialize: spark, dask, or none", + ) # The following arguments are deprecated and will be used only if their mirroring arguments have no value. # This means that the default values for --spark-context-initialization-mode (none) and --cluster-type (spark) # will need to come from the mirrored args' default until deprecated items have been removed. - parser.add_argument('connection_file', nargs='?', help='Connection file to write connection info (deprecated)') - parser.add_argument('--RemoteProcessProxy.response-address', dest='rpp_response_address', nargs='?', - metavar=':', - help='Connection address (:) for returning connection file (deprecated)') - parser.add_argument('--RemoteProcessProxy.kernel-id', dest='rpp_kernel_id', nargs='?', - help='Indicates the id associated with the launched kernel. 
(deprecated)') - parser.add_argument('--RemoteProcessProxy.public-key', dest='rpp_public_key', nargs='?', - help='Public key used to encrypt connection information (deprecated)') - parser.add_argument('--RemoteProcessProxy.port-range', dest='rpp_port_range', nargs='?', - metavar='..', help='Port range to impose for kernel ports (deprecated)') - parser.add_argument('--RemoteProcessProxy.spark-context-initialization-mode', dest='rpp_init_mode', nargs='?', - default='none', - help='the initialization mode of the spark context: lazy, eager or none (deprecated)') - parser.add_argument('--RemoteProcessProxy.cluster-type', dest='rpp_cluster_type', nargs='?', - default='spark', help='the kind of cluster to initialize: spark, dask, or none (deprecated)') + parser.add_argument( + "connection_file", nargs="?", help="Connection file to write connection info (deprecated)" + ) + parser.add_argument( + "--RemoteProcessProxy.response-address", + dest="rpp_response_address", + nargs="?", + metavar=":", + help="Connection address (:) for returning connection file (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.kernel-id", + dest="rpp_kernel_id", + nargs="?", + help="Indicates the id associated with the launched kernel. 
(deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.public-key", + dest="rpp_public_key", + nargs="?", + help="Public key used to encrypt connection information (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.port-range", + dest="rpp_port_range", + nargs="?", + metavar="..", + help="Port range to impose for kernel ports (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.spark-context-initialization-mode", + dest="rpp_init_mode", + nargs="?", + default="none", + help="the initialization mode of the spark context: lazy, eager or none (deprecated)", + ) + parser.add_argument( + "--RemoteProcessProxy.cluster-type", + dest="rpp_cluster_type", + nargs="?", + default="spark", + help="the kind of cluster to initialize: spark, dask, or none (deprecated)", + ) arguments = vars(parser.parse_args()) - connection_file = arguments['connection_file'] - response_addr = arguments['response_address'] or arguments['rpp_response_address'] - kernel_id = arguments['kernel_id'] or arguments['rpp_kernel_id'] - public_key = arguments['public_key'] or arguments['rpp_public_key'] - lower_port, upper_port = _validate_port_range(arguments['port_range'] or arguments['rpp_port_range']) - spark_init_mode = arguments['init_mode'] or arguments['rpp_init_mode'] - cluster_type = arguments['cluster_type'] or arguments['rpp_cluster_type'] + connection_file = arguments["connection_file"] + response_addr = arguments["response_address"] or arguments["rpp_response_address"] + kernel_id = arguments["kernel_id"] or arguments["rpp_kernel_id"] + public_key = arguments["public_key"] or arguments["rpp_public_key"] + lower_port, upper_port = _validate_port_range( + arguments["port_range"] or arguments["rpp_port_range"] + ) + spark_init_mode = arguments["init_mode"] or arguments["rpp_init_mode"] + cluster_type = arguments["cluster_type"] or arguments["rpp_cluster_type"] ip = "0.0.0.0" if connection_file is None and kernel_id is None: - raise RuntimeError("At least one 
of the parameters: 'connection_file' or '--kernel-id' must be provided!") + raise RuntimeError( + "At least one of the parameters: 'connection_file' or '--kernel-id' must be provided!" + ) if kernel_id is None: raise RuntimeError("Parameter '--kernel-id' must be provided!") @@ -442,18 +537,33 @@ def start_ipython(namespace, cluster_type="spark", **kwargs): ports = _select_ports(5, lower_port, upper_port) - write_connection_file(fname=connection_file, ip=ip, key=key, shell_port=ports[0], iopub_port=ports[1], - stdin_port=ports[2], hb_port=ports[3], control_port=ports[4]) + write_connection_file( + fname=connection_file, + ip=ip, + key=key, + shell_port=ports[0], + iopub_port=ports[1], + stdin_port=ports[2], + hb_port=ports[3], + control_port=ports[4], + ) if response_addr: - comm_socket = return_connection_info(connection_file, response_addr, lower_port, upper_port, - kernel_id, public_key) + comm_socket = return_connection_info( + connection_file, response_addr, lower_port, upper_port, kernel_id, public_key + ) if comm_socket: # socket in use, start server listener process - server_listener_process = Process(target=server_listener, args=(comm_socket, os.getpid(),)) + server_listener_process = Process( + target=server_listener, + args=( + comm_socket, + os.getpid(), + ), + ) server_listener_process.start() # Initialize the kernel namespace for the given cluster type - if cluster_type == 'spark' and spark_init_mode == 'none': - cluster_type = 'none' + if cluster_type == "spark" and spark_init_mode == "none": + cluster_type = "none" # launch the IPython kernel instance start_ipython(locals(), cluster_type=cluster_type, connection_file=connection_file, ip=ip) diff --git a/etc/kernel-resources/ir/kernel.js b/etc/kernel-resources/ir/kernel.js index ff812bce0..c26bc62ca 100644 --- a/etc/kernel-resources/ir/kernel.js +++ b/etc/kernel-resources/ir/kernel.js @@ -1,79 +1,92 @@ -const cmd_key = /Mac/.test(navigator.platform) ? 
'Cmd' : 'Ctrl' +const cmd_key = /Mac/.test(navigator.platform) ? "Cmd" : "Ctrl"; const edit_actions = [ - { - name: 'R Assign', - shortcut: 'Alt--', - icon: 'fa-long-arrow-left', - help: 'R: Inserts the left-assign operator (<-)', - handler(cm) { - cm.replaceSelection(' <- ') - }, - }, - { - name: 'R Pipe', - shortcut: `Shift-${cmd_key}-M`, - icon: 'fa-angle-right', - help: 'R: Inserts the magrittr pipe operator (%>%)', - handler(cm) { - cm.replaceSelection(' %>% ') - }, - }, - { - name: 'R Help', - shortcut: 'F1', - icon: 'fa-book', - help: 'R: Shows the manpage for the item under the cursor', - handler(cm, cell) { - const {anchor, head} = cm.findWordAt(cm.getCursor()) - const word = cm.getRange(anchor, head) - - const callbacks = cell.get_callbacks() - const options = {silent: false, store_history: false, stop_on_error: true} - cell.last_msg_id = cell.notebook.kernel.execute(`help(\`${word}\`)`, callbacks, options) - }, - }, -] + { + name: "R Assign", + shortcut: "Alt--", + icon: "fa-long-arrow-left", + help: "R: Inserts the left-assign operator (<-)", + handler(cm) { + cm.replaceSelection(" <- "); + }, + }, + { + name: "R Pipe", + shortcut: `Shift-${cmd_key}-M`, + icon: "fa-angle-right", + help: "R: Inserts the magrittr pipe operator (%>%)", + handler(cm) { + cm.replaceSelection(" %>% "); + }, + }, + { + name: "R Help", + shortcut: "F1", + icon: "fa-book", + help: "R: Shows the manpage for the item under the cursor", + handler(cm, cell) { + const { anchor, head } = cm.findWordAt(cm.getCursor()); + const word = cm.getRange(anchor, head); -const prefix = 'irkernel' + const callbacks = cell.get_callbacks(); + const options = { + silent: false, + store_history: false, + stop_on_error: true, + }; + cell.last_msg_id = cell.notebook.kernel.execute( + `help(\`${word}\`)`, + callbacks, + options + ); + }, + }, +]; + +const prefix = "irkernel"; function add_edit_shortcut(notebook, actions, keyboard_manager, edit_action) { - const {name, shortcut, icon, help, handler} = 
edit_action - - const action = { - icon, help, - help_index : 'zz', - handler: () => { - const cell = notebook.get_selected_cell() - handler(cell.code_mirror, cell) - }, - } - - const full_name = actions.register(action, name, prefix) - - Jupyter.keyboard_manager.edit_shortcuts.add_shortcut(shortcut, full_name) + const { name, shortcut, icon, help, handler } = edit_action; + + const action = { + icon, + help, + help_index: "zz", + handler: () => { + const cell = notebook.get_selected_cell(); + handler(cell.code_mirror, cell); + }, + }; + + const full_name = actions.register(action, name, prefix); + + Jupyter.keyboard_manager.edit_shortcuts.add_shortcut(shortcut, full_name); } function render_math(pager, html) { - if (!html) return - const $container = pager.pager_element.find('#pager-container') - $container.find('p[style="text-align: center;"]').map((i, e) => - e.outerHTML = `\\[${e.querySelector('i').innerHTML}\\]`) - $container.find('i').map((i, e) => - e.outerHTML = `\\(${e.innerHTML}\\)`) - MathJax.Hub.Queue(['Typeset', MathJax.Hub, $container[0]]) + if (!html) return; + const $container = pager.pager_element.find("#pager-container"); + $container + .find('p[style="text-align: center;"]') + .map((i, e) => (e.outerHTML = `\\[${e.querySelector("i").innerHTML}\\]`)); + $container.find("i").map((i, e) => (e.outerHTML = `\\(${e.innerHTML}\\)`)); + MathJax.Hub.Queue(["Typeset", MathJax.Hub, $container[0]]); } -define(['base/js/namespace'], ({ - notebook, - actions, - keyboard_manager, - pager, +define(["base/js/namespace"], ({ + notebook, + actions, + keyboard_manager, + pager, }) => ({ - onload() { - edit_actions.forEach(a => add_edit_shortcut(notebook, actions, keyboard_manager, a)) - - pager.events.on('open_with_text.Pager', (event, {data: {'text/html': html}}) => - render_math(pager, html)) - }, -})) + onload() { + edit_actions.forEach((a) => + add_edit_shortcut(notebook, actions, keyboard_manager, a) + ); + + pager.events.on( + "open_with_text.Pager", + (event, 
{ data: { "text/html": html } }) => render_math(pager, html) + ); + }, +})); diff --git a/etc/kernelspecs/R_docker/kernel.json b/etc/kernelspecs/R_docker/kernel.json index 2e31b30a0..756f00eff 100644 --- a/etc/kernelspecs/R_docker/kernel.json +++ b/etc/kernelspecs/R_docker/kernel.json @@ -9,8 +9,7 @@ } } }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/R_docker/scripts/launch_docker.py", diff --git a/etc/kernelspecs/R_kubernetes/kernel.json b/etc/kernelspecs/R_kubernetes/kernel.json index e6423ea6c..51a70dbaf 100644 --- a/etc/kernelspecs/R_kubernetes/kernel.json +++ b/etc/kernelspecs/R_kubernetes/kernel.json @@ -9,8 +9,7 @@ } } }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/R_kubernetes/scripts/launch_kubernetes.py", diff --git a/etc/kernelspecs/python_distributed/kernel.json b/etc/kernelspecs/python_distributed/kernel.json index 1996c77fa..e37737716 100644 --- a/etc/kernelspecs/python_distributed/kernel.json +++ b/etc/kernelspecs/python_distributed/kernel.json @@ -1,6 +1,6 @@ { "display_name": "Python 3 (distributed)", - "language": "python", + "language": "python", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy" diff --git a/etc/kernelspecs/python_docker/kernel.json b/etc/kernelspecs/python_docker/kernel.json index cd2a4aa94..fcf215d49 100644 --- a/etc/kernelspecs/python_docker/kernel.json +++ b/etc/kernelspecs/python_docker/kernel.json @@ -10,8 +10,7 @@ }, "debugger": true }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py", diff --git a/etc/kernelspecs/python_kubernetes/kernel.json b/etc/kernelspecs/python_kubernetes/kernel.json index 0ad17e8f9..171c584dd 100644 --- a/etc/kernelspecs/python_kubernetes/kernel.json +++ b/etc/kernelspecs/python_kubernetes/kernel.json @@ -10,8 +10,7 @@ }, "debugger": true }, - "env": { - }, + "env": {}, 
"argv": [ "python", "/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py", diff --git a/etc/kernelspecs/python_tf_docker/kernel.json b/etc/kernelspecs/python_tf_docker/kernel.json index 3c806ac0b..5d317d950 100644 --- a/etc/kernelspecs/python_tf_docker/kernel.json +++ b/etc/kernelspecs/python_tf_docker/kernel.json @@ -10,8 +10,7 @@ }, "debugger": true }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_docker/scripts/launch_docker.py", diff --git a/etc/kernelspecs/python_tf_gpu_docker/kernel.json b/etc/kernelspecs/python_tf_gpu_docker/kernel.json index 5bea0f64a..eeb19526d 100644 --- a/etc/kernelspecs/python_tf_gpu_docker/kernel.json +++ b/etc/kernelspecs/python_tf_gpu_docker/kernel.json @@ -10,8 +10,7 @@ }, "debugger": true }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_gpu_docker/scripts/launch_docker.py", diff --git a/etc/kernelspecs/python_tf_gpu_kubernetes/kernel.json b/etc/kernelspecs/python_tf_gpu_kubernetes/kernel.json index e4f06ae99..32824866f 100644 --- a/etc/kernelspecs/python_tf_gpu_kubernetes/kernel.json +++ b/etc/kernelspecs/python_tf_gpu_kubernetes/kernel.json @@ -10,8 +10,7 @@ }, "debugger": true }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_gpu_kubernetes/scripts/launch_kubernetes.py", diff --git a/etc/kernelspecs/python_tf_kubernetes/kernel.json b/etc/kernelspecs/python_tf_kubernetes/kernel.json index 1b2ae616a..f337cbebb 100644 --- a/etc/kernelspecs/python_tf_kubernetes/kernel.json +++ b/etc/kernelspecs/python_tf_kubernetes/kernel.json @@ -10,8 +10,7 @@ }, "debugger": true }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_kubernetes/scripts/launch_kubernetes.py", diff --git a/etc/kernelspecs/scala_docker/kernel.json b/etc/kernelspecs/scala_docker/kernel.json index 3685eee15..7fdc51f34 100644 --- 
a/etc/kernelspecs/scala_docker/kernel.json +++ b/etc/kernelspecs/scala_docker/kernel.json @@ -9,8 +9,7 @@ } } }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/scala_docker/scripts/launch_docker.py", diff --git a/etc/kernelspecs/scala_kubernetes/kernel.json b/etc/kernelspecs/scala_kubernetes/kernel.json index 5525ac286..fc630a650 100644 --- a/etc/kernelspecs/scala_kubernetes/kernel.json +++ b/etc/kernelspecs/scala_kubernetes/kernel.json @@ -9,8 +9,7 @@ } } }, - "env": { - }, + "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/scala_kubernetes/scripts/launch_kubernetes.py", diff --git a/etc/kernelspecs/spark_python_conductor_cluster/kernel.json b/etc/kernelspecs/spark_python_conductor_cluster/kernel.json index 1db2ae420..f294b013e 100644 --- a/etc/kernelspecs/spark_python_conductor_cluster/kernel.json +++ b/etc/kernelspecs/spark_python_conductor_cluster/kernel.json @@ -7,11 +7,11 @@ }, "debugger": true }, - "env": { + "env": { "SPARK_OPTS": "--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, - "argv": [ + "argv": [ "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", diff --git a/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh b/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh index 264f79854..e9b00510d 100755 --- a/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh +++ b/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh @@ -27,4 +27,4 @@ eval exec \ "${PROG_HOME}/scripts/launch_ipykernel.py" \ "${LAUNCH_OPTS}" \ "$@" -set +x \ No newline at end of file +set +x diff --git a/etc/kubernetes/enterprise-gateway.yaml b/etc/kubernetes/enterprise-gateway.yaml index 1ed16d985..b917e1c8a 100644 --- a/etc/kubernetes/enterprise-gateway.yaml +++ b/etc/kubernetes/enterprise-gateway.yaml @@ -190,7 +190,7 @@ spec: value: "python_kubernetes" # Optional authorization token passed in all requests - #- name: EG_AUTH_TOKEN + #- name: 
EG_AUTH_TOKEN # value: # Ensure the following VERSION tag is updated to the version of Enterprise Gateway you wish to run @@ -221,16 +221,16 @@ metadata: spec: selector: matchLabels: - name: kernel-image-puller + name: kernel-image-puller template: metadata: labels: - name: kernel-image-puller + name: kernel-image-puller app: enterprise-gateway component: kernel-image-puller spec: containers: - - name: kernel-image-puller + - name: kernel-image-puller image: elyra/kernel-image-puller:dev env: - name: KIP_GATEWAY_HOST diff --git a/etc/kubernetes/helm/enterprise-gateway/Chart.yaml b/etc/kubernetes/helm/enterprise-gateway/Chart.yaml index aa273e4fe..d92eb7696 100644 --- a/etc/kubernetes/helm/enterprise-gateway/Chart.yaml +++ b/etc/kubernetes/helm/enterprise-gateway/Chart.yaml @@ -8,4 +8,3 @@ sources: - https://github.com/jupyter-server/enterprise_gateway kubeVersion: '>=1.11.0-0' tillerVersion: '>=2.11.0-0' - diff --git a/etc/kubernetes/helm/enterprise-gateway/templates/daemonset.yaml b/etc/kubernetes/helm/enterprise-gateway/templates/daemonset.yaml index c31f3244c..28aca8ef6 100644 --- a/etc/kubernetes/helm/enterprise-gateway/templates/daemonset.yaml +++ b/etc/kubernetes/helm/enterprise-gateway/templates/daemonset.yaml @@ -38,7 +38,7 @@ spec: value: {{ .Values.kip.defaultContainerRegistry }} # Optional authorization token passed in all requests (should match EG_AUTH_TOKEN) {{- if .Values.authToken }} - - name: KIP_AUTH_TOKEN + - name: KIP_AUTH_TOKEN value: {{ .Values.authToken }} {{- end }} volumeMounts: diff --git a/etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml b/etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml index 62478badd..6595bd1dc 100644 --- a/etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml +++ b/etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml @@ -63,7 +63,7 @@ spec: value: {{ .Values.kernel.defaultKernelName }} # Optional authorization token passed in all requests {{- if .Values.authToken 
}} - - name: EG_AUTH_TOKEN + - name: EG_AUTH_TOKEN value: {{ .Values.authToken }} {{- end }} ports: diff --git a/etc/kubernetes/helm/enterprise-gateway/templates/ingress.yaml b/etc/kubernetes/helm/enterprise-gateway/templates/ingress.yaml index 9ec6a3acf..fcf62faa6 100644 --- a/etc/kubernetes/helm/enterprise-gateway/templates/ingress.yaml +++ b/etc/kubernetes/helm/enterprise-gateway/templates/ingress.yaml @@ -32,4 +32,3 @@ spec: serviceName: enterprise-gateway servicePort: {{ .Values.port }} {{ end }} - diff --git a/etc/kubernetes/helm/enterprise-gateway/values.yaml b/etc/kubernetes/helm/enterprise-gateway/values.yaml index bccfe9faf..5e2e94062 100644 --- a/etc/kubernetes/helm/enterprise-gateway/values.yaml +++ b/etc/kubernetes/helm/enterprise-gateway/values.yaml @@ -53,8 +53,8 @@ nfs: kernelspecsPvc: enabled: false - # PVC name. Required if want mount kernelspecs without nfs. PVC should create in the same namespace before EG deployed. - name: + # PVC name. Required if want mount kernelspecs without nfs. PVC should create in the same namespace before EG deployed. + name: ingress: enabled: false @@ -94,5 +94,3 @@ kip: criSocket: /var/run/docker.sock # Prefix to use if a registry is not already specified on image name (e.g., quay.io/elyra/kernel-py:2.5.0) defaultContainerRegistry: docker.io - - diff --git a/readthedocs.yml b/readthedocs.yml index 30eec12a2..3e388237d 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -1,5 +1,5 @@ version: 2 sphinx: - configuration: docs/source/conf.py + configuration: docs/source/conf.py conda: - environment: docs/environment.yml + environment: docs/environment.yml diff --git a/requirements.yml b/requirements.yml index ba047a26c..5a83b0509 100644 --- a/requirements.yml +++ b/requirements.yml @@ -31,5 +31,5 @@ dependencies: - flake8 - pip: - - .. - - -r docs/doc-requirements.txt + - .. 
+ - -r docs/doc-requirements.txt diff --git a/setup.cfg b/setup.cfg index 47f1ab5cf..45f1eb6a7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,5 +32,3 @@ ignore = # Multi line docstrings should start with a one line summary followed by an empty line H405 max-line-length = 120 - - diff --git a/setup.py b/setup.py index f5ff2b541..de9938a13 100644 --- a/setup.py +++ b/setup.py @@ -3,6 +3,7 @@ import os import sys + from setuptools import setup here = os.path.abspath(os.path.dirname(__file__)) @@ -14,82 +15,82 @@ sys.exit(1) version_ns = {} -with open(os.path.join(here, 'enterprise_gateway', '_version.py')) as f: +with open(os.path.join(here, "enterprise_gateway", "_version.py")) as f: exec(f.read(), {}, version_ns) setup_args = dict( - name='jupyter_enterprise_gateway', - author='Jupyter Development Team', - author_email='jupyter@googlegroups.com', - url='http://github.com/jupyter/enterprise_gateway', - description='A web server for spawning and communicating with remote Jupyter kernels', - long_description='''\ + name="jupyter_enterprise_gateway", + author="Jupyter Development Team", + author_email="jupyter@googlegroups.com", + url="http://github.com/jupyter/enterprise_gateway", + description="A web server for spawning and communicating with remote Jupyter kernels", + long_description="""\ A lightweight, multi-tenant, scalable and secure gateway that enables Jupyter Notebooks to share resources across distributed clusters such as Apache Spark, Kubernetes and others.. 
-''', - version=version_ns['__version__'], - license='BSD', +""", + version=version_ns["__version__"], + license="BSD", platforms="Linux, Mac OS X, Windows", - keywords=['Interactive', 'Interpreter', 'Kernel', 'Web', 'Cloud'], + keywords=["Interactive", "Interpreter", "Kernel", "Web", "Cloud"], packages=[ - 'enterprise_gateway', - 'enterprise_gateway.base', - 'enterprise_gateway.client', - 'enterprise_gateway.services', - 'enterprise_gateway.services.api', - 'enterprise_gateway.services.kernels', - 'enterprise_gateway.services.kernelspecs', - 'enterprise_gateway.services.processproxies', - 'enterprise_gateway.services.sessions' + "enterprise_gateway", + "enterprise_gateway.base", + "enterprise_gateway.client", + "enterprise_gateway.services", + "enterprise_gateway.services.api", + "enterprise_gateway.services.kernels", + "enterprise_gateway.services.kernelspecs", + "enterprise_gateway.services.processproxies", + "enterprise_gateway.services.sessions", ], install_requires=[ - 'docker>=3.5.0', - 'future', - 'jinja2>=3.1', - 'jupyter_client~=6.1', - 'jupyter_core>=4.6.0', - 'kubernetes>=4.0.0', - 'jupyter_server>=1.2', - 'paramiko>=2.1.2', - 'pexpect>=4.2.0', - 'pycryptodomex>=3.9.7', - 'pyzmq>=17.0.0', - 'requests~=2.7', - 'tornado>=6.1', - 'traitlets>=4.3.3', - 'watchdog>=2.1.3', - 'yarn-api-client>=1.0', + "docker>=3.5.0", + "future", + "jinja2>=3.1", + "jupyter_client~=6.1", + "jupyter_core>=4.6.0", + "kubernetes>=4.0.0", + "jupyter_server>=1.2", + "paramiko>=2.1.2", + "pexpect>=4.2.0", + "pycryptodomex>=3.9.7", + "pyzmq>=17.0.0", + "requests~=2.7", + "tornado>=6.1", + "traitlets>=4.3.3", + "watchdog>=2.1.3", + "yarn-api-client>=1.0", ], extras_require={ - 'test': ['coverage', 'pytest', 'pytest-tornasync', 'ipykernel', 'pre-commit'], + "test": ["coverage", "pytest", "pytest-tornasync", "ipykernel", "pre-commit"], }, - python_requires='>=3.7', + python_requires=">=3.7", classifiers=[ - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 
'Intended Audience :: System Administrators', - 'License :: OSI Approved :: BSD License', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10' + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: BSD License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], include_package_data=True, ) -if 'setuptools' in sys.modules: +if "setuptools" in sys.modules: # setupstools turns entrypoint scripts into executables on windows - setup_args['entry_points'] = { - 'console_scripts': [ - 'jupyter-enterprisegateway = enterprise_gateway.enterprisegatewayapp:launch_instance' + setup_args["entry_points"] = { + "console_scripts": [ + "jupyter-enterprisegateway = enterprise_gateway.enterprisegatewayapp:launch_instance" ] } # Don't bother installing the .py scripts if if we're using entrypoints - setup_args.pop('scripts', None) + setup_args.pop("scripts", None) -if __name__ == '__main__': +if __name__ == "__main__": setup(**setup_args) diff --git a/website/_data/navigation.yml b/website/_data/navigation.yml index 968ef274a..d168fc6f3 100644 --- a/website/_data/navigation.yml +++ b/website/_data/navigation.yml @@ -19,4 +19,4 @@ topnav: url: https://github.com/jupyter/enterprise_gateway - title: Privacy - url: /enterprise_gateway/privacy-policy \ No newline at end of file + url: /enterprise_gateway/privacy-policy diff --git a/website/_layouts/home.html b/website/_layouts/home.html index 
5bd3e83ff..29485e434 100644 --- a/website/_layouts/home.html +++ b/website/_layouts/home.html @@ -13,4 +13,4 @@ {% include scripts.html %} - \ No newline at end of file + diff --git a/website/_layouts/page.html b/website/_layouts/page.html index bb402ba13..a7d4fbe6b 100644 --- a/website/_layouts/page.html +++ b/website/_layouts/page.html @@ -18,4 +18,4 @@ {% include scripts.html %} - \ No newline at end of file + diff --git a/website/_sass/_base.scss b/website/_sass/_base.scss old mode 100755 new mode 100644 diff --git a/website/_sass/_mixins.scss b/website/_sass/_mixins.scss old mode 100755 new mode 100644 index e02eae471..63faf2527 --- a/website/_sass/_mixins.scss +++ b/website/_sass/_mixins.scss @@ -63,4 +63,4 @@ @mixin serif-font() { font-family: 'Merriweather', 'Helvetica Neue', Arial, sans-serif; -} \ No newline at end of file +} diff --git a/website/css/animate.min.css b/website/css/animate.min.css index 8d975272b..13f11cacc 100644 --- a/website/css/animate.min.css +++ b/website/css/animate.min.css @@ -3,4 +3,4 @@ Animate.css - http://daneden.me/animate Licensed under the MIT license - http://opensource.org/licenses/MIT Copyright (c) 2015 Daniel Eden -*/.animated{-webkit-animation-duration:1s;animation-duration:1s;-webkit-animation-fill-mode:both;animation-fill-mode:both}.animated.infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}.animated.hinge{-webkit-animation-duration:2s;animation-duration:2s}.animated.bounceIn,.animated.bounceOut,.animated.flipOutX,.animated.flipOutY{-webkit-animation-duration:.75s;animation-duration:.75s}@-webkit-keyframes 
bounce{0%,100%,20%,53%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1);-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}40%,43%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-30px,0);transform:translate3d(0,-30px,0)}70%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-15px,0);transform:translate3d(0,-15px,0)}90%{-webkit-transform:translate3d(0,-4px,0);transform:translate3d(0,-4px,0)}}@keyframes bounce{0%,100%,20%,53%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1);-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}40%,43%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-30px,0);transform:translate3d(0,-30px,0)}70%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-15px,0);transform:translate3d(0,-15px,0)}90%{-webkit-transform:translate3d(0,-4px,0);transform:translate3d(0,-4px,0)}}.bounce{-webkit-animation-name:bounce;animation-name:bounce;-webkit-transform-origin:center bottom;transform-origin:center bottom}@-webkit-keyframes flash{0%,100%,50%{opacity:1}25%,75%{opacity:0}}@keyframes flash{0%,100%,50%{opacity:1}25%,75%{opacity:0}}.flash{-webkit-animation-name:flash;animation-name:flash}@-webkit-keyframes pulse{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes 
pulse{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.pulse{-webkit-animation-name:pulse;animation-name:pulse}@-webkit-keyframes rubberBand{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}30%{-webkit-transform:scale3d(1.25,.75,1);transform:scale3d(1.25,.75,1)}40%{-webkit-transform:scale3d(0.75,1.25,1);transform:scale3d(0.75,1.25,1)}50%{-webkit-transform:scale3d(1.15,.85,1);transform:scale3d(1.15,.85,1)}65%{-webkit-transform:scale3d(.95,1.05,1);transform:scale3d(.95,1.05,1)}75%{-webkit-transform:scale3d(1.05,.95,1);transform:scale3d(1.05,.95,1)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes rubberBand{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}30%{-webkit-transform:scale3d(1.25,.75,1);transform:scale3d(1.25,.75,1)}40%{-webkit-transform:scale3d(0.75,1.25,1);transform:scale3d(0.75,1.25,1)}50%{-webkit-transform:scale3d(1.15,.85,1);transform:scale3d(1.15,.85,1)}65%{-webkit-transform:scale3d(.95,1.05,1);transform:scale3d(.95,1.05,1)}75%{-webkit-transform:scale3d(1.05,.95,1);transform:scale3d(1.05,.95,1)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.rubberBand{-webkit-animation-name:rubberBand;animation-name:rubberBand}@-webkit-keyframes shake{0%,100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}10%,30%,50%,70%,90%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}20%,40%,60%,80%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}}@keyframes shake{0%,100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}10%,30%,50%,70%,90%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}20%,40%,60%,80%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}}.shake{-webkit-animation-name:shake;animation-name:shake}@-webkit-keyframes 
swing{20%{-webkit-transform:rotate3d(0,0,1,15deg);transform:rotate3d(0,0,1,15deg)}40%{-webkit-transform:rotate3d(0,0,1,-10deg);transform:rotate3d(0,0,1,-10deg)}60%{-webkit-transform:rotate3d(0,0,1,5deg);transform:rotate3d(0,0,1,5deg)}80%{-webkit-transform:rotate3d(0,0,1,-5deg);transform:rotate3d(0,0,1,-5deg)}100%{-webkit-transform:rotate3d(0,0,1,0deg);transform:rotate3d(0,0,1,0deg)}}@keyframes swing{20%{-webkit-transform:rotate3d(0,0,1,15deg);transform:rotate3d(0,0,1,15deg)}40%{-webkit-transform:rotate3d(0,0,1,-10deg);transform:rotate3d(0,0,1,-10deg)}60%{-webkit-transform:rotate3d(0,0,1,5deg);transform:rotate3d(0,0,1,5deg)}80%{-webkit-transform:rotate3d(0,0,1,-5deg);transform:rotate3d(0,0,1,-5deg)}100%{-webkit-transform:rotate3d(0,0,1,0deg);transform:rotate3d(0,0,1,0deg)}}.swing{-webkit-transform-origin:top center;transform-origin:top center;-webkit-animation-name:swing;animation-name:swing}@-webkit-keyframes tada{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg);transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg);transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,-3deg);transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,-3deg)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes tada{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg);transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg);transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,-3deg);transform:scale3d(1.1,1.1,1.1) 
rotate3d(0,0,1,-3deg)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.tada{-webkit-animation-name:tada;animation-name:tada}@-webkit-keyframes wobble{0%{-webkit-transform:none;transform:none}15%{-webkit-transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg);transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg)}30%{-webkit-transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg);transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg)}45%{-webkit-transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg);transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg)}60%{-webkit-transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg);transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg)}75%{-webkit-transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg);transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg)}100%{-webkit-transform:none;transform:none}}@keyframes wobble{0%{-webkit-transform:none;transform:none}15%{-webkit-transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg);transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg)}30%{-webkit-transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg);transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg)}45%{-webkit-transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg);transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg)}60%{-webkit-transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg);transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg)}75%{-webkit-transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg);transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg)}100%{-webkit-transform:none;transform:none}}.wobble{-webkit-animation-name:wobble;animation-name:wobble}@-webkit-keyframes 
bounceIn{0%,100%,20%,40%,60%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}100%{opacity:1;-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes bounceIn{0%,100%,20%,40%,60%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}100%{opacity:1;-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.bounceIn{-webkit-animation-name:bounceIn;animation-name:bounceIn}@-webkit-keyframes bounceInDown{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,-3000px,0);transform:translate3d(0,-3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,25px,0);transform:translate3d(0,25px,0)}75%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}90%{-webkit-transform:translate3d(0,5px,0);transform:translate3d(0,5px,0)}100%{-webkit-transform:none;transform:none}}@keyframes 
bounceInDown{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,-3000px,0);transform:translate3d(0,-3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,25px,0);transform:translate3d(0,25px,0)}75%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}90%{-webkit-transform:translate3d(0,5px,0);transform:translate3d(0,5px,0)}100%{-webkit-transform:none;transform:none}}.bounceInDown{-webkit-animation-name:bounceInDown;animation-name:bounceInDown}@-webkit-keyframes bounceInLeft{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(-3000px,0,0);transform:translate3d(-3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(25px,0,0);transform:translate3d(25px,0,0)}75%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}90%{-webkit-transform:translate3d(5px,0,0);transform:translate3d(5px,0,0)}100%{-webkit-transform:none;transform:none}}@keyframes bounceInLeft{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(-3000px,0,0);transform:translate3d(-3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(25px,0,0);transform:translate3d(25px,0,0)}75%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}90%{-webkit-transform:translate3d(5px,0,0);transform:translate3d(5px,0,0)}100%{-webkit-transform:none;transform:none}}.bounceInLeft{-webkit-animation-name:bounceInLeft;animation-name:bounceInLeft}@-webkit-keyframes 
bounceInRight{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(3000px,0,0);transform:translate3d(3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(-25px,0,0);transform:translate3d(-25px,0,0)}75%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}90%{-webkit-transform:translate3d(-5px,0,0);transform:translate3d(-5px,0,0)}100%{-webkit-transform:none;transform:none}}@keyframes bounceInRight{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(3000px,0,0);transform:translate3d(3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(-25px,0,0);transform:translate3d(-25px,0,0)}75%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}90%{-webkit-transform:translate3d(-5px,0,0);transform:translate3d(-5px,0,0)}100%{-webkit-transform:none;transform:none}}.bounceInRight{-webkit-animation-name:bounceInRight;animation-name:bounceInRight}@-webkit-keyframes bounceInUp{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,3000px,0);transform:translate3d(0,3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}75%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}90%{-webkit-transform:translate3d(0,-5px,0);transform:translate3d(0,-5px,0)}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes 
bounceInUp{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,3000px,0);transform:translate3d(0,3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}75%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}90%{-webkit-transform:translate3d(0,-5px,0);transform:translate3d(0,-5px,0)}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.bounceInUp{-webkit-animation-name:bounceInUp;animation-name:bounceInUp}@-webkit-keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}100%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}@keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}100%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}.bounceOut{-webkit-animation-name:bounceOut;animation-name:bounceOut}@-webkit-keyframes bounceOutDown{20%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}@keyframes bounceOutDown{20%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}.bounceOutDown{-webkit-animation-name:bounceOutDown;animation-name:bounceOutDown}@-webkit-keyframes 
bounceOutLeft{20%{opacity:1;-webkit-transform:translate3d(20px,0,0);transform:translate3d(20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}@keyframes bounceOutLeft{20%{opacity:1;-webkit-transform:translate3d(20px,0,0);transform:translate3d(20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}.bounceOutLeft{-webkit-animation-name:bounceOutLeft;animation-name:bounceOutLeft}@-webkit-keyframes bounceOutRight{20%{opacity:1;-webkit-transform:translate3d(-20px,0,0);transform:translate3d(-20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}@keyframes bounceOutRight{20%{opacity:1;-webkit-transform:translate3d(-20px,0,0);transform:translate3d(-20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}.bounceOutRight{-webkit-animation-name:bounceOutRight;animation-name:bounceOutRight}@-webkit-keyframes bounceOutUp{20%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,20px,0);transform:translate3d(0,20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}@keyframes bounceOutUp{20%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,20px,0);transform:translate3d(0,20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}.bounceOutUp{-webkit-animation-name:bounceOutUp;animation-name:bounceOutUp}@-webkit-keyframes fadeIn{0%{opacity:0}100%{opacity:1}}@keyframes fadeIn{0%{opacity:0}100%{opacity:1}}.fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes fadeInDown{0%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes 
fadeInDown{0%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInDownBig{-webkit-animation-name:fadeInDownBig;animation-name:fadeInDownBig}@-webkit-keyframes fadeInLeft{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInLeft{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInLeft{-webkit-animation-name:fadeInLeft;animation-name:fadeInLeft}@-webkit-keyframes fadeInLeftBig{0%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInLeftBig{0%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInLeftBig{-webkit-animation-name:fadeInLeftBig;animation-name:fadeInLeftBig}@-webkit-keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInRight{-webkit-animation-name:fadeInRight;animation-name:fadeInRight}@-webkit-keyframes 
fadeInRightBig{0%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInRightBig{0%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInRightBig{-webkit-animation-name:fadeInRightBig;animation-name:fadeInRightBig}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInUp{0%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInUpBig{-webkit-animation-name:fadeInUpBig;animation-name:fadeInUpBig}@-webkit-keyframes fadeOut{0%{opacity:1}100%{opacity:0}}@keyframes fadeOut{0%{opacity:1}100%{opacity:0}}.fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}@keyframes fadeOutDown{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}.fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes fadeOutDownBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}@keyframes 
fadeOutDownBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}.fadeOutDownBig{-webkit-animation-name:fadeOutDownBig;animation-name:fadeOutDownBig}@-webkit-keyframes fadeOutLeft{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}@keyframes fadeOutLeft{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}.fadeOutLeft{-webkit-animation-name:fadeOutLeft;animation-name:fadeOutLeft}@-webkit-keyframes fadeOutLeftBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}@keyframes fadeOutLeftBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}.fadeOutLeftBig{-webkit-animation-name:fadeOutLeftBig;animation-name:fadeOutLeftBig}@-webkit-keyframes fadeOutRight{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}@keyframes fadeOutRight{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}.fadeOutRight{-webkit-animation-name:fadeOutRight;animation-name:fadeOutRight}@-webkit-keyframes fadeOutRightBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}@keyframes fadeOutRightBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}.fadeOutRightBig{-webkit-animation-name:fadeOutRightBig;animation-name:fadeOutRightBig}@-webkit-keyframes fadeOutUp{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}@keyframes fadeOutUp{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}.fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes 
fadeOutUpBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}@keyframes fadeOutUpBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}.fadeOutUpBig{-webkit-animation-name:fadeOutUpBig;animation-name:fadeOutUpBig}@-webkit-keyframes flip{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-360deg);transform:perspective(400px) rotate3d(0,1,0,-360deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}40%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}50%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-170deg);transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-170deg);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}80%{-webkit-transform:perspective(400px) scale3d(.95,.95,.95);transform:perspective(400px) scale3d(.95,.95,.95);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}100%{-webkit-transform:perspective(400px);transform:perspective(400px);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}}@keyframes flip{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-360deg);transform:perspective(400px) rotate3d(0,1,0,-360deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}40%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}50%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-170deg);transform:perspective(400px) translate3d(0,0,150px) 
rotate3d(0,1,0,-170deg);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}80%{-webkit-transform:perspective(400px) scale3d(.95,.95,.95);transform:perspective(400px) scale3d(.95,.95,.95);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}100%{-webkit-transform:perspective(400px);transform:perspective(400px);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}}.animated.flip{-webkit-backface-visibility:visible;backface-visibility:visible;-webkit-animation-name:flip;animation-name:flip}@-webkit-keyframes flipInX{0%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(1,0,0,10deg);transform:perspective(400px) rotate3d(1,0,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-5deg);transform:perspective(400px) rotate3d(1,0,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes flipInX{0%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(1,0,0,10deg);transform:perspective(400px) rotate3d(1,0,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-5deg);transform:perspective(400px) 
rotate3d(1,0,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}.flipInX{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipInX;animation-name:flipInX}@-webkit-keyframes flipInY{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-20deg);transform:perspective(400px) rotate3d(0,1,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(0,1,0,10deg);transform:perspective(400px) rotate3d(0,1,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-5deg);transform:perspective(400px) rotate3d(0,1,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes flipInY{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-20deg);transform:perspective(400px) rotate3d(0,1,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(0,1,0,10deg);transform:perspective(400px) rotate3d(0,1,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-5deg);transform:perspective(400px) rotate3d(0,1,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}.flipInY{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipInY;animation-name:flipInY}@-webkit-keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) 
rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);opacity:0}}@keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);opacity:0}}.flipOutX{-webkit-animation-name:flipOutX;animation-name:flipOutX;-webkit-backface-visibility:visible!important;backface-visibility:visible!important}@-webkit-keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-15deg);transform:perspective(400px) rotate3d(0,1,0,-15deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);opacity:0}}@keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-15deg);transform:perspective(400px) rotate3d(0,1,0,-15deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);opacity:0}}.flipOutY{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipOutY;animation-name:flipOutY}@-webkit-keyframes lightSpeedIn{0%{-webkit-transform:translate3d(100%,0,0) skewX(-30deg);transform:translate3d(100%,0,0) skewX(-30deg);opacity:0}60%{-webkit-transform:skewX(20deg);transform:skewX(20deg);opacity:1}80%{-webkit-transform:skewX(-5deg);transform:skewX(-5deg);opacity:1}100%{-webkit-transform:none;transform:none;opacity:1}}@keyframes lightSpeedIn{0%{-webkit-transform:translate3d(100%,0,0) skewX(-30deg);transform:translate3d(100%,0,0) 
skewX(-30deg);opacity:0}60%{-webkit-transform:skewX(20deg);transform:skewX(20deg);opacity:1}80%{-webkit-transform:skewX(-5deg);transform:skewX(-5deg);opacity:1}100%{-webkit-transform:none;transform:none;opacity:1}}.lightSpeedIn{-webkit-animation-name:lightSpeedIn;animation-name:lightSpeedIn;-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}@-webkit-keyframes lightSpeedOut{0%{opacity:1}100%{-webkit-transform:translate3d(100%,0,0) skewX(30deg);transform:translate3d(100%,0,0) skewX(30deg);opacity:0}}@keyframes lightSpeedOut{0%{opacity:1}100%{-webkit-transform:translate3d(100%,0,0) skewX(30deg);transform:translate3d(100%,0,0) skewX(30deg);opacity:0}}.lightSpeedOut{-webkit-animation-name:lightSpeedOut;animation-name:lightSpeedOut;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}@-webkit-keyframes rotateIn{0%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,-200deg);transform:rotate3d(0,0,1,-200deg);opacity:0}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateIn{0%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,-200deg);transform:rotate3d(0,0,1,-200deg);opacity:0}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:none;transform:none;opacity:1}}.rotateIn{-webkit-animation-name:rotateIn;animation-name:rotateIn}@-webkit-keyframes rotateInDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}100%{-webkit-transform-origin:left 
bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInDownLeft{-webkit-animation-name:rotateInDownLeft;animation-name:rotateInDownLeft}@-webkit-keyframes rotateInDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInDownRight{-webkit-animation-name:rotateInDownRight;animation-name:rotateInDownRight}@-webkit-keyframes rotateInUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInUpLeft{-webkit-animation-name:rotateInUpLeft;animation-name:rotateInUpLeft}@-webkit-keyframes rotateInUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,-90deg);transform:rotate3d(0,0,1,-90deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right 
bottom;-webkit-transform:rotate3d(0,0,1,-90deg);transform:rotate3d(0,0,1,-90deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInUpRight{-webkit-animation-name:rotateInUpRight;animation-name:rotateInUpRight}@-webkit-keyframes rotateOut{0%{-webkit-transform-origin:center;transform-origin:center;opacity:1}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,200deg);transform:rotate3d(0,0,1,200deg);opacity:0}}@keyframes rotateOut{0%{-webkit-transform-origin:center;transform-origin:center;opacity:1}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,200deg);transform:rotate3d(0,0,1,200deg);opacity:0}}.rotateOut{-webkit-animation-name:rotateOut;animation-name:rotateOut}@-webkit-keyframes rotateOutDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}}@keyframes rotateOutDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}}.rotateOutDownLeft{-webkit-animation-name:rotateOutDownLeft;animation-name:rotateOutDownLeft}@-webkit-keyframes rotateOutDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}@keyframes rotateOutDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right 
bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}.rotateOutDownRight{-webkit-animation-name:rotateOutDownRight;animation-name:rotateOutDownRight}@-webkit-keyframes rotateOutUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}@keyframes rotateOutUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}.rotateOutUpLeft{-webkit-animation-name:rotateOutUpLeft;animation-name:rotateOutUpLeft}@-webkit-keyframes rotateOutUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,90deg);transform:rotate3d(0,0,1,90deg);opacity:0}}@keyframes rotateOutUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,90deg);transform:rotate3d(0,0,1,90deg);opacity:0}}.rotateOutUpRight{-webkit-animation-name:rotateOutUpRight;animation-name:rotateOutUpRight}@-webkit-keyframes hinge{0%{-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-transform:rotate3d(0,0,1,80deg);transform:rotate3d(0,0,1,80deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}40%,80%{-webkit-transform:rotate3d(0,0,1,60deg);transform:rotate3d(0,0,1,60deg);-webkit-transform-origin:top left;transform-origin:top 
left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;opacity:1}100%{-webkit-transform:translate3d(0,700px,0);transform:translate3d(0,700px,0);opacity:0}}@keyframes hinge{0%{-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-transform:rotate3d(0,0,1,80deg);transform:rotate3d(0,0,1,80deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}40%,80%{-webkit-transform:rotate3d(0,0,1,60deg);transform:rotate3d(0,0,1,60deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;opacity:1}100%{-webkit-transform:translate3d(0,700px,0);transform:translate3d(0,700px,0);opacity:0}}.hinge{-webkit-animation-name:hinge;animation-name:hinge}@-webkit-keyframes rollIn{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg);transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes rollIn{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg);transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg)}100%{opacity:1;-webkit-transform:none;transform:none}}.rollIn{-webkit-animation-name:rollIn;animation-name:rollIn}@-webkit-keyframes rollOut{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg);transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg)}}@keyframes rollOut{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg);transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg)}}.rollOut{-webkit-animation-name:rollOut;animation-name:rollOut}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes 
zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.zoomIn{-webkit-animation-name:zoomIn;animation-name:zoomIn}@-webkit-keyframes zoomInDown{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInDown{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInDown{-webkit-animation-name:zoomInDown;animation-name:zoomInDown}@-webkit-keyframes zoomInLeft{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(10px,0,0);transform:scale3d(.475,.475,.475) translate3d(10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInLeft{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);transform:scale3d(.1,.1,.1) 
translate3d(-1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(10px,0,0);transform:scale3d(.475,.475,.475) translate3d(10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInLeft{-webkit-animation-name:zoomInLeft;animation-name:zoomInLeft}@-webkit-keyframes zoomInRight{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInRight{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInRight{-webkit-animation-name:zoomInRight;animation-name:zoomInRight}@-webkit-keyframes zoomInUp{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) 
translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInUp{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInUp{-webkit-animation-name:zoomInUp;animation-name:zoomInUp}@-webkit-keyframes zoomOut{0%{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}100%{opacity:0}}@keyframes zoomOut{0%{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}100%{opacity:0}}.zoomOut{-webkit-animation-name:zoomOut;animation-name:zoomOut}@-webkit-keyframes zoomOutDown{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomOutDown{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) 
translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomOutDown{-webkit-animation-name:zoomOutDown;animation-name:zoomOutDown}@-webkit-keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(42px,0,0);transform:scale3d(.475,.475,.475) translate3d(42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(-2000px,0,0);transform:scale(.1) translate3d(-2000px,0,0);-webkit-transform-origin:left center;transform-origin:left center}}@keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(42px,0,0);transform:scale3d(.475,.475,.475) translate3d(42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(-2000px,0,0);transform:scale(.1) translate3d(-2000px,0,0);-webkit-transform-origin:left center;transform-origin:left center}}.zoomOutLeft{-webkit-animation-name:zoomOutLeft;animation-name:zoomOutLeft}@-webkit-keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-42px,0,0);transform:scale3d(.475,.475,.475) translate3d(-42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(2000px,0,0);transform:scale(.1) translate3d(2000px,0,0);-webkit-transform-origin:right center;transform-origin:right center}}@keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-42px,0,0);transform:scale3d(.475,.475,.475) translate3d(-42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(2000px,0,0);transform:scale(.1) translate3d(2000px,0,0);-webkit-transform-origin:right center;transform-origin:right 
center}}.zoomOutRight{-webkit-animation-name:zoomOutRight;animation-name:zoomOutRight}@-webkit-keyframes zoomOutUp{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomOutUp{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomOutUp{-webkit-animation-name:zoomOutUp;animation-name:zoomOutUp}@-webkit-keyframes slideInDown{0%{-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInDown{0%{-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInDown{-webkit-animation-name:slideInDown;animation-name:slideInDown}@-webkit-keyframes 
slideInLeft{0%{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInLeft{0%{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInLeft{-webkit-animation-name:slideInLeft;animation-name:slideInLeft}@-webkit-keyframes slideInRight{0%{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInRight{0%{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInRight{-webkit-animation-name:slideInRight;animation-name:slideInRight}@-webkit-keyframes slideInUp{0%{-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInUp{0%{-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInUp{-webkit-animation-name:slideInUp;animation-name:slideInUp}@-webkit-keyframes slideOutDown{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}@keyframes slideOutDown{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}.slideOutDown{-webkit-animation-name:slideOutDown;animation-name:slideOutDown}@-webkit-keyframes 
slideOutLeft{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}@keyframes slideOutLeft{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}.slideOutLeft{-webkit-animation-name:slideOutLeft;animation-name:slideOutLeft}@-webkit-keyframes slideOutRight{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}@keyframes slideOutRight{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}.slideOutRight{-webkit-animation-name:slideOutRight;animation-name:slideOutRight}@-webkit-keyframes slideOutUp{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}@keyframes slideOutUp{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}.slideOutUp{-webkit-animation-name:slideOutUp;animation-name:slideOutUp} \ No newline at end of file +*/.animated{-webkit-animation-duration:1s;animation-duration:1s;-webkit-animation-fill-mode:both;animation-fill-mode:both}.animated.infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}.animated.hinge{-webkit-animation-duration:2s;animation-duration:2s}.animated.bounceIn,.animated.bounceOut,.animated.flipOutX,.animated.flipOutY{-webkit-animation-duration:.75s;animation-duration:.75s}@-webkit-keyframes 
bounce{0%,100%,20%,53%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1);-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}40%,43%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-30px,0);transform:translate3d(0,-30px,0)}70%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-15px,0);transform:translate3d(0,-15px,0)}90%{-webkit-transform:translate3d(0,-4px,0);transform:translate3d(0,-4px,0)}}@keyframes bounce{0%,100%,20%,53%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1);-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}40%,43%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-30px,0);transform:translate3d(0,-30px,0)}70%{-webkit-transition-timing-function:cubic-bezier(0.755,.050,.855,.060);transition-timing-function:cubic-bezier(0.755,.050,.855,.060);-webkit-transform:translate3d(0,-15px,0);transform:translate3d(0,-15px,0)}90%{-webkit-transform:translate3d(0,-4px,0);transform:translate3d(0,-4px,0)}}.bounce{-webkit-animation-name:bounce;animation-name:bounce;-webkit-transform-origin:center bottom;transform-origin:center bottom}@-webkit-keyframes flash{0%,100%,50%{opacity:1}25%,75%{opacity:0}}@keyframes flash{0%,100%,50%{opacity:1}25%,75%{opacity:0}}.flash{-webkit-animation-name:flash;animation-name:flash}@-webkit-keyframes pulse{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes 
pulse{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.pulse{-webkit-animation-name:pulse;animation-name:pulse}@-webkit-keyframes rubberBand{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}30%{-webkit-transform:scale3d(1.25,.75,1);transform:scale3d(1.25,.75,1)}40%{-webkit-transform:scale3d(0.75,1.25,1);transform:scale3d(0.75,1.25,1)}50%{-webkit-transform:scale3d(1.15,.85,1);transform:scale3d(1.15,.85,1)}65%{-webkit-transform:scale3d(.95,1.05,1);transform:scale3d(.95,1.05,1)}75%{-webkit-transform:scale3d(1.05,.95,1);transform:scale3d(1.05,.95,1)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes rubberBand{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}30%{-webkit-transform:scale3d(1.25,.75,1);transform:scale3d(1.25,.75,1)}40%{-webkit-transform:scale3d(0.75,1.25,1);transform:scale3d(0.75,1.25,1)}50%{-webkit-transform:scale3d(1.15,.85,1);transform:scale3d(1.15,.85,1)}65%{-webkit-transform:scale3d(.95,1.05,1);transform:scale3d(.95,1.05,1)}75%{-webkit-transform:scale3d(1.05,.95,1);transform:scale3d(1.05,.95,1)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.rubberBand{-webkit-animation-name:rubberBand;animation-name:rubberBand}@-webkit-keyframes shake{0%,100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}10%,30%,50%,70%,90%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}20%,40%,60%,80%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}}@keyframes shake{0%,100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}10%,30%,50%,70%,90%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}20%,40%,60%,80%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}}.shake{-webkit-animation-name:shake;animation-name:shake}@-webkit-keyframes 
swing{20%{-webkit-transform:rotate3d(0,0,1,15deg);transform:rotate3d(0,0,1,15deg)}40%{-webkit-transform:rotate3d(0,0,1,-10deg);transform:rotate3d(0,0,1,-10deg)}60%{-webkit-transform:rotate3d(0,0,1,5deg);transform:rotate3d(0,0,1,5deg)}80%{-webkit-transform:rotate3d(0,0,1,-5deg);transform:rotate3d(0,0,1,-5deg)}100%{-webkit-transform:rotate3d(0,0,1,0deg);transform:rotate3d(0,0,1,0deg)}}@keyframes swing{20%{-webkit-transform:rotate3d(0,0,1,15deg);transform:rotate3d(0,0,1,15deg)}40%{-webkit-transform:rotate3d(0,0,1,-10deg);transform:rotate3d(0,0,1,-10deg)}60%{-webkit-transform:rotate3d(0,0,1,5deg);transform:rotate3d(0,0,1,5deg)}80%{-webkit-transform:rotate3d(0,0,1,-5deg);transform:rotate3d(0,0,1,-5deg)}100%{-webkit-transform:rotate3d(0,0,1,0deg);transform:rotate3d(0,0,1,0deg)}}.swing{-webkit-transform-origin:top center;transform-origin:top center;-webkit-animation-name:swing;animation-name:swing}@-webkit-keyframes tada{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg);transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg);transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,-3deg);transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,-3deg)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes tada{0%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg);transform:scale3d(.9,.9,.9) rotate3d(0,0,1,-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg);transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate3d(0,0,1,-3deg);transform:scale3d(1.1,1.1,1.1) 
rotate3d(0,0,1,-3deg)}100%{-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.tada{-webkit-animation-name:tada;animation-name:tada}@-webkit-keyframes wobble{0%{-webkit-transform:none;transform:none}15%{-webkit-transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg);transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg)}30%{-webkit-transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg);transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg)}45%{-webkit-transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg);transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg)}60%{-webkit-transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg);transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg)}75%{-webkit-transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg);transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg)}100%{-webkit-transform:none;transform:none}}@keyframes wobble{0%{-webkit-transform:none;transform:none}15%{-webkit-transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg);transform:translate3d(-25%,0,0) rotate3d(0,0,1,-5deg)}30%{-webkit-transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg);transform:translate3d(20%,0,0) rotate3d(0,0,1,3deg)}45%{-webkit-transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg);transform:translate3d(-15%,0,0) rotate3d(0,0,1,-3deg)}60%{-webkit-transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg);transform:translate3d(10%,0,0) rotate3d(0,0,1,2deg)}75%{-webkit-transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg);transform:translate3d(-5%,0,0) rotate3d(0,0,1,-1deg)}100%{-webkit-transform:none;transform:none}}.wobble{-webkit-animation-name:wobble;animation-name:wobble}@-webkit-keyframes 
bounceIn{0%,100%,20%,40%,60%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}100%{opacity:1;-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}@keyframes bounceIn{0%,100%,20%,40%,60%,80%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}100%{opacity:1;-webkit-transform:scale3d(1,1,1);transform:scale3d(1,1,1)}}.bounceIn{-webkit-animation-name:bounceIn;animation-name:bounceIn}@-webkit-keyframes bounceInDown{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,-3000px,0);transform:translate3d(0,-3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,25px,0);transform:translate3d(0,25px,0)}75%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}90%{-webkit-transform:translate3d(0,5px,0);transform:translate3d(0,5px,0)}100%{-webkit-transform:none;transform:none}}@keyframes 
bounceInDown{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,-3000px,0);transform:translate3d(0,-3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,25px,0);transform:translate3d(0,25px,0)}75%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}90%{-webkit-transform:translate3d(0,5px,0);transform:translate3d(0,5px,0)}100%{-webkit-transform:none;transform:none}}.bounceInDown{-webkit-animation-name:bounceInDown;animation-name:bounceInDown}@-webkit-keyframes bounceInLeft{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(-3000px,0,0);transform:translate3d(-3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(25px,0,0);transform:translate3d(25px,0,0)}75%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}90%{-webkit-transform:translate3d(5px,0,0);transform:translate3d(5px,0,0)}100%{-webkit-transform:none;transform:none}}@keyframes bounceInLeft{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(-3000px,0,0);transform:translate3d(-3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(25px,0,0);transform:translate3d(25px,0,0)}75%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}90%{-webkit-transform:translate3d(5px,0,0);transform:translate3d(5px,0,0)}100%{-webkit-transform:none;transform:none}}.bounceInLeft{-webkit-animation-name:bounceInLeft;animation-name:bounceInLeft}@-webkit-keyframes 
bounceInRight{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(3000px,0,0);transform:translate3d(3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(-25px,0,0);transform:translate3d(-25px,0,0)}75%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}90%{-webkit-transform:translate3d(-5px,0,0);transform:translate3d(-5px,0,0)}100%{-webkit-transform:none;transform:none}}@keyframes bounceInRight{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(3000px,0,0);transform:translate3d(3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(-25px,0,0);transform:translate3d(-25px,0,0)}75%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}90%{-webkit-transform:translate3d(-5px,0,0);transform:translate3d(-5px,0,0)}100%{-webkit-transform:none;transform:none}}.bounceInRight{-webkit-animation-name:bounceInRight;animation-name:bounceInRight}@-webkit-keyframes bounceInUp{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,3000px,0);transform:translate3d(0,3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}75%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}90%{-webkit-transform:translate3d(0,-5px,0);transform:translate3d(0,-5px,0)}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes 
bounceInUp{0%,100%,60%,75%,90%{-webkit-transition-timing-function:cubic-bezier(0.215,.61,.355,1);transition-timing-function:cubic-bezier(0.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,3000px,0);transform:translate3d(0,3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}75%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}90%{-webkit-transform:translate3d(0,-5px,0);transform:translate3d(0,-5px,0)}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.bounceInUp{-webkit-animation-name:bounceInUp;animation-name:bounceInUp}@-webkit-keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}100%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}@keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}100%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}.bounceOut{-webkit-animation-name:bounceOut;animation-name:bounceOut}@-webkit-keyframes bounceOutDown{20%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}@keyframes bounceOutDown{20%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}.bounceOutDown{-webkit-animation-name:bounceOutDown;animation-name:bounceOutDown}@-webkit-keyframes 
bounceOutLeft{20%{opacity:1;-webkit-transform:translate3d(20px,0,0);transform:translate3d(20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}@keyframes bounceOutLeft{20%{opacity:1;-webkit-transform:translate3d(20px,0,0);transform:translate3d(20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}.bounceOutLeft{-webkit-animation-name:bounceOutLeft;animation-name:bounceOutLeft}@-webkit-keyframes bounceOutRight{20%{opacity:1;-webkit-transform:translate3d(-20px,0,0);transform:translate3d(-20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}@keyframes bounceOutRight{20%{opacity:1;-webkit-transform:translate3d(-20px,0,0);transform:translate3d(-20px,0,0)}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}.bounceOutRight{-webkit-animation-name:bounceOutRight;animation-name:bounceOutRight}@-webkit-keyframes bounceOutUp{20%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,20px,0);transform:translate3d(0,20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}@keyframes bounceOutUp{20%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,20px,0);transform:translate3d(0,20px,0)}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}.bounceOutUp{-webkit-animation-name:bounceOutUp;animation-name:bounceOutUp}@-webkit-keyframes fadeIn{0%{opacity:0}100%{opacity:1}}@keyframes fadeIn{0%{opacity:0}100%{opacity:1}}.fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes fadeInDown{0%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes 
fadeInDown{0%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInDownBig{-webkit-animation-name:fadeInDownBig;animation-name:fadeInDownBig}@-webkit-keyframes fadeInLeft{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInLeft{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInLeft{-webkit-animation-name:fadeInLeft;animation-name:fadeInLeft}@-webkit-keyframes fadeInLeftBig{0%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInLeftBig{0%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInLeftBig{-webkit-animation-name:fadeInLeftBig;animation-name:fadeInLeftBig}@-webkit-keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInRight{-webkit-animation-name:fadeInRight;animation-name:fadeInRight}@-webkit-keyframes 
fadeInRightBig{0%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInRightBig{0%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInRightBig{-webkit-animation-name:fadeInRightBig;animation-name:fadeInRightBig}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInUp{0%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}100%{opacity:1;-webkit-transform:none;transform:none}}.fadeInUpBig{-webkit-animation-name:fadeInUpBig;animation-name:fadeInUpBig}@-webkit-keyframes fadeOut{0%{opacity:1}100%{opacity:0}}@keyframes fadeOut{0%{opacity:1}100%{opacity:0}}.fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}@keyframes fadeOutDown{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}.fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes fadeOutDownBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}@keyframes 
fadeOutDownBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}.fadeOutDownBig{-webkit-animation-name:fadeOutDownBig;animation-name:fadeOutDownBig}@-webkit-keyframes fadeOutLeft{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}@keyframes fadeOutLeft{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}.fadeOutLeft{-webkit-animation-name:fadeOutLeft;animation-name:fadeOutLeft}@-webkit-keyframes fadeOutLeftBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}@keyframes fadeOutLeftBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}.fadeOutLeftBig{-webkit-animation-name:fadeOutLeftBig;animation-name:fadeOutLeftBig}@-webkit-keyframes fadeOutRight{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}@keyframes fadeOutRight{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}.fadeOutRight{-webkit-animation-name:fadeOutRight;animation-name:fadeOutRight}@-webkit-keyframes fadeOutRightBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}@keyframes fadeOutRightBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}.fadeOutRightBig{-webkit-animation-name:fadeOutRightBig;animation-name:fadeOutRightBig}@-webkit-keyframes fadeOutUp{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}@keyframes fadeOutUp{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}.fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes 
fadeOutUpBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}@keyframes fadeOutUpBig{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}.fadeOutUpBig{-webkit-animation-name:fadeOutUpBig;animation-name:fadeOutUpBig}@-webkit-keyframes flip{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-360deg);transform:perspective(400px) rotate3d(0,1,0,-360deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}40%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}50%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-170deg);transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-170deg);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}80%{-webkit-transform:perspective(400px) scale3d(.95,.95,.95);transform:perspective(400px) scale3d(.95,.95,.95);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}100%{-webkit-transform:perspective(400px);transform:perspective(400px);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}}@keyframes flip{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-360deg);transform:perspective(400px) rotate3d(0,1,0,-360deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}40%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-190deg);-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}50%{-webkit-transform:perspective(400px) translate3d(0,0,150px) rotate3d(0,1,0,-170deg);transform:perspective(400px) translate3d(0,0,150px) 
rotate3d(0,1,0,-170deg);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}80%{-webkit-transform:perspective(400px) scale3d(.95,.95,.95);transform:perspective(400px) scale3d(.95,.95,.95);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}100%{-webkit-transform:perspective(400px);transform:perspective(400px);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}}.animated.flip{-webkit-backface-visibility:visible;backface-visibility:visible;-webkit-animation-name:flip;animation-name:flip}@-webkit-keyframes flipInX{0%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(1,0,0,10deg);transform:perspective(400px) rotate3d(1,0,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-5deg);transform:perspective(400px) rotate3d(1,0,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes flipInX{0%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(1,0,0,10deg);transform:perspective(400px) rotate3d(1,0,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-5deg);transform:perspective(400px) 
rotate3d(1,0,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}.flipInX{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipInX;animation-name:flipInX}@-webkit-keyframes flipInY{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-20deg);transform:perspective(400px) rotate3d(0,1,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(0,1,0,10deg);transform:perspective(400px) rotate3d(0,1,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-5deg);transform:perspective(400px) rotate3d(0,1,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes flipInY{0%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;opacity:0}40%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-20deg);transform:perspective(400px) rotate3d(0,1,0,-20deg);-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in}60%{-webkit-transform:perspective(400px) rotate3d(0,1,0,10deg);transform:perspective(400px) rotate3d(0,1,0,10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-5deg);transform:perspective(400px) rotate3d(0,1,0,-5deg)}100%{-webkit-transform:perspective(400px);transform:perspective(400px)}}.flipInY{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipInY;animation-name:flipInY}@-webkit-keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) 
rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);opacity:0}}@keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotate3d(1,0,0,-20deg);transform:perspective(400px) rotate3d(1,0,0,-20deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(1,0,0,90deg);transform:perspective(400px) rotate3d(1,0,0,90deg);opacity:0}}.flipOutX{-webkit-animation-name:flipOutX;animation-name:flipOutX;-webkit-backface-visibility:visible!important;backface-visibility:visible!important}@-webkit-keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-15deg);transform:perspective(400px) rotate3d(0,1,0,-15deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);opacity:0}}@keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotate3d(0,1,0,-15deg);transform:perspective(400px) rotate3d(0,1,0,-15deg);opacity:1}100%{-webkit-transform:perspective(400px) rotate3d(0,1,0,90deg);transform:perspective(400px) rotate3d(0,1,0,90deg);opacity:0}}.flipOutY{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipOutY;animation-name:flipOutY}@-webkit-keyframes lightSpeedIn{0%{-webkit-transform:translate3d(100%,0,0) skewX(-30deg);transform:translate3d(100%,0,0) skewX(-30deg);opacity:0}60%{-webkit-transform:skewX(20deg);transform:skewX(20deg);opacity:1}80%{-webkit-transform:skewX(-5deg);transform:skewX(-5deg);opacity:1}100%{-webkit-transform:none;transform:none;opacity:1}}@keyframes lightSpeedIn{0%{-webkit-transform:translate3d(100%,0,0) skewX(-30deg);transform:translate3d(100%,0,0) 
skewX(-30deg);opacity:0}60%{-webkit-transform:skewX(20deg);transform:skewX(20deg);opacity:1}80%{-webkit-transform:skewX(-5deg);transform:skewX(-5deg);opacity:1}100%{-webkit-transform:none;transform:none;opacity:1}}.lightSpeedIn{-webkit-animation-name:lightSpeedIn;animation-name:lightSpeedIn;-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}@-webkit-keyframes lightSpeedOut{0%{opacity:1}100%{-webkit-transform:translate3d(100%,0,0) skewX(30deg);transform:translate3d(100%,0,0) skewX(30deg);opacity:0}}@keyframes lightSpeedOut{0%{opacity:1}100%{-webkit-transform:translate3d(100%,0,0) skewX(30deg);transform:translate3d(100%,0,0) skewX(30deg);opacity:0}}.lightSpeedOut{-webkit-animation-name:lightSpeedOut;animation-name:lightSpeedOut;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}@-webkit-keyframes rotateIn{0%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,-200deg);transform:rotate3d(0,0,1,-200deg);opacity:0}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateIn{0%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,-200deg);transform:rotate3d(0,0,1,-200deg);opacity:0}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:none;transform:none;opacity:1}}.rotateIn{-webkit-animation-name:rotateIn;animation-name:rotateIn}@-webkit-keyframes rotateInDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}100%{-webkit-transform-origin:left 
bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInDownLeft{-webkit-animation-name:rotateInDownLeft;animation-name:rotateInDownLeft}@-webkit-keyframes rotateInDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInDownRight{-webkit-animation-name:rotateInDownRight;animation-name:rotateInDownRight}@-webkit-keyframes rotateInUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInUpLeft{-webkit-animation-name:rotateInUpLeft;animation-name:rotateInUpLeft}@-webkit-keyframes rotateInUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,-90deg);transform:rotate3d(0,0,1,-90deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right 
bottom;-webkit-transform:rotate3d(0,0,1,-90deg);transform:rotate3d(0,0,1,-90deg);opacity:0}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInUpRight{-webkit-animation-name:rotateInUpRight;animation-name:rotateInUpRight}@-webkit-keyframes rotateOut{0%{-webkit-transform-origin:center;transform-origin:center;opacity:1}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,200deg);transform:rotate3d(0,0,1,200deg);opacity:0}}@keyframes rotateOut{0%{-webkit-transform-origin:center;transform-origin:center;opacity:1}100%{-webkit-transform-origin:center;transform-origin:center;-webkit-transform:rotate3d(0,0,1,200deg);transform:rotate3d(0,0,1,200deg);opacity:0}}.rotateOut{-webkit-animation-name:rotateOut;animation-name:rotateOut}@-webkit-keyframes rotateOutDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}}@keyframes rotateOutDownLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,45deg);transform:rotate3d(0,0,1,45deg);opacity:0}}.rotateOutDownLeft{-webkit-animation-name:rotateOutDownLeft;animation-name:rotateOutDownLeft}@-webkit-keyframes rotateOutDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}@keyframes rotateOutDownRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right 
bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}.rotateOutDownRight{-webkit-animation-name:rotateOutDownRight;animation-name:rotateOutDownRight}@-webkit-keyframes rotateOutUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}@keyframes rotateOutUpLeft{0%{-webkit-transform-origin:left bottom;transform-origin:left bottom;opacity:1}100%{-webkit-transform-origin:left bottom;transform-origin:left bottom;-webkit-transform:rotate3d(0,0,1,-45deg);transform:rotate3d(0,0,1,-45deg);opacity:0}}.rotateOutUpLeft{-webkit-animation-name:rotateOutUpLeft;animation-name:rotateOutUpLeft}@-webkit-keyframes rotateOutUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,90deg);transform:rotate3d(0,0,1,90deg);opacity:0}}@keyframes rotateOutUpRight{0%{-webkit-transform-origin:right bottom;transform-origin:right bottom;opacity:1}100%{-webkit-transform-origin:right bottom;transform-origin:right bottom;-webkit-transform:rotate3d(0,0,1,90deg);transform:rotate3d(0,0,1,90deg);opacity:0}}.rotateOutUpRight{-webkit-animation-name:rotateOutUpRight;animation-name:rotateOutUpRight}@-webkit-keyframes hinge{0%{-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-transform:rotate3d(0,0,1,80deg);transform:rotate3d(0,0,1,80deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}40%,80%{-webkit-transform:rotate3d(0,0,1,60deg);transform:rotate3d(0,0,1,60deg);-webkit-transform-origin:top left;transform-origin:top 
left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;opacity:1}100%{-webkit-transform:translate3d(0,700px,0);transform:translate3d(0,700px,0);opacity:0}}@keyframes hinge{0%{-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-transform:rotate3d(0,0,1,80deg);transform:rotate3d(0,0,1,80deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}40%,80%{-webkit-transform:rotate3d(0,0,1,60deg);transform:rotate3d(0,0,1,60deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;opacity:1}100%{-webkit-transform:translate3d(0,700px,0);transform:translate3d(0,700px,0);opacity:0}}.hinge{-webkit-animation-name:hinge;animation-name:hinge}@-webkit-keyframes rollIn{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg);transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg)}100%{opacity:1;-webkit-transform:none;transform:none}}@keyframes rollIn{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg);transform:translate3d(-100%,0,0) rotate3d(0,0,1,-120deg)}100%{opacity:1;-webkit-transform:none;transform:none}}.rollIn{-webkit-animation-name:rollIn;animation-name:rollIn}@-webkit-keyframes rollOut{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg);transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg)}}@keyframes rollOut{0%{opacity:1}100%{opacity:0;-webkit-transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg);transform:translate3d(100%,0,0) rotate3d(0,0,1,120deg)}}.rollOut{-webkit-animation-name:rollOut;animation-name:rollOut}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes 
zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.zoomIn{-webkit-animation-name:zoomIn;animation-name:zoomIn}@-webkit-keyframes zoomInDown{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInDown{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInDown{-webkit-animation-name:zoomInDown;animation-name:zoomInDown}@-webkit-keyframes zoomInLeft{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(10px,0,0);transform:scale3d(.475,.475,.475) translate3d(10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInLeft{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);transform:scale3d(.1,.1,.1) 
translate3d(-1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(10px,0,0);transform:scale3d(.475,.475,.475) translate3d(10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInLeft{-webkit-animation-name:zoomInLeft;animation-name:zoomInLeft}@-webkit-keyframes zoomInRight{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInRight{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInRight{-webkit-animation-name:zoomInRight;animation-name:zoomInRight}@-webkit-keyframes zoomInUp{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) 
translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomInUp{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomInUp{-webkit-animation-name:zoomInUp;animation-name:zoomInUp}@-webkit-keyframes zoomOut{0%{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}100%{opacity:0}}@keyframes zoomOut{0%{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}100%{opacity:0}}.zoomOut{-webkit-animation-name:zoomOut;animation-name:zoomOut}@-webkit-keyframes zoomOutDown{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomOutDown{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) 
translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomOutDown{-webkit-animation-name:zoomOutDown;animation-name:zoomOutDown}@-webkit-keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(42px,0,0);transform:scale3d(.475,.475,.475) translate3d(42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(-2000px,0,0);transform:scale(.1) translate3d(-2000px,0,0);-webkit-transform-origin:left center;transform-origin:left center}}@keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(42px,0,0);transform:scale3d(.475,.475,.475) translate3d(42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(-2000px,0,0);transform:scale(.1) translate3d(-2000px,0,0);-webkit-transform-origin:left center;transform-origin:left center}}.zoomOutLeft{-webkit-animation-name:zoomOutLeft;animation-name:zoomOutLeft}@-webkit-keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-42px,0,0);transform:scale3d(.475,.475,.475) translate3d(-42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(2000px,0,0);transform:scale(.1) translate3d(2000px,0,0);-webkit-transform-origin:right center;transform-origin:right center}}@keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-42px,0,0);transform:scale3d(.475,.475,.475) translate3d(-42px,0,0)}100%{opacity:0;-webkit-transform:scale(.1) translate3d(2000px,0,0);transform:scale(.1) translate3d(2000px,0,0);-webkit-transform-origin:right center;transform-origin:right 
center}}.zoomOutRight{-webkit-animation-name:zoomOutRight;animation-name:zoomOutRight}@-webkit-keyframes zoomOutUp{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}@keyframes zoomOutUp{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(0.55,.055,.675,.19);animation-timing-function:cubic-bezier(0.55,.055,.675,.19)}100%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(0.175,.885,.32,1);animation-timing-function:cubic-bezier(0.175,.885,.32,1)}}.zoomOutUp{-webkit-animation-name:zoomOutUp;animation-name:zoomOutUp}@-webkit-keyframes slideInDown{0%{-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInDown{0%{-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInDown{-webkit-animation-name:slideInDown;animation-name:slideInDown}@-webkit-keyframes 
slideInLeft{0%{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInLeft{0%{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInLeft{-webkit-animation-name:slideInLeft;animation-name:slideInLeft}@-webkit-keyframes slideInRight{0%{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInRight{0%{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInRight{-webkit-animation-name:slideInRight;animation-name:slideInRight}@-webkit-keyframes slideInUp{0%{-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}@keyframes slideInUp{0%{-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0);visibility:visible}100%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.slideInUp{-webkit-animation-name:slideInUp;animation-name:slideInUp}@-webkit-keyframes slideOutDown{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}@keyframes slideOutDown{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}.slideOutDown{-webkit-animation-name:slideOutDown;animation-name:slideOutDown}@-webkit-keyframes 
slideOutLeft{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}@keyframes slideOutLeft{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}.slideOutLeft{-webkit-animation-name:slideOutLeft;animation-name:slideOutLeft}@-webkit-keyframes slideOutRight{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}@keyframes slideOutRight{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}.slideOutRight{-webkit-animation-name:slideOutRight;animation-name:slideOutRight}@-webkit-keyframes slideOutUp{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}@keyframes slideOutUp{0%{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}100%{visibility:hidden;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}.slideOutUp{-webkit-animation-name:slideOutUp;animation-name:slideOutUp} diff --git a/website/css/bootstrap.min.css b/website/css/bootstrap.min.css index 28f154dec..5bb49291b 100644 --- a/website/css/bootstrap.min.css +++ b/website/css/bootstrap.min.css @@ -2,4 +2,4 @@ * Bootstrap v3.3.2 (http://getbootstrap.com) * Copyright 2011-2015 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - *//*! 
normalize.css v3.0.2 | MIT License | git.io/normalize */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid 
silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}select{background:#fff!important}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons 
Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyph
icon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:bef
ore{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.gl
yphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{conte
nt:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-loll
y:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media 
(min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote 
ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-
left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margi
n-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin
-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfo
ot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid #ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table 
th[class*=col-]{position:static;display:table-cell;float:none}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>t
head>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=checkbox]:focus,input[type=radio]:focus{outline:thin dotted;outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{cursor:not-allowed;background-color:#eee;opacity:1}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date],input[type=time],input[type=datetime-local],input[type=month]{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=time],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=time],.input-group-lg input[type=datetime-local],.input-group-lg 
input[type=month],input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px \9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.form-control-static{padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.form-group-sm .form-control{height:30px;line-height:30px}select[multiple].form-group-sm .form-control,textarea.form-group-sm .form-control{height:auto}.form-group-sm .form-control-static{height:30px;padding:5px 
10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.form-group-lg .form-control{height:46px;line-height:46px}select[multiple].form-group-lg .form-control,textarea.form-group-lg .form-control{height:auto}.form-group-lg .form-control-static{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio 
label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline 
.control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal .checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:14.33px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] 
.btn{pointer-events:none;cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.active,.btn-default.focus,.btn-default:active,.btn-default:focus,.btn-default:hover,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default .badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.active,.btn-primary.focus,.btn-primary:active,.btn-primary:focus,.btn-primary:hover,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] 
.btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.active,.btn-success.focus,.btn-success:active,.btn-success:focus,.btn-success:hover,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.active,.btn-info.focus,.btn-info:active,.btn-info:focus,.btn-info:hover,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] 
.btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.active,.btn-warning.focus,.btn-warning:active,.btn-warning:focus,.btn-warning:hover,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.active,.btn-danger.focus,.btn-danger:active,.btn-danger:focus,.btn-danger:hover,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] 
.btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none;visibility:hidden}.collapse.in{display:block;visibility:visible}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid;border-right:4px solid transparent;border-left:4px solid 
transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px solid}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right 
.dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open 
.dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn 
input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 
12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 
0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none;visibility:hidden}.tab-content>.active{display:block;visibility:visible}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important;visibility:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle 
.icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox 
input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default 
.navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default .btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse 
.navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse 
.btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid #ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:2;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 
10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 
7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-xs .badge{top:0;padding:1px 5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px 15px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding:48px 0}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success 
hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 
40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 
25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled 
.list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info{color:#31708f}a.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid 
#ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child 
td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child 
td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child 
th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-chi
ld>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading 
.badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well 
blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:absolute;top:0;right:0;left:0;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{min-height:16.43px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group 
.btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-weight:400;line-height:1.4;visibility:visible;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 
5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-weight:400;line-height:1.42857143;text-align:left;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" 
";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000;perspective:1000}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right 
top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;margin-top:-10px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-15px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-15px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-15px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 
a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important;visibility:hidden!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media 
(min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} \ No newline at end of file + *//*! 
normalize.css v3.0.2 | MIT License | git.io/normalize */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid 
silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}select{background:#fff!important}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons 
Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyph
icon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:bef
ore{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.gl
yphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{conte
nt:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-loll
y:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media 
(min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote 
ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-
left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margi
n-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin
-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfo
ot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid #ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table 
th[class*=col-]{position:static;display:table-cell;float:none}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>t
head>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=checkbox]:focus,input[type=radio]:focus{outline:thin dotted;outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{cursor:not-allowed;background-color:#eee;opacity:1}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date],input[type=time],input[type=datetime-local],input[type=month]{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=time],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=time],.input-group-lg input[type=datetime-local],.input-group-lg 
input[type=month],input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px \9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.form-control-static{padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.form-group-sm .form-control{height:30px;line-height:30px}select[multiple].form-group-sm .form-control,textarea.form-group-sm .form-control{height:auto}.form-group-sm .form-control-static{height:30px;padding:5px 
10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.form-group-lg .form-control{height:46px;line-height:46px}select[multiple].form-group-lg .form-control,textarea.form-group-lg .form-control{height:auto}.form-group-lg .form-control-static{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio 
label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline 
.control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal .checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:14.33px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] 
.btn{pointer-events:none;cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.active,.btn-default.focus,.btn-default:active,.btn-default:focus,.btn-default:hover,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default .badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.active,.btn-primary.focus,.btn-primary:active,.btn-primary:focus,.btn-primary:hover,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] 
.btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.active,.btn-success.focus,.btn-success:active,.btn-success:focus,.btn-success:hover,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.active,.btn-info.focus,.btn-info:active,.btn-info:focus,.btn-info:hover,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] 
.btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.active,.btn-warning.focus,.btn-warning:active,.btn-warning:focus,.btn-warning:hover,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.active,.btn-danger.focus,.btn-danger:active,.btn-danger:focus,.btn-danger:hover,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] 
.btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none;visibility:hidden}.collapse.in{display:block;visibility:visible}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid;border-right:4px solid transparent;border-left:4px solid 
transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px solid}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right 
.dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open 
.dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn 
input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 
12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 
0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none;visibility:hidden}.tab-content>.active{display:block;visibility:visible}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important;visibility:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle 
.icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox 
input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default 
.navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default .btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse 
.navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse 
.btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid #ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:2;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 
10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 
7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-xs .badge{top:0;padding:1px 5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px 15px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding:48px 0}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success 
hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 
40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 
25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled 
.list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info{color:#31708f}a.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid 
#ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child 
td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child 
td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child 
th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-chi
ld>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading 
.badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well 
blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:absolute;top:0;right:0;left:0;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{min-height:16.43px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group 
.btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-weight:400;line-height:1.4;visibility:visible;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 
5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-weight:400;line-height:1.42857143;text-align:left;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" 
";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000;perspective:1000}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right 
top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;margin-top:-10px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-15px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-15px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-15px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 
a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important;visibility:hidden!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media 
(min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} diff --git a/website/font-awesome/css/font-awesome.min.css b/website/font-awesome/css/font-awesome.min.css index 24fcc04c4..123de921d 100644 --- a/website/font-awesome/css/font-awesome.min.css +++ b/website/font-awesome/css/font-awesome.min.css @@ -1,4 +1,4 @@ /*! 
* Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.3.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.3.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.3.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.3.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.3.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.3.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;transform:translate(0, 0)}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{c
ontent:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content
:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hd
d-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\
f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.
fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f1
4d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f18
0"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-s
team-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-genderless:before,.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{co
ntent:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venu
s:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"} \ No newline at end of file + */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.3.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.3.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.3.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.3.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.3.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.3.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;transform:translate(0, 
0)}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root 
.fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content
:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f06
6"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:bef
ore{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-
linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-
o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-
square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-fours
quare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:be
fore{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-genderless:before,.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:
"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:befor
e{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"} diff --git a/website/font-awesome/fonts/fontawesome-webfont.svg b/website/font-awesome/fonts/fontawesome-webfont.svg index 1ee89d436..0dad44510 100644 --- a/website/font-awesome/fonts/fontawesome-webfont.svg +++ b/website/font-awesome/fonts/fontawesome-webfont.svg @@ -562,4 +562,4 @@ - \ No newline at end of file + diff --git a/website/font-awesome/less/variables.less b/website/font-awesome/less/variables.less index d526064c8..de36788e9 100644 --- a/website/font-awesome/less/variables.less +++ b/website/font-awesome/less/variables.less @@ -603,4 +603,3 @@ @fa-var-youtube: "\f167"; @fa-var-youtube-play: "\f16a"; @fa-var-youtube-square: "\f166"; - diff --git a/website/font-awesome/scss/_variables.scss b/website/font-awesome/scss/_variables.scss index 9b7210e23..b637ff1bf 100644 --- a/website/font-awesome/scss/_variables.scss +++ b/website/font-awesome/scss/_variables.scss @@ -603,4 +603,3 @@ $fa-var-yen: "\f157"; $fa-var-youtube: "\f167"; $fa-var-youtube-play: "\f16a"; $fa-var-youtube-square: "\f166"; - diff --git a/website/fonts/glyphicons-halflings-regular.svg 
b/website/fonts/glyphicons-halflings-regular.svg index 94fb5490a..187805af6 100644 --- a/website/fonts/glyphicons-halflings-regular.svg +++ b/website/fonts/glyphicons-halflings-regular.svg @@ -285,4 +285,4 @@ - \ No newline at end of file + diff --git a/website/js/bootstrap.min.js b/website/js/bootstrap.min.js index c6d36920b..7c567aabe 100644 --- a/website/js/bootstrap.min.js +++ b/website/js/bootstrap.min.js @@ -4,4 +4,4 @@ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.handler.apply(this,arguments):void 0}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.2",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var 
e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a(f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.2",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")&&(c.prop("checked")&&this.$element.hasClass("active")?a=!1:b.find(".active").removeClass("active")),a&&c.prop("checked",!this.$element.hasClass("active")).trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active"));a&&this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var 
d=a(c.target);d.hasClass("btn")||(d=d.closest(".btn")),b.call(d,"toggle"),c.preventDefault()}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=this.sliding=this.interval=this.$active=this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.2",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var 
b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));return a>this.$items.length-1||0>a?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){return this.sliding?void 0:this.slide("next")},c.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" ")).addClass("active"),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var 
g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&"show"==b&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a(this.options.trigger).filter('[href="#'+b.id+'"], [data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.2",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0,trigger:'[data-toggle="collapse"]'},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse 
in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":a.extend({},e.data(),{trigger:this});c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){b&&3===b.which||(a(e).remove(),a(f).each(function(){var 
d=a(this),e=c(d),f={relatedTarget:this};e.hasClass("open")&&(e.trigger(b=a.Event("hide.bs.dropdown",f)),b.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger("hidden.bs.dropdown",f)))}))}function c(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.2",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=c(e),g=f.hasClass("open");if(b(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a('