diff --git a/CHANGELOG.md b/CHANGELOG.md index f43961dd..2389f1ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,24 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2.6.1] - 2021-03-22 +### Added +- Added Name tag to EIPNat in Network.template +- Added support for Milan and Cape Town +- EBS volumes provisioned for DCV sessions (Windows/Linux) are now tagged properly +- Support for Graviton2 instances +- Ability to disable web APIs via @disabled decorator + +### Changed +- Updated EFA to 1.11.1 +- Updated Python 3.7.1 to Python 3.7.9 +- Updated awscli, boto3, and botocore to support instances announced at Re:Invent 2020 +- Use new gp3 volumes instead of gp2 since they're more cost effective and provide 3000 IOPS baseline +- Removed SchedulerPublicIPAllocation from Scheduler.template as it's no longer used +- Updated CentOS, ALI2 and RHEL76 AMIs +- Instances with NVME instance store don't become unresponsive post-restart due to filesystem checks enforcement +- ElasticSearch is now deployed in private subnets + ## [2.6.0] - 2020-10-29 ### Added - Users can now launch Windows instances with DCV diff --git a/NOTICE.txt b/NOTICE.txt index 267b67df..35307cb9 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ Scale Out Computing on AWS -Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/ or in the "license" file accompanying this file. 
This file is distributed on an "AS IS" BASIS, diff --git a/README.adoc b/README.adoc index 8d8489db..9e6a0b11 100644 --- a/README.adoc +++ b/README.adoc @@ -7,34 +7,7 @@ https://awslabs.github.io/scale-out-computing-on-aws/[https://awslabs.github.io/ == :rocket: How to install Scale-Out Computing on AWS -=== 1-Click installer - -Visit https://aws.amazon.com/solutions/scale-out-computing-on-aws[https://aws.amazon.com/solutions/scale-out-computing-on-aws] - -:warning:1-Click installer is great for PoC or demos. For production workload, it's recommended to go with "Custom Build" instead. - - -=== Custom Build - -. Clone this git repository -+ -```bash -git clone https://github.com/awslabs/scale-out-computing-on-aws -``` - -. Run the following command to create your build (support Python2 and Python3): -+ -```bash -python source/manual_build.py -``` - -. Output will be created under `source/dist/` - -. Upload `source/dist/` folder to your own S3 bucket - -. Launch CloudFormation and use `scale-out-computing-on-aws.template` as base template - -Refer to https://awslabs.github.io/scale-out-computing-on-aws/install-soca-cluster/[https://awslabs.github.io/scale-out-computing-on-aws/install-soca-cluster/] for installation instructions. +Refer to https://awslabs.github.io/scale-out-computing-on-aws/tutorials/install-soca-cluster/[https://awslabs.github.io/scale-out-computing-on-aws/tutorials/install-soca-cluster/] for installation instructions. == :pencil2: File Structure Scale-Out Computing on AWS project consists in a collection of CloudFormation templates, Shell scripts and Python code. @@ -65,7 +38,7 @@ Scale-Out Computing on AWS project consists in a collection of CloudFormation te *** -Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/deployment/build-open-source-dist.sh b/deployment/build-open-source-dist.sh deleted file mode 100755 index 7fd23d40..00000000 --- a/deployment/build-open-source-dist.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash -# -# This assumes all of the OS-level configuration has been completed and git repo has already been cloned -# -# This script should be run from the repo's deployment directory -# cd deployment -# ./build-s3-dist.sh solution-name -# -# Paramenters: -# - solution-name: name of the solution for consistency - -# Check to see if input has been provided: -if [ -z "$1" ]; then - echo "Please provide the trademark approved solution name for the open source package." - echo "For example: ./build-open-source-dist.sh trademarked-solution-name" - exit 1 -fi - -# Get reference for all important folders -source_template_dir="$PWD" -dist_dir="$source_template_dir/open-source" -dist_template_dir="$dist_dir/deployment" -source_dir="$source_template_dir/../source" - -echo "------------------------------------------------------------------------------" -echo "[Init] Clean old open-source folder" -echo "------------------------------------------------------------------------------" -echo "rm -rf $dist_dir" -rm -rf $dist_dir -echo "mkdir -p $dist_dir" -mkdir -p $dist_dir -echo "mkdir -p $dist_template_dir" -mkdir -p $dist_template_dir - -echo "------------------------------------------------------------------------------" -echo "[Packing] Build Script" -echo "------------------------------------------------------------------------------" -echo "cp $source_template_dir/build-s3-dist.sh $dist_template_dir" -cp $source_template_dir/build-s3-dist.sh $dist_template_dir -echo "cp $source_template_dir/run-unit-tests.sh $dist_template_dir" -cp $source_template_dir/run-unit-tests.sh $dist_template_dir - -echo "------------------------------------------------------------------------------" -echo "[Packing] Source Folder" -echo 
"------------------------------------------------------------------------------" -echo "cp -r $source_dir $dist_dir" -cp -r $source_dir $dist_dir -echo "cp $source_template_dir/../LICENSE.txt $dist_dir" -cp $source_template_dir/../LICENSE.txt $dist_dir -echo "cp $source_template_dir/../NOTICE.txt $dist_dir" -cp $source_template_dir/../NOTICE.txt $dist_dir -echo "cp $source_template_dir/../README.md $dist_dir" -cp $source_template_dir/../README.md $dist_dir -echo "cp $source_template_dir/../CODE_OF_CONDUCT.md $dist_dir" -cp $source_template_dir/../CODE_OF_CONDUCT.md $dist_dir -echo "cp $source_template_dir/../CONTRIBUTING.md $dist_dir" -cp $source_template_dir/../CONTRIBUTING.md $dist_dir -echo "cp $source_template_dir/../CHANGELOG.md $dist_dir" -cp $source_template_dir/../CHANGELOG.md $dist_dir -echo "cp $source_template_dir/../THIRD_PARTY_LICENSES.txt $dist_dir" -cp $source_template_dir/../THIRD_PARTY_LICENSES.txt $dist_dir - -echo "------------------------------------------------------------------------------" -echo "[Packing] Create GitHub (open-source) zip file" -echo "------------------------------------------------------------------------------" -echo "cd $dist_dir" -cd $dist_dir -echo "zip -q -r9 ../$1.zip *" -zip -q -r9 ../$1.zip * -echo "Clean up open-source folder" -echo "rm -rf *" -rm -rf * -echo "mv ../$1.zip ." -mv ../$1.zip . 
-echo "Completed building $1.zip dist" \ No newline at end of file diff --git a/deployment/build-s3-dist.sh b/deployment/build-s3-dist.sh deleted file mode 100755 index 5b9f941f..00000000 --- a/deployment/build-s3-dist.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# -# This assumes all of the OS-level configuration has been completed and git repo has already been cloned -# -# This script should be run from the repo's deployment directory -# cd deployment -# ./build-s3-dist.sh source-bucket-base-name solution-name version-code -# -# Paramenters: -# - source-bucket-base-name: Name for the S3 bucket location where the template will source the Lambda -# code from. The template will append '-[region_name]' to this bucket name. -# For example: ./build-s3-dist.sh solutions my-solution v1.0.0 -# The template will then expect the source code to be located in the solutions-[region_name] bucket -# -# - solution-name: name of the solution for consistency -# -# - version-code: version of the package - -# Check to see if input has been provided: -if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then - echo "Please provide the base source bucket name, trademark approved solution name and version where the lambda code will eventually reside." 
- echo "For example: ./build-s3-dist.sh solutions trademarked-solution-name v1.0.0" - exit 1 -fi - -# Get reference for all important folders -template_dir="$PWD" -template_dist_dir="$template_dir/global-s3-assets" -build_dist_dir="$template_dir/regional-s3-assets" -source_dir="$template_dir/../source" - -echo "------------------------------------------------------------------------------" -echo "[Init] Clean old dist, node_modules and bower_components folders" -echo "------------------------------------------------------------------------------" -echo "rm -rf $template_dist_dir" -rm -rf $template_dist_dir -echo "mkdir -p $template_dist_dir" -mkdir -p $template_dist_dir -echo "rm -rf $build_dist_dir" -rm -rf $build_dist_dir -echo "mkdir -p $build_dist_dir" -mkdir -p $build_dist_dir - -echo "------------------------------------------------------------------------------" -echo "[Packing] Global Assets" -echo "------------------------------------------------------------------------------" -echo "------------------------------------------------------------------------------" -echo "[Packing] Copy all templates for CfnNagScan and force .template extension" -echo "------------------------------------------------------------------------------" -echo "mkdir -p $template_dist_dir" -mkdir -p $template_dist_dir -echo "cp ../source/scale-out-computing-on-aws.template $template_dist_dir/" -cp ../source/scale-out-computing-on-aws.template $template_dist_dir/ -echo "cp ../source/install-with-existing-resources.template $template_dist_dir/" -cp ../source/install-with-existing-resources.template $template_dist_dir/ -echo "cp ../source/README.txt $template_dist_dir/" -cp ../source/README.txt $template_dist_dir/ - - -echo "Updating code source bucket in template with $1-reference" -replace="s/%%BUCKET_NAME%%/$1-reference/g" -echo "sed -i '' -e $replace $template_dist_dir/scale-out-computing-on-aws.template" -sed -i '' -e $replace $template_dist_dir/*.template 
-replace="s/%%SOLUTION_NAME%%/$2/g" -echo "sed -i '' -e $replace $template_dist_dir/scale-out-computing-on-aws.template" -sed -i '' -e $replace $template_dist_dir/*.template -replace="s/%%VERSION%%/$3/g" -echo "sed -i '' -e $replace $template_dist_dir/scale-out-computing-on-aws.template" -sed -i '' -e $replace $template_dist_dir/*.template -echo "cp -r $source_dir/scripts $template_dist_dir" -cp -r $source_dir/scripts $template_dist_dir -echo "cp -r $source_dir/templates $template_dist_dir" -cp -r $source_dir/templates $template_dist_dir - -echo "tar -czf $template_dist_dir/soca.tar.gz $source_dir/soca" -cd $source_dir/soca -tar -czf $template_dist_dir/soca.tar.gz * - - -echo "------------------------------------------------------------------------------" -echo "[Packing] Regional Assets" -echo "------------------------------------------------------------------------------" -echo "cp -r $source_dir/scripts $build_dist_dir" -cp -r $source_dir/scripts $build_dist_dir -echo "cp -r $source_dir/templates $build_dist_dir" -cp -r $source_dir/templates $build_dist_dir diff --git a/deployment/run-unit-tests.sh b/deployment/run-unit-tests.sh deleted file mode 100755 index efa50c8f..00000000 --- a/deployment/run-unit-tests.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# -# This assumes all of the OS-level configuration has been completed and git repo has already been cloned -# -# This script should be run from the repo's deployment directory -# cd deployment -# ./run-unit-tests.sh -# -echo "Unit tests have moved to internal/content_scan.sh" \ No newline at end of file diff --git a/docs/imgs/disable-feature-1.png b/docs/imgs/disable-feature-1.png new file mode 100644 index 00000000..aee5671e Binary files /dev/null and b/docs/imgs/disable-feature-1.png differ diff --git a/docs/index.md b/docs/index.md index a68c8fb9..c87e649e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -10,14 +10,14 @@ Scale-Out Computing on AWS is a solution that helps customers more easily deploy This 
solution is designed to provide a production ready reference implementation to be a starting point for deploying an AWS environment to run scale-out workloads, allowing you to focus on running simulations designed to solve complex computational problems. ____ ## Easy installation -[Installation of your Scale-Out Computing on AWS cluster](install-soca-cluster/) is fully automated and managed by CloudFormation +[Installation of your Scale-Out Computing on AWS cluster](tutorials/install-soca-cluster/) is fully automated and managed by CloudFormation !!!info "Did you know?" - You can have multiple Scale-Out Computing on AWS clusters on the same AWS account - Scale-Out Computing on AWS comes with a list of unique tags, making resource tracking easy for AWS Administrators ## Access your cluster in 1 click -You can [access your Scale-Out Computing on AWS cluster](access-soca-cluster/) either using DCV (Desktop Cloud Visualization)[^1] or through SSH. +You can [access your Scale-Out Computing on AWS cluster](tutorials/access-soca-cluster/) either using DCV (Desktop Cloud Visualization)[^1] or through SSH. 
[^1]: [DCV](https://docs.aws.amazon.com/dcv/latest/adminguide/what-is-dcv.html) is a remote visualization technology that enables users to easily and securely connect to graphic-intensive 3D applications hosted on a remote high-performance server.* @@ -38,7 +38,7 @@ user@host$ qsub myscript.sh ~~~ !!!info - - [Check our Web-Based utility to generate you submission command](job-configuration-generator/) + - [Check our Web-Based utility to generate you submission command](tutorials/job-configuration-generator/) - [Refer to this page for tutorial and examples](tutorials/launch-your-first-job/) - [Refer to this page to list all supported parameters](tutorials/integration-ec2-job-parameters/) - Jobs can also be submitted [via HTTP API](web-interface/control-hpc-job-with-http-web-rest-api/) or [via web interface](web-interface/submit-hpc-jobs-web-based-interface/) @@ -63,8 +63,8 @@ Customers can integrate their Centos7/Rhel7/AmazonLinux2 AMI automatically by si ## Web User Interface Scale-Out Computing on AWS includes a simple web ui designed to simplify user interactions such as: -- [Start/Stop DCV sessions in 1 click](access-soca-cluster/#graphical-access-using-dcv) -- [Download private key in both PEM or PPK format](access-soca-cluster/#ssh-access) +- [Start/Stop DCV sessions in 1 click](tutorials/access-soca-cluster/#graphical-access-using-dcv) +- [Download private key in both PEM or PPK format](tutorials/access-soca-cluster/#ssh-access) - [Check the queue and job status in real-time](web-interface/manage-ldap-users/) - [Add/Remove LDAP users ](web-interface/manage-ldap-users/) - [Access the analytic dashboard](web-interface/my-activity/) diff --git a/docs/overrides/partials/footer.html b/docs/overrides/partials/footer.html index d62106e6..ff05515f 100644 --- a/docs/overrides/partials/footer.html +++ b/docs/overrides/partials/footer.html @@ -44,7 +44,7 @@ {% endif %} Made with Material for MkDocs
- Privacy | Site terms | © 2020, Amazon Web Services, Inc. or its affiliates. All rights reserved. + Privacy | Site terms | © 2021, Amazon Web Services, Inc. or its affiliates. All rights reserved. diff --git a/docs/security/update-soca-dns-ssl-certificate.md b/docs/security/update-soca-dns-ssl-certificate.md index 556a3beb..cb1f00b8 100644 --- a/docs/security/update-soca-dns-ssl-certificate.md +++ b/docs/security/update-soca-dns-ssl-certificate.md @@ -73,7 +73,7 @@ Make sure your browser is detecting your new SSL certificate correctly. ![](../imgs/cert-9.png) -Finally, [create a new DCV session](../../access-soca-cluster/#graphical-access-using-dcv) and verify the endpoint is using your new DNS name +Finally, [create a new DCV session](../../web-interface/create-virtual-desktops/) and verify the endpoint is using your new DNS name ![](../imgs/cert-11.png) diff --git a/docs/access-soca-cluster.md b/docs/tutorials/access-soca-cluster.md similarity index 91% rename from docs/access-soca-cluster.md rename to docs/tutorials/access-soca-cluster.md index de75c051..76e06a8a 100644 --- a/docs/access-soca-cluster.md +++ b/docs/tutorials/access-soca-cluster.md @@ -9,7 +9,7 @@ title: How to access Scale-Out Computing on AWS To access your Scale-Out Computing on AWS cluster using SSH protocol, simply click "SSH Access" on the left sidebar and follow the instructions. Scale-Out Computing on AWS will let you download your private key either in PEM or PPK format. 
-![](imgs/access-1.png) +![](../imgs/access-1.png) !!!info "SSH to an instance in a Private Subnet" If you need to access an instance that is in a Private (non-routable) Subnet, you can use ssh-agent to do this: @@ -35,4 +35,4 @@ To access your Scale-Out Computing on AWS cluster using SSH protocol, simply cli ## Graphical access using Windows/Linux virtual desktop -Refer to [this page to learn how to launch your own Windows/Linux session and access SOCA via your virtual desktop](../web-interface/create-virtual-desktops/) \ No newline at end of file +Refer to [this page to learn how to launch your own Windows/Linux session and access SOCA via your virtual desktop](../../web-interface/create-virtual-desktops/) \ No newline at end of file diff --git a/docs/install-soca-cluster.md b/docs/tutorials/install-soca-cluster.md similarity index 91% rename from docs/install-soca-cluster.md rename to docs/tutorials/install-soca-cluster.md index 6b5a3153..7de2d238 100644 --- a/docs/install-soca-cluster.md +++ b/docs/tutorials/install-soca-cluster.md @@ -6,6 +6,8 @@ title: Install your Scale-Out Computing on AWS cluster You can use the [1-Click installer for quick proof-of-concept (PoC), demo and/or development work](https://aws.amazon.com/solutions/scale-out-computing-on-aws/). This installer is hosted on an AWS controlled S3 bucket and customization is limited, so we recommend downloading building your own SOCA (see below) for your production. Always refers to the Github repository for the latest SOCA version. 
+[1-Click Install](https://console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/new?&templateURL=https://s3.amazonaws.com/solutions-reference/scale-out-computing-on-aws/latest/scale-out-computing-on-aws.template){: .md-button } + ## Download Scale-Out Computing on AWS @@ -73,18 +75,18 @@ Download the tarball from [https://github.com/awslabs/scale-out-computing-on-aws Go to your Amazon S3 console and click "Create Bucket" -![](imgs/install-1.png) +![](../imgs/install-1.png) Choose a name and a region then click "Create" -![](imgs/install-2.png) +![](../imgs/install-2.png) !!! warning "Avoid un-necessary charge" It's recommended to create your bucket in the same region as your are planning to use Scale-Out Computing on AWS to avoid Cross-Regions charge ( See Data Transfer ) Once your bucket is created, select it and click "Upload". Simply drag and drop your build folder (`r6l1` in this example) to upload the content of the folder to S3. -![](imgs/install-3.png) +![](../imgs/install-3.png) !!! info You can use the same bucket to host multiple Scale-Out Computing on AWS clusters @@ -94,11 +96,11 @@ Once your bucket is created, select it and click "Upload". Simply drag and drop On your S3 bucket, click on the folder you just uploaded. -![](imgs/install-4.png) +![](../imgs/install-4.png) Your install template is located under `//scale-out-computing-on-aws.template`. Click on the object to retrieve the "Object URL" -![](imgs/install-5.png) +![](../imgs/install-5.png) !!! info "Want to use your existing AWS resources?" Refer to `install-with-existing-resources.template` if you want to use Scale-Out Computing on AWS with your existing resources. 
@@ -109,7 +111,7 @@ Your install template is located under `//scale-out-co Clicking on the link will open the CloudFormation console and pre-fill the **Install Location** parameters: -![](imgs/install-6.png) +![](../imgs/install-6.png) Under stack details, choose the stack name (do not use uppercase or it will break your ElasticSearch cluster). @@ -122,7 +124,7 @@ Under stack details, choose the stack name (do not use uppercase or it will brea - LDAP Parameters: Create a default LDAP user -![](imgs/install-7.png) +![](../imgs/install-7.png) !!!warning "Marketplace AMIs" If you choose to use the CentOS 7 image, [you must subscribe to CentOS 7 in the AWS Marketplace](https://aws.amazon.com/marketplace/pp/B00O7WM7QW/), to allow the installer to access the AMI during installation. @@ -134,21 +136,21 @@ This solution supports a heterogeneous environment. After installation, administ Click Next two times and make sure to check "Capabilities" section. One done simply click "Create Stack". The installation procedure will take about 45 minutes. -![](imgs/install-8.png) +![](../imgs/install-8.png) !!! info "CREATE_FAILED" If you hit any issue during the installation, refer to the 'CREATE_FAILED' component and find the root cause by referring at "Physical ID" - ![](imgs/install-12.png) + ![](../imgs/install-12.png) ## Post Install Verifications Wait for CloudFormation stacks to be "CREATE_COMPLETE", then select your base stack and click "Outputs" -![](imgs/install-9.png) +![](../imgs/install-9.png) Output tabs give you information about the SSH IP for the master, link to the web interface or ElasticSearch. -![](imgs/install-10.png) +![](../imgs/install-10.png) Even though Cloudformation resources are created, your environment might not be completely ready. To confirm whether or not Scale-Out Computing on AWS is ready, try to SSH to the scheduler IP. 
If your Scale-Out Computing on AWS cluster is not ready, your SSH will be rejected as shown below: @@ -182,7 +184,7 @@ Cluster: soca-cluster-v1 At this point, you will be able to access the web interface and log in with the default LDAP user you specified at launch creation -![](imgs/install-11.png) +![](../imgs/install-11.png) ## What if SSH port (22) is blocked by your IT? @@ -190,15 +192,15 @@ Scale-Out Computing on AWS supports [AWS Session Manager](https://docs.aws.amazo First, access your AWS EC2 Console and select your Scheduler instance, then click "Connect" button -![](imgs/session-1.png){: style="height:250x;width:500px"} +![](../imgs/session-1.png){: style="height:250x;width:500px"} Select "Session Manager" and click Connect -![](imgs/session-2.png){: style="height:300px;width:550px"} +![](../imgs/session-2.png){: style="height:300px;width:550px"} You now have access to a secure shell directly within your browser -![](imgs/session-3.png) +![](../imgs/session-3.png) ## Enable Termination Protection @@ -244,5 +246,5 @@ When enabled, the following information is collected and sent to AWS: ## What's next ? -Learn [how to access your cluster](access-soca-cluster.md), [how to submit your first job](tutorials/launch-your-first-job.md) or even [how to change your Scale-Out Computing on AWS DNS](security/update-soca-dns-ssl-certificate.md) to match your personal domain name. +Learn [how to access your cluster](../tutorials/access-soca-cluster.md), [how to submit your first job](../tutorials/launch-your-first-job.md) or even [how to change your Scale-Out Computing on AWS DNS](../security/update-soca-dns-ssl-certificate.md) to match your personal domain name. 
diff --git a/docs/tutorials/integration-ec2-job-parameters.md b/docs/tutorials/integration-ec2-job-parameters.md index f56d50e4..65c3a617 100644 --- a/docs/tutorials/integration-ec2-job-parameters.md +++ b/docs/tutorials/integration-ec2-job-parameters.md @@ -8,7 +8,7 @@ Below is a list of parameters you can specify when you request your simulation t !!!info If you don't specify them, your job will use the default values configured for your queue (see `/apps/soca/$SOCA_CONFIGURATION/cluster_manager/settings/queue_mapping.yml`) ____ - You can use [the web-based simulator](../../job-configuration-generator/) to generate your qsub command very easily. + You can use [the web-based simulator](../job-configuration-generator/) to generate your qsub command very easily. ## Compute @@ -191,12 +191,13 @@ Below is a list of parameters you can specify when you request your simulation t ##### Mount existing FSx -- Description: Mount an existing FSx to all compute nodes if `fsx_lustre` points to a FSx filesystem ID -- Example: `-l fsx_lustre=fs-xxxx` +- Description: Mount an existing FSx to all compute nodes if `fsx_lustre` points to a FSx filesystem's DNS name +- Example: `-l fsx_lustre=fs-xxxx.fsx.region.amazonaws.com` !!!info - FSx partitions are mounted as `/fsx`. 
This can be changed if needed - Make sure your FSx for Luster configuration is correct (use SOCA VPC and correct IAM roles) + - [Make sure to use the Filesytem's DNS name](../../storage/launch-job-with-fsx/#how-to-connect-to-a-permanentexisting-fsx) #### fsx_lustre_size diff --git a/docs/job-configuration-generator.md b/docs/tutorials/job-configuration-generator.md similarity index 82% rename from docs/job-configuration-generator.md rename to docs/tutorials/job-configuration-generator.md index a32b0545..8a897a96 100644 --- a/docs/job-configuration-generator.md +++ b/docs/tutorials/job-configuration-generator.md @@ -5,9 +5,9 @@ title: Job Submission Generator !!!info "Automatic parameter selection" - - You can manually specify parameters at job submission using the command below. If needed, all parameters [can also be automatically configured at queue level](../tutorials/integration-ec2-job-parameters/#how-to-use-custom-parameters). + - You can manually specify parameters at job submission using the command below. If needed, all parameters [can also be automatically configured at queue level](../../tutorials/integration-ec2-job-parameters/#how-to-use-custom-parameters). - Job will use the default parameters configured for its queue unless the parameters are explicitly specified during submission (**job parameters override queue parameters**). - - [Refer to this page](../tutorials/launch-your-first-job/#examples) for additional examples. + - [Refer to this page](../../tutorials/launch-your-first-job/#examples) for additional examples. @@ -98,24 +98,24 @@ myscript.sh

Compute parameters:

- Documentation + Documentation
Must be a number greater than 0
- Documentation + Documentation - Documentation + Documentation
Image name must start with "ami-"
- Documentation + Documentation
Must be centos7, rhel7 or amazonlinux2
@@ -125,20 +125,20 @@ myscript.sh - Documentation + Documentation
Subnet name must start with "sub-"
- Documentation + Documentation
Spot Price must be a float (eg 1.2) or auto (match OD price)
- Documentation + Documentation
Must be a number
@@ -147,7 +147,7 @@ myscript.sh
{{spot_allocation_error_price}}
- Documentation + Documentation
Must be either lowest-cost (default) or capacity-optimized
@@ -160,14 +160,14 @@ myscript.sh - Documentation + Documentation
Root Size must be a number
- Documentation + Documentation
Scratch Size must be a number
@@ -175,7 +175,7 @@ myscript.sh - Documentation + Documentation
Provisioned IO/s must be a number
@@ -183,20 +183,20 @@ myscript.sh - Documentation + Documentation - Documentation + Documentation
Size must be a number

Flags:

- I want to use EFA Documentation
- I do not want to use Placement Group (enabled by default) Documentation
- I want to enable HyperThreading (disabled by default) Documentation
- I want to retain my EBS disks (disabled by default) Documentation
- I want my job to only run on Reserved instances Documentation
+ I want to use EFA Documentation
+ I do not want to use Placement Group (enabled by default) Documentation
+ I want to enable HyperThreading (disabled by default) Documentation
+ I want to retain my EBS disks (disabled by default) Documentation
+ I want my job to only run on Reserved instances Documentation
diff --git a/docs/tutorials/launch-your-first-job.md b/docs/tutorials/launch-your-first-job.md index 72524cd9..ab483e59 100644 --- a/docs/tutorials/launch-your-first-job.md +++ b/docs/tutorials/launch-your-first-job.md @@ -7,7 +7,7 @@ title: Launch your first job * Jobs start on average 5 minutes after submission (this value may differ depending on the number and type of compute resource you need to be provisioned). [You can reduce this cold-start by pre-configuring your AMI](../../tutorials/reduce-compute-node-launch-time-with-custom-ami/) * Nodes are ephemeral and tie to a given job id. If needed, [you can launch 'AlwaysOn' instances](../../tutorials/launch-always-on-instances/) that will be running 24/7. * If your simulation requires a lot of disk I/O, [it's recommended to use high performance SSD-NVMe](../../tutorials/integration-ec2-job-parameters/#storage) disks (using /scratch location) and not default $HOME path - * Use [the web-based simulator](../../job-configuration-generator/) to generate your qsub/script command. + * Use [the web-based simulator](../../tutorials/job-configuration-generator/) to generate your qsub/script command. !!!success "Web Based Job Submission" In addition of regular qsub, SOCA supports [web based job submission](../../web-interface/submit-hpc-jobs-web-based-interface/) as well as via [HTTP REST API](../../web-interface/control-hpc-job-with-http-web-rest-api/) @@ -187,7 +187,7 @@ The web ui will also reflect this change. ## Examples !!!example "Job Submission Simulator" - Use [the web-based simulator](../../job-configuration-generator/) to generate your qsub/script command. + Use [the web-based simulator](../../tutorials/job-configuration-generator/) to generate your qsub/script command. 
!!!info "How to set a parameter" - In a script: #PBS -l parameter_name=parameter_value,parameter_name_2=parameter_value_2 diff --git a/docs/web-interface/control-hpc-job-with-http-web-rest-api.md b/docs/web-interface/control-hpc-job-with-http-web-rest-api.md index 698307a1..63f87c86 100644 --- a/docs/web-interface/control-hpc-job-with-http-web-rest-api.md +++ b/docs/web-interface/control-hpc-job-with-http-web-rest-api.md @@ -135,5 +135,5 @@ If the command is valid, you will receive a validation message: This time the output will return an error: ~~~json -{"succes": false, "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)"} -~~~ \ No newline at end of file +{"success": false, "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)"} +~~~ diff --git a/docs/web-interface/disable-api.md b/docs/web-interface/disable-api.md new file mode 100644 index 00000000..7286c867 --- /dev/null +++ b/docs/web-interface/disable-api.md @@ -0,0 +1,85 @@ +--- +title: Disable API +--- + +If required, SOCA administrators can disable web API or views by using `@disabled` decorator + +## Disable an API + +First, let's confirm a user can submit a job via the `/api/scheduler/job` endpoint: + +~~~bash hl_lines="6" + curl -k -X POST \ +> -H "X-SOCA-TOKEN: xxx" \ +> -H "X-SOCA-USER: mickael" \ +> -F payload="IyEvYmluL2Jhc2gKI1BCUyAtTiB0ZXN0am9iCiNQQlMgLVYgLWogb2UgLW8gdGVzdGpvYl9vdXRwdXQucWxvZwojUEJTIC1QIG15cHJvamVjdAojUEJTIC1xIG5vcm1hbAojUEJTIC1sIG5vZGVzPTEsaW5zdGFuY2VfdHlwZT1jNS5sYXJnZQovYmluL2VjaG8gIkhlbGxvIFdvcmxkIgo=" \ +> https://xxx.us-west-2.elb.amazonaws.com/api/scheduler/job +{"success": true, "message": "0"} +~~~ + +Edit `/apps/soca/$SOCA_CONFIGURATION/cluter_web_ui/api/v1/scheduler/pbspro/job.py` and import the new decorator + +~~~python +from decorators import disabled +~~~ + +Locate the API you want to disable and replace the current decorator with `@disabled` + +Before: +~~~python +@private_api +def 
post(self): + // code +~~~ + +After: +~~~python hl_lines="1" +@disabled +def post(self): + // code +~~~ + +Restart SOCA web interface via `socawebui.sh stop/start` and validate you cannot use the API anymore + +~~~bash hl_lines="6" + curl -k -X POST \ +> -H "X-SOCA-TOKEN: xxx" \ +> -H "X-SOCA-USER: mickael" \ +> -F payload="IyEvYmluL2Jhc2gKI1BCUyAtTiB0ZXN0am9iCiNQQlMgLVYgLWogb2UgLW8gdGVzdGpvYl9vdXRwdXQucWxvZwojUEJTIC1QIG15cHJvamVjdAojUEJTIC1xIG5vcm1hbAojUEJTIC1sIG5vZGVzPTEsaW5zdGFuY2VfdHlwZT1jNS5sYXJnZQovYmluL2VjaG8gIkhlbGxvIFdvcmxkIgo=" \ +> https://xxx.us-west-2.elb.amazonaws.com/api/scheduler/job +{"success": false, "message": "This API has been disabled by your Administrator"} +~~~ + +If you want to re-enable the API, simply configure the decorator back to its previous version (`@private_api`). +Restart the web interface again and verify the API is now enabled: +```bash hl_lines="6" + curl -k -X POST \ +> -H "X-SOCA-TOKEN: xxx" \ +> -H "X-SOCA-USER: mickael" \ +> -F payload="IyEvYmluL2Jhc2gKI1BCUyAtTiB0ZXN0am9iCiNQQlMgLVYgLWogb2UgLW8gdGVzdGpvYl9vdXRwdXQucWxvZwojUEJTIC1QIG15cHJvamVjdAojUEJTIC1xIG5vcm1hbAojUEJTIC1sIG5vZGVzPTEsaW5zdGFuY2VfdHlwZT1jNS5sYXJnZQovYmluL2VjaG8gIkhlbGxvIFdvcmxkIgo=" \ +> https://xxx.us-west-2.elb.amazonaws.com/api/scheduler/job +{"success": true, "message": "1"} +``` + +## Disable a view + +Process is very similar, locate the HTTP view you want to restrict. For example edit `/apps/soca/$SOCA_CONFIGURATION/cluster_web_ui/views/remote_desktop.py` + +Import the new decorator + +~~~python +from decorators import login_required, disabled +~~~ + +Then replace the current decorator of the view you want to restrict with `@disabled` + +~~~python hl_lines="2" +@remote_desktop.route('/remote_desktop', methods=['GET']) +@disabled +def index(): + // code +~~~ + +Restart the Web UI. 
Accessing the view will now redirect you back to your homepage + +![](../imgs/disable-feature-1.png) diff --git a/docs/web-interface/index.md b/docs/web-interface/index.md index 1cbd5233..f206b638 100755 --- a/docs/web-interface/index.md +++ b/docs/web-interface/index.md @@ -4,7 +4,7 @@ Scale-Out Computing on AWS includes a simple web ui designed to simplify user interactions such as: - [Start/Stop virtual desktops (Windows/Linux) sessions in 1 click](../web-interface/create-virtual-desktops/) -- [Download private key in both PEM or PPK format](../access-soca-cluster/#ssh-access) +- [Download private key in both PEM or PPK format](../tutorials/access-soca-cluster/#ssh-access) - [Check the queue and job status in real-time](../web-interface/manage-ldap-users/) - [Add/Remove LDAP users ](../web-interface/manage-ldap-users/) - [Access the analytic dashboard](../web-interface/my-activity/) diff --git a/mkdocs.yml b/mkdocs.yml index 7f323cef..e363329f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -8,10 +8,14 @@ site_name: Scale-Out Computing on AWS Knowledge Base theme: - name: 'material' - custom_dir: 'docs/overrides' - features: - - tabs + name: 'material' + palette: + scheme: default + custom_dir: 'docs/overrides' + features: + - navigation.tabs + - navigation.tracking + - navigation.sections # Repository repo_name: 'awslabs/scale-out-computing-on-aws' diff --git a/source/manual_build.py b/source/manual_build.py index 7d8e91b1..e9511ea8 100644 --- a/source/manual_build.py +++ b/source/manual_build.py @@ -6,12 +6,13 @@ import argparse from shutil import make_archive, copy, copytree + def upload_objects(s3, bucket_name, s3_prefix, directory_name): try: my_bucket = s3.Bucket(bucket_name) for path, subdirs, files in os.walk(directory_name): - path = path.replace("\\","/") - directory = path.replace(directory_name,"") + path = path.replace("\\", "/") + directory = path.replace(directory_name.replace("\\", "/"), "") for file in files: print("%s[+] Uploading %s to 
s3://%s/%s%s%s" % (fg('green'), os.path.join(path, file), bucket_name, s3_prefix, directory+'/'+file, attr('reset'))) my_bucket.upload_file(os.path.join(path, file), s3_prefix+directory+'/'+file) @@ -24,7 +25,7 @@ def get_input(prompt): if sys.version_info[0] >= 3: response = input(prompt) else: - #Python 2 + # Python 2 response = raw_input(prompt) return response @@ -33,10 +34,18 @@ def get_input(prompt): from colored import fg, bg, attr import boto3 from requests import get + import requests.exceptions from botocore.client import ClientError from botocore.exceptions import ProfileNotFound except ImportError: - print(" > You must have 'colored', 'boto3' and 'requests' installed. Run 'pip install boto3 colored requests'") + print(" > You must have 'colored', 'boto3' and 'requests' installed. Run 'pip install boto3 colored requests massedit' or 'pip install -r requirements.txt' first") + exit(1) + + if os.name == "nt": + print("%sSorry, Windows builds are currently not supported. Please use a UNIX system if you want to do a custom build\n%s" % (fg('yellow'), attr('reset'))) + print("%s=== How to install SOCA on Windows ===%s" % (fg('yellow'), attr('reset'))) + print("%s1 - Download the latest release (RELEASE-.tar.gz) from https://github.com/awslabs/scale-out-computing-on-aws/releases%s" % (fg('yellow'), attr('reset'))) + print("%s2 - Install SOCA via https://awslabs.github.io/scale-out-computing-on-aws/tutorials/install-soca-cluster/#option-2-download-the-latest-release-targz%s" % (fg('yellow'), attr('reset'))) exit(1) parser = argparse.ArgumentParser(description='Build & Upload SOCA CloudFormation resources.') @@ -63,22 +72,27 @@ def get_input(prompt): session = boto3.session.Session(profile_name=args.profile) s3 = session.resource('s3', region_name=region) except ProfileNotFound: - print(" > Profile %s not found. Check ~/.aws/credentials file." % args.profile) + print("%s> Profile %s not found.
Check ~/.aws/credentials file.%s" % (fg('red'), args.profile, attr('reset'))) exit(1) + else: s3 = boto3.resource('s3', region_name=region) s3.meta.client.head_bucket(Bucket=bucket) s3_bucket_exists = True except ClientError as e: - print(" > The bucket "+ bucket + " does not exist or you have no access.") - print(e) - print(" > Building locally but not uploading to S3") + print("%s > The bucket %s does not exist or you have no access.%s" % (fg('red'), bucket, attr('reset'))) + print("%s %s %s" % (fg('red'), e, attr('reset'))) + print("%s> Building locally but not uploading to S3%s" % (fg('yellow'), attr('reset'))) # Detect Client IP - get_client_ip = get("https://ifconfig.co/json") - if get_client_ip.status_code == 200: - client_ip = get_client_ip.json()['ip'] + '/32' - else: + try: + get_client_ip = get("https://ifconfig.co/json",) + if get_client_ip.status_code == 200: + client_ip = get_client_ip.json()['ip'] + '/32' + else: + client_ip = '' + except requests.exceptions.RequestException as e: + print("Unable to determine client IP") client_ip = '' build_path = os.path.dirname(os.path.realpath(__file__)) @@ -106,8 +120,6 @@ def get_input(prompt): print(" > Creating archive for build id: " + unique_id) make_archive('dist/' + output_prefix, 'gztar', build_folder) - - if s3_bucket_exists: print("====== Upload to S3 ======\n") print(" > Uploading required files ... 
") @@ -128,7 +140,7 @@ def get_input(prompt): print("3: Launch CloudFormation and use scale-out-computing-on-aws.template as base template") print("4: Enter your cluster information.") - print("\n\nFor more information: https://awslabs.github.io/scale-out-computing-on-aws/install-soca-cluster/") + print("\n\nFor more information: https://awslabs.github.io/scale-out-computing-on-aws/tutorials/install-soca-cluster/") diff --git a/source/requirements.txt b/source/requirements.txt index 0ddfb83f..2b1f917e 100644 --- a/source/requirements.txt +++ b/source/requirements.txt @@ -1,3 +1,3 @@ boto3 colored -requests +requests \ No newline at end of file diff --git a/source/scale-out-computing-on-aws.template b/source/scale-out-computing-on-aws.template index fcd0ca8a..e86f36af 100644 --- a/source/scale-out-computing-on-aws.template +++ b/source/scale-out-computing-on-aws.template @@ -1,5 +1,5 @@ AWSTemplateFormatVersion: 2010-09-09 -Description: (SO0072) - Scale-Out Computing on AWS. Template version 2.6.0 +Description: (SO0072) - Scale-Out Computing on AWS. 
Template version 2.6.1 Metadata: AWS::CloudFormation::Interface: ParameterGroups: @@ -121,7 +121,7 @@ Mappings: Info: Data: ClusterIdPrefix: soca - Version: 2.6.0 + Version: 2.6.1 User: centos7: centos amazonlinux2: ec2-user @@ -130,78 +130,85 @@ Mappings: RegionMap: - ap-east-1: # Hong Kong + af-south-1: + amazonlinux2: ami-0c6e605ab94c1af57 + centos7: ami-0b761332115c38669 + ap-east-1: + amazonlinux2: ami-d60844a7 + centos7: ami-09611bd6fa5dd0e3d rhel7: ami-1a453e6b - centos7: ami-68e59c19 - amazonlinux2: ami-570c7726 - ap-northeast-1: # Tokyo - rhel7: ami-00b95502a4d51a07e - centos7: ami-045f38c93733dd48d - amazonlinux2: ami-0c3fd0f5d33134a76 - ap-northeast-2: # Seoul - rhel7: ami-041b16ca28f036753 - centos7: ami-06cf2a72dadf92410 - amazonlinux2: ami-095ca789e0549777d - ap-south-1: # Mumbai - rhel7: ami-0963937a03c01ecd4 - centos7: ami-02e60be79e78fef21 - amazonlinux2: ami-0d2692b6acea72ee6 - ap-southeast-1: # Singapore - rhel7: ami-055c55112e25b1f1f - centos7: ami-0b4dd9d65556cac22 - amazonlinux2: ami-01f7527546b557442 - ap-southeast-2: # Sydney - rhel7: ami-036b423b657376f5b - centos7: ami-08bd00d7713a39e7d - amazonlinux2: ami-0dc96254d5535925f - ca-central-1: # Canada - rhel7: ami-06ca3c0058d0275b3 - centos7: ami-033e6106180a626d0 - amazonlinux2: ami-0d4ae09ec9361d8ac - eu-central-1: # Frankfurt - rhel7: ami-09de4a4c670389e4b - centos7: ami-04cf43aca3e6f3de3 - amazonlinux2: ami-0cc293023f983ed53 - eu-north-1: # Stockholm - rhel7: ami-66f67f18 - centos7: ami-5ee66f20 - amazonlinux2: ami-3f36be41 - eu-west-1: # Dublin - rhel7: ami-0202869bdd0fc8c75 - centos7: ami-0ff760d16d9497662 - amazonlinux2: ami-0bbc25e23a7640b9b - eu-west-2: # London - rhel7: ami-0188c0c5eddd2d032 - centos7: ami-0eab3a90fc693af19 - amazonlinux2: ami-0d8e27447ec2c8410 - eu-west-3: # Paris - rhel7: ami-0c4224e392ec4e440 - centos7: ami-0e1ab783dc9489f34 - amazonlinux2: ami-0adcddd3324248c4c - me-south-1: # Bahrain - rhel7: AMI_NOT_ADDED_YET # /todo Update AMI ID when available - centos7: 
ami-08529c51dbe004acb - amazonlinux2: ami-0624cbc1598d12691 - us-east-1: # Virginia - rhel7: ami-000db10762d0c4c05 - centos7: ami-02eac2c0129f6376b - amazonlinux2: ami-0b898040803850657 - us-east-2: # Ohio - rhel7: ami-094720ddca649952f - centos7: ami-0f2b4fc905b0bd1f1 - amazonlinux2: ami-0d8f6eb4f641ef691 - us-west-1: # Northern California - rhel7: ami-04642fc8fca1e8e67 - centos7: ami-074e2d6769f445be5 - amazonlinux2: ami-056ee704806822732 - us-west-2: # Oregon - rhel7: ami-036affea69a1101c9 - centos7: ami-01ed306a12b7d1c96 - amazonlinux2: ami-082b5a644766e0e6f - sa-east-1: # Sao Paulo - rhel7: ami-05c1c16cac05a7c0b - centos7: ami-0b8d86d4bf91850af - amazonlinux2: ami-058943e7d9b9cabfb + ap-northeast-1: + amazonlinux2: ami-01748a72bed07727c + centos7: ami-0ddea5e0f69c193a4 + rhel7: ami-0e3e6ca71a19ccf06 + ap-northeast-2: + amazonlinux2: ami-0094965d55b3bb1ff + centos7: ami-0e4214f08b51e23cc + rhel7: ami-0f84aff229263c1fc + ap-south-1: + amazonlinux2: ami-04b1ddd35fd71475a + centos7: ami-0ffc7af9c06de0077 + rhel7: ami-0b105c57e305d9064 + ap-southeast-1: + amazonlinux2: ami-00b8d9cb8a7161e41 + centos7: ami-0adfdaea54d40922b + rhel7: ami-031290b4bd9eaa715 + ap-southeast-2: + amazonlinux2: ami-06ce513624b435a22 + centos7: ami-03d56f451ca110e99 + rhel7: ami-06d2821bfc76dcda3 + ca-central-1: + amazonlinux2: ami-0c3e7f50c89a372ae + centos7: ami-0a7c5b189b6460115 + rhel7: ami-0a43efe505004e592 + eu-central-1: + amazonlinux2: ami-03c3a7e4263fd998c + centos7: ami-08b6d44b4f6f7b279 + rhel7: ami-0fc86555914f6a9f2 + eu-north-1: + amazonlinux2: ami-02cb52d7ba9887a93 + centos7: ami-0358414bac2039369 + rhel7: ami-8833bbf6 + eu-south-1: + amazonlinux2: ami-080807541452b0410 + centos7: ami-0fe3899b62205176a + rhel7: ami-004f2ac6013e4fcfb + eu-west-1: + amazonlinux2: ami-01720b5f421cf0179 + centos7: ami-04f5641b0d178a27a + rhel7: ami-04c89a19fea29f1f0 + eu-west-2: + amazonlinux2: ami-0e80a462ede03e653 + centos7: ami-0b22fcaf3564fb0c9 + rhel7: ami-06fe0c124aedcef5f + eu-west-3: + 
amazonlinux2: ami-00798d7180f25aac2 + centos7: ami-072ec828dae86abe5 + rhel7: ami-08295de7534115935 + me-south-1: + amazonlinux2: ami-0032aa87bb75498ea + centos7: ami-0ac17dcdd6f6f4eb6 + rhel7: ami-0e845cba4071a4a1a + sa-east-1: + amazonlinux2: ami-022082b7f1da62478 + centos7: ami-02334c45dd95ca1fc + rhel7: ami-06efd558d6a5fb959 + us-east-1: + amazonlinux2: ami-0be2609ba883822ec + centos7: ami-00e87074e52e6c9f9 + rhel7: ami-08a7d2bfef687328f + us-east-2: + amazonlinux2: ami-0a0ad6b70e61be944 + centos7: ami-00f8e2c955f7ffa9b + rhel7: ami-0e166e72fda655c63 + us-west-1: + amazonlinux2: ami-03130878b60947df3 + centos7: ami-08d2d8b00f270d03b + rhel7: ami-056efb42b219f9abb + us-west-2: + amazonlinux2: ami-0a36eb8fadc976275 + centos7: ami-0686851c4e7b1a8e1 + rhel7: ami-02deb4589e0f0d95e Conditions: UseCustomAMI: !Not [!Equals [!Ref CustomAMI, ""]] @@ -239,6 +246,14 @@ Resources: - logs:PutLogEvents Resource: - !Join [ "", [ "arn:", !Ref "AWS::Partition", ":logs:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":log-group:/aws/lambda/", !Join [ "-", [ !FindInMap [ Info, Data, ClusterIdPrefix ], !Ref "AWS::StackName"] ], "*"] ] + - Effect: Allow + Action: + - iam:ListRoles + Resource: "*" + Condition: + "StringEqualsIfExists": + "aws:PrincipalAccount": !Sub "${AWS::AccountId}" + CheckPreRequisiteLambda: Type: AWS::Lambda::Function @@ -257,6 +272,7 @@ Resources: ZipFile: !Sub | import cfnresponse import re + import boto3 ''' Check SOCA Pre-Requisite ''' @@ -296,10 +312,18 @@ Resources: PublicSubnetMaskBits = 32 - int(VPCCidrPrefixBits) - int(PublicSubnetMaskPrefixBits) if PublicSubnetMaskBits > 6: PublicSubnetMaskBits = 6 PrivateSubnetMaskBits = 32 - int(VPCCidrPrefixBits) - int(PrivateSubnetMaskPrefixBits) + + iam_client = boto3.client('iam') + es_roles = iam_client.list_roles(PathPrefix='/aws-service-role/es.amazonaws.com') + if len(es_roles['Roles']) == 0: + CreateESServiceRole = "True" + else: + CreateESServiceRole = "False" responseData = {'ClusterId': 
clusterId.lower(), 'PublicSubnetMaskBits': PublicSubnetMaskBits, - 'PrivateSubnetMaskBits': PrivateSubnetMaskBits} + 'PrivateSubnetMaskBits': PrivateSubnetMaskBits, + 'CreateESServiceRole': CreateESServiceRole} cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, 'Pre-Requisites OK') @@ -340,6 +364,7 @@ Resources: S3InstallFolder: !Ref S3InstallFolder SchedulerPublicIP: !GetAtt Network.Outputs.SchedulerPublicIP EIPNat: !GetAtt Network.Outputs.EIPNat + CreateESServiceRole: !GetAtt CheckSOCAPreRequisite.CreateESServiceRole TemplateURL: !Join [ "/", [!Sub "https://s3.${AWS::URLSuffix}", !Ref S3InstallBucket, !Ref S3InstallFolder, "templates/Security.template"] ] TimeoutInMinutes: 30 @@ -380,27 +405,27 @@ Resources: UserName: !Ref UserName UserPassword: !Ref UserPassword SchedulerPublicIP: !GetAtt Network.Outputs.SchedulerPublicIP - SchedulerPublicIPAllocation: !GetAtt Network.Outputs.SchedulerPublicIPAllocation TemplateURL: !Join [ "/", [!Sub "https://s3.${AWS::URLSuffix}", !Ref S3InstallBucket, !Ref S3InstallFolder, "templates/Scheduler.template"] ] TimeoutInMinutes: 60 Analytics: - DependsOn: Scheduler + DependsOn: Security Type: AWS::CloudFormation::Stack Properties: Parameters: - SchedulerSecurityGroup: !GetAtt Security.Outputs.SchedulerSecurityGroup - PublicSubnet1: !GetAtt Network.Outputs.PublicSubnet1 + ComputeNodeSecurityGroup: !GetAtt Security.Outputs.ComputeNodeSecurityGroup + VpcId: !GetAtt Network.Outputs.VpcId + PrivateSubnet1: !GetAtt Network.Outputs.PrivateSubnet1 + PrivateSubnet2: !GetAtt Network.Outputs.PrivateSubnet2 ClusterId: !GetAtt CheckSOCAPreRequisite.ClusterId - ClientIp: !Ref ClientIp - SchedulerPublicIP: !GetAtt Network.Outputs.SchedulerPublicIP - EIPNat: !GetAtt Network.Outputs.EIPNat TemplateURL: !Join [ "/", [!Sub "https://s3.${AWS::URLSuffix}", !Ref S3InstallBucket, !Ref S3InstallFolder, "templates/Analytics.template"] ] TimeoutInMinutes: 30 Viewer: - DependsOn: Analytics + DependsOn: + - Scheduler + - Analytics Type: 
AWS::CloudFormation::Stack Properties: Parameters: @@ -413,6 +438,7 @@ Resources: SchedulerInstanceId: !GetAtt Scheduler.Outputs.SchedulerInstanceId SchedulerIAMRole: !GetAtt Security.Outputs.SchedulerIAMRole LambdaACMIAMRoleArn: !GetAtt Security.Outputs.LambdaACMIAMRoleArn + ESDomainIPAddresses: !GetAtt Analytics.Outputs.ESDomainIPAddresses TemplateURL: !Join [ "/", [!Sub "https://s3.${AWS::URLSuffix}", !Ref S3InstallBucket, !Ref S3InstallFolder, "templates/Viewer.template"] ] TimeoutInMinutes: 30 @@ -466,7 +492,7 @@ Outputs: LDAPMasterPassword: Value: /root/OpenLdapAdminPassword.txt AnalyticsDashboard: - Value: !Join [ "", [ "https://", !GetAtt Analytics.Outputs.ESDomainEndpoint, "/_plugin/kibana/"]] + Value: !Join [ "", [ "https://", !GetAtt Viewer.Outputs.LoadBalancerDNSName, "/_plugin/kibana/"]] ConnectionString: Value: !Join [ "", [ "ssh -i ", !Ref SSHKeyPair, ".pem ", !FindInMap [ Info, User, !Ref BaseOS ], "@", !GetAtt Network.Outputs.SchedulerPublicIP]] WebUserInterface: diff --git a/source/scripts/config.cfg b/source/scripts/config.cfg index 3ae357c8..f26aca57 100644 --- a/source/scripts/config.cfg +++ b/source/scripts/config.cfg @@ -1,8 +1,8 @@ # Python -PYTHON_VERSION="3.7.1" -PYTHON_TGZ="Python-3.7.1.tgz" -PYTHON_URL="https://www.python.org/ftp/python/3.7.1/Python-3.7.1.tgz" -PYTHON_HASH="99f78ecbfc766ea449c4d9e7eda19e83" +PYTHON_VERSION="3.7.9" +PYTHON_TGZ="Python-3.7.9.tgz" +PYTHON_URL="https://www.python.org/ftp/python/3.7.9/Python-3.7.9.tgz" +PYTHON_HASH="bcd9f22cf531efc6f06ca6b9b2919bd4" # Scheduler OPENPBS_VERSION="20.0.1" @@ -17,22 +17,30 @@ OPENMPI_URL="https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.1.t OPENMPI_HASH="c72d9eb908a0f60e3155698a646cde38" # DCV -DCV_VERSION="2020.1-9012-el7-x86_64" -DCV_TGZ="nice-dcv-2020.1-9012-el7-x86_64.tgz" -DCV_URL="https://d1uj6qtbmh3dt5.cloudfront.net/2020.1/Servers/nice-dcv-2020.1-9012-el7-x86_64.tgz" -DCV_HASH="bbb715b47c0e47711deef1870c70120e" +DCV_X86_64_VERSION="2020.1-9012-el7-x86_64" 
+DCV_X86_64_TGZ="nice-dcv-2020.1-9012-el7-x86_64.tgz" +DCV_X86_64_URL="https://d1uj6qtbmh3dt5.cloudfront.net/2020.1/Servers/nice-dcv-2020.1-9012-el7-x86_64.tgz" +DCV_X86_64_HASH="bbb715b47c0e47711deef1870c70120e" +DCV_AARCH64_VERSION="2020.1-9012-el7-aarch64" +DCV_AARCH64_TGZ="nice-dcv-2020.1-9012-el7-aarch64.tgz" +DCV_AARCH64_URL="https://d1uj6qtbmh3dt5.cloudfront.net/2020.1/Servers/nice-dcv-2020.1-9012-el7-aarch64.tgz" +DCV_AARCH64_HASH="c16f4f1ea253170a980ec31c69e13b07" # EFA -EFA_VERSION="1.9.3" -EFA_TGZ="aws-efa-installer-1.9.3.tar.gz" -EFA_URL="https://efa-installer.amazonaws.com/aws-efa-installer-1.9.3.tar.gz" -EFA_HASH="95755765a097802d3e6d5018d1a5d3d6" +EFA_VERSION="1.11.1" +EFA_TGZ="aws-efa-installer-1.11.1.tar.gz" +EFA_URL="https://efa-installer.amazonaws.com/aws-efa-installer-1.11.1.tar.gz" +EFA_HASH="026b0d9a0a48780cc7406bd51997b1c0" # Metric Beat METRICBEAST_RPM="metricbeat-oss-7.6.2-x86_64.rpm" METRICBEAT_URL="https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-oss-7.6.2-x86_64.rpm" METRICBEAT_HASH="631a7e53a47c53b092f64db9cd8a96a8" +# SSM +SSM_X86_64_URL="https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm" +SSM_AARCH64_URL="https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_arm64/amazon-ssm-agent.rpm" + # Default LDAP base LDAP_BASE="DC=soca,DC=local" diff --git a/source/scripts/requirements.txt b/source/scripts/requirements.txt index d6905778..50d5cc8e 100644 --- a/source/scripts/requirements.txt +++ b/source/scripts/requirements.txt @@ -1,16 +1,16 @@ -awscli==1.18.154 +awscli==1.19.38 apscheduler==3.6.3 tzlocal==2.1 asn1crypto==1.3.0 -boto3==1.15.13 -botocore==1.18.13 +boto3==1.17.38 +botocore==1.20.38 cachetools==4.1.0 cffi==1.14.0 cfn-flip==1.2.2 Click==7.0 colorama==0.3.9 -cryptography==3.2 -docutils==0.16 +cryptography==3.3.2 +docutils==0.15.2 ecdsa==0.15 elasticsearch==6.3.1 Flask==1.0.3 @@ -23,7 +23,7 @@ flask-swagger==0.2.14 Flask-WTF==0.14.3 gunicorn==19.9.0 
itsdangerous==1.1.0 -Jinja2==2.11.1 +Jinja2==2.11.3 jmespath==0.9.4 MarkupSafe==1.1.1 prettytable==0.7.2 @@ -36,10 +36,10 @@ python-jose==3.1.0 python-ldap==3.2.0 python-pam==1.8.4 pytz==2019.1 -PyYAML==5.2 +PyYAML==5.4 requests==2.23.0 requests-aws4auth==0.9 -rsa==3.4.2 +rsa==4.1 s3transfer==0.3.3 six==1.14.0 SQLAlchemy==1.3.15 diff --git a/source/soca/cluster_analytics/job_tracking.py b/source/soca/cluster_analytics/job_tracking.py index 3759e112..aece7372 100755 --- a/source/soca/cluster_analytics/job_tracking.py +++ b/source/soca/cluster_analytics/job_tracking.py @@ -261,7 +261,7 @@ def read_file(filename): # Update EBS rate for your region # EBS Formulas: https://aws.amazon.com/ebs/pricing/ - ebs_gp2_storage = 0.1 # $ per gb per month + ebs_gp3_storage = 0.08 # $ per gb per month ebs_io1_storage = 0.125 # $ per gb per month provisionied_io = 0.065 # IOPS per month fsx_lustre = 0.000194 # GB per hour @@ -275,14 +275,14 @@ def read_file(filename): tmp['estimated_price_fsx_lustre'] = 0 if 'root_size' in tmp.keys(): - tmp['estimated_price_storage_root_size'] = ((int(tmp['root_size']) * ebs_gp2_storage * simulation_time_seconds_with_penalty) / (86400 * 30)) * tmp['nodect'] + tmp['estimated_price_storage_root_size'] = ((int(tmp['root_size']) * ebs_gp3_storage * simulation_time_seconds_with_penalty) / (86400 * 30)) * tmp['nodect'] if 'scratch_size' in tmp.keys(): if 'scratch_iops' in tmp.keys(): tmp['estimated_price_storage_scratch_size'] = ((int(tmp['scratch_size']) * ebs_io1_storage * simulation_time_seconds_with_penalty) / (86400 * 30)) * tmp['nodect'] tmp['estimated_price_storage_scratch_iops'] = ((int(tmp['scratch_iops']) * provisionied_io * simulation_time_seconds_with_penalty) / (86400 * 30)) * tmp['nodect'] else: - tmp['estimated_price_storage_scratch_size'] = ((int(tmp['scratch_size']) * ebs_gp2_storage * simulation_time_seconds_with_penalty) / (86400 * 30)) * tmp['nodect'] + tmp['estimated_price_storage_scratch_size'] = ((int(tmp['scratch_size']) * 
ebs_gp3_storage * simulation_time_seconds_with_penalty) / (86400 * 30)) * tmp['nodect'] if 'fsx_lustre_bucket' in tmp.keys(): if tmp['fsx_lustre_bucket'] != 'false': diff --git a/source/soca/cluster_manager/add_nodes.py b/source/soca/cluster_manager/add_nodes.py index f80ecb59..3a821a55 100644 --- a/source/soca/cluster_manager/add_nodes.py +++ b/source/soca/cluster_manager/add_nodes.py @@ -6,6 +6,7 @@ import sys import uuid import boto3 +from math import ceil from botocore.exceptions import ClientError sys.path.append(os.path.dirname(__file__)) @@ -19,6 +20,32 @@ servicequotas = boto3.client("service-quotas") aligo_configuration = configuration.get_aligo_configuration() +def find_running_cpus_per_instance(instance_list): + running_vcpus = 0 + token = True + next_token = '' + while token is True: + response = ec2.describe_instances( + Filters=[ + {'Name': 'instance-type', 'Values': instance_list}, + {'Name': 'instance-state-name', 'Values': ['running', 'pending']}], + MaxResults=1000, + NextToken=next_token, + ) + try: + next_token = response['NextToken'] + except KeyError: + token = False + for reservation in response['Reservations']: + for instance in reservation['Instances']: + if "CpuOptions" in instance.keys(): + running_vcpus += instance["CpuOptions"]["CoreCount"] * 2 + else: + if 'xlarge' in instance["InstanceType"]: + running_vcpus += 4 + else: + running_vcpus += 2 + return running_vcpus def verify_ri_saving_availabilities(instance_type, instance_type_info): if instance_type not in instance_type_info.keys(): @@ -60,8 +87,8 @@ def verify_ri_saving_availabilities(instance_type, instance_type_info): for reservation in get_ri_count["ReservedInstances"]: instance_type_info[instance_type]["current_ri_purchased"] += reservation["InstanceCount"] - print("Detected {} running {} instance ".format(instance_type_info[instance_type]["current_instance_in_use"],instance_type)) - print("Detected {} RI for {} instance 
".format(instance_type_info[instance_type]["current_ri_purchased"], instance_type)) + #print("Detected {} running {} instance ".format(instance_type_info[instance_type]["current_instance_in_use"],instance_type)) + #print("Detected {} RI for {} instance ".format(instance_type_info[instance_type]["current_ri_purchased"], instance_type)) return instance_type_info def verify_vcpus_limit(instance_type, desired_capacity, quota_info): @@ -123,58 +150,13 @@ def verify_vcpus_limit(instance_type, desired_capacity, quota_info): if not quota_info or instance_type not in quota_info.keys(): all_instances_available = ec2._service_model.shape_for('InstanceType').enum all_instances_for_quota = [instance_family for x in instances_family_allowed_in_quota for instance_family in all_instances_available if instance_family.startswith(x.rstrip().lstrip())] - # get all running instance - token = True - next_token = '' - while token is True: - response = ec2.describe_instances( - Filters=[ - # Describe instance as a limit of 200 filters - {'Name': 'instance-type', 'Values': all_instances_for_quota[0:150]}, - {'Name': 'instance-state-name', 'Values': ['running', 'pending']}], - MaxResults=1000, - NextToken=next_token, - ) - try: - next_token = response['NextToken'] - except KeyError: - token = False - for reservation in response['Reservations']: - for instance in reservation['Instances']: - if "CpuOptions" in instance.keys(): - running_vcpus += instance["CpuOptions"]["CoreCount"] * 2 - else: - if 'xlarge' in instance["InstanceType"]: - running_vcpus += 4 - else: - running_vcpus += 2 - - # Describe instance as a limit of 200 filters - if len(all_instances_for_quota) > 150: - token = True - next_token = '' - while token is True: - response = ec2.describe_instances( - Filters=[ - {'Name': 'instance-type', 'Values': all_instances_for_quota[150:]}, - {'Name': 'instance-state-name', 'Values': ['running', 'pending']}], - MaxResults=1000, - NextToken=next_token, - ) - try: - next_token = 
response['NextToken'] - except KeyError: - token = False - - for reservation in response['Reservations']: - for instance in reservation['Instances']: - if "CpuOptions" in instance.keys(): - running_vcpus += instance["CpuOptions"]["CoreCount"] * 2 - else: - if 'xlarge' in instance["InstanceType"]: - running_vcpus += 4 - else: - running_vcpus += 2 + required_api_calls = ceil(len(all_instances_for_quota) / 190) + for i in range(0, required_api_calls): + # DescribeInstances has a limit of 200 attributes per filter + instances_to_check = all_instances_for_quota[i * 190:(i + 1) * 190] + if instances_to_check: + running_vcpus += find_running_cpus_per_instance(instances_to_check) + else: running_vcpus = quota_info[instance_type]["vcpus_provisioned"] @@ -414,15 +396,21 @@ def check_config(**kwargs): # if placement group is True and more than 1 subnet is defined, force default to 1 subnet kwargs['subnet_id'] = [kwargs['subnet_id'][0]] - cpus_count_pattern = re.search(r'[.](\d+)', kwargs['instance_type'][0]) - if cpus_count_pattern: - kwargs['core_count'] = int(cpus_count_pattern.group(1)) * 2 - else: - if 'xlarge' in kwargs['instance_type'][0]: - kwargs['core_count'] = 2 + # Check core_count and ht_support + try: + instance_attributes = ec2.describe_instance_types(InstanceTypes=[kwargs['instance_type'][0]]) + if len(instance_attributes['InstanceTypes']) == 0: + error = return_message('Unable to check instance: ' + kwargs['instance_type'][0]) + else: + kwargs['core_count'] = instance_attributes['InstanceTypes'][0]['VCpuInfo']['DefaultCores'] + if instance_attributes['InstanceTypes'][0]['VCpuInfo']['DefaultThreadsPerCore'] == 1: + # Set ht_support to False for instances with DefaultThreadsPerCore = 1 (e.g. 
graviton) + kwargs['ht_support'] = False + except ClientError as e: + if e.response['Error'].get('Code') == 'InvalidInstanceType': + error = return_message('InvalidInstanceType: ' + kwargs['instance_type'][0]) else: - kwargs['core_count'] = 1 - + error = return_message('Unable to check instance: ' + kwargs['instance_type'][0]) # Validate Spot Allocation Strategy mapping = { @@ -492,21 +480,27 @@ def check_config(**kwargs): error = return_message('spot_price must be either "auto" or a float value"') # Validate EFA - if kwargs['efa_support'] not in [True, False]: - kwargs['efa_support'] = False - else: - if kwargs['efa_support'] is True: - for instance_type in kwargs['instance_type']: - check_efa_support = ec2.describe_instance_types( - InstanceTypes=[instance_type], - Filters=[ - {"Name": "network-info.efa-supported", - "Values": ["true"]} - ] - ) - - if len(check_efa_support["InstanceTypes"]) == 0: - error = return_message('You have requested EFA support but your instance (' + instance_type + ') does not support EFA') + try: + if kwargs['efa_support'] not in [True, False]: + kwargs['efa_support'] = False + else: + if kwargs['efa_support'] is True: + for instance_type in kwargs['instance_type']: + check_efa_support = ec2.describe_instance_types( + InstanceTypes=[instance_type], + Filters=[ + {"Name": "network-info.efa-supported", + "Values": ["true"]} + ] + ) + + if len(check_efa_support["InstanceTypes"]) == 0: + error = return_message('You have requested EFA support but your instance (' + instance_type + ') does not support EFA') + except ClientError as e: + if e.response['Error'].get('Code') == 'InvalidInstanceType': + error = return_message('InvalidInstanceType: ' + kwargs['instance_type']) + else: + error = return_message('Unable to check EFA support for instance: ' + kwargs['instance_type']) # Validate Keep EBS if kwargs['keep_ebs'] not in [True, False]: diff --git a/source/soca/cluster_manager/cloudformation_builder.py 
b/source/soca/cluster_manager/cloudformation_builder.py index 86623679..f986d7d5 100644 --- a/source/soca/cluster_manager/cloudformation_builder.py +++ b/source/soca/cluster_manager/cloudformation_builder.py @@ -52,7 +52,7 @@ def main(**params): # Metadata t = Template() t.set_version("2010-09-09") - t.set_description("(SOCA) - Base template to deploy compute nodes. Version 2.6.0") + t.set_description("(SOCA) - Base template to deploy compute nodes. Version 2.6.1") allow_anonymous_data_collection = params["MetricCollectionAnonymous"] debug = False mip_usage = False @@ -208,7 +208,7 @@ def main(**params): DeviceName="/dev/xvda" if params["BaseOS"] == "amazonlinux2" else "/dev/sda1", Ebs=EBSBlockDevice( VolumeSize=params["RootSize"], - VolumeType="gp2", + VolumeType="gp3", DeleteOnTermination="false" if params["KeepEbs"] is True else "true", Encrypted=True)) ] @@ -218,7 +218,7 @@ def main(**params): DeviceName="/dev/xvdbx", Ebs=EBSBlockDevice( VolumeSize=params["ScratchSize"], - VolumeType="io1" if int(params["VolumeTypeIops"]) > 0 else "gp2", + VolumeType="io1" if int(params["VolumeTypeIops"]) > 0 else "gp3", Iops=params["VolumeTypeIops"] if int(params["VolumeTypeIops"]) > 0 else Ref("AWS::NoValue"), DeleteOnTermination="false" if params["KeepEbs"] is True else "true", Encrypted=True)) diff --git a/source/soca/cluster_manager/dcv_alb_manager.py b/source/soca/cluster_manager/dcv_alb_manager.py index 7d0e72be..96c2110a 100644 --- a/source/soca/cluster_manager/dcv_alb_manager.py +++ b/source/soca/cluster_manager/dcv_alb_manager.py @@ -137,7 +137,7 @@ def get_current_listener_rules(listener_arn): rules = {} priority_taken = [] for rule in elbv2_client.describe_rules(ListenerArn=listener_arn)['Rules']: - if rule['Priority'] != 'default': + if rule['Priority'] != 'default' and rule['Priority'] != '1': priority_taken.append(int(rule['Priority'])) for condition in rule['Conditions']: condition_list = [] @@ -239,4 +239,4 @@ def return_alb_listener(alb_arn): # handle case 
where TG is already deleted print(err) pass - print('Cleaning complete') \ No newline at end of file + print('Cleaning complete') diff --git a/source/soca/cluster_node_bootstrap/ComputeNode.sh b/source/soca/cluster_node_bootstrap/ComputeNode.sh index 2c09d5c2..22db5276 100644 --- a/source/soca/cluster_node_bootstrap/ComputeNode.sh +++ b/source/soca/cluster_node_bootstrap/ComputeNode.sh @@ -12,7 +12,12 @@ fi service pbs stop # Install SSM -yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm +machine=$(uname -m) +if [[ $machine == "x86_64" ]]; then + yum install -y $SSM_X86_64_URL +elif [[ $machine == "aarch64" ]]; then + yum install -y $SSM_AARCH64_URL +fi systemctl enable amazon-ssm-agent systemctl restart amazon-ssm-agent @@ -86,7 +91,7 @@ else # If only 1 instance store, mfks as ext4 echo "Detected 1 NVMe device available, formatting as ext4 .." mkfs -t ext4 $VOLUME_LIST - echo "$VOLUME_LIST /scratch ext4 defaults 0 0" >> /etc/fstab + echo "$VOLUME_LIST /scratch ext4 defaults,nofail 0 0" >> /etc/fstab elif [[ $VOLUME_COUNT -gt 1 ]]; then # if more than 1 instance store disks, raid them ! 
@@ -96,7 +101,7 @@ else echo yes | mdadm --create -f --verbose --level=0 --raid-devices=$VOLUME_COUNT /dev/$DEVICE_NAME ${VOLUME_LIST[@]} mkfs -t ext4 /dev/$DEVICE_NAME mdadm --detail --scan | tee -a /etc/mdadm.conf - echo "/dev/$DEVICE_NAME /scratch ext4 defaults 0 0" >> /etc/fstab + echo "/dev/$DEVICE_NAME /scratch ext4 defaults,nofail 0 0" >> /etc/fstab else echo "All volumes detected already have a partition or mount point and can't be used as scratch devices" fi diff --git a/source/soca/cluster_node_bootstrap/ComputeNodeInstallDCV.sh b/source/soca/cluster_node_bootstrap/ComputeNodeInstallDCV.sh index 14316275..fd974ad8 100644 --- a/source/soca/cluster_node_bootstrap/ComputeNodeInstallDCV.sh +++ b/source/soca/cluster_node_bootstrap/ComputeNodeInstallDCV.sh @@ -40,18 +40,28 @@ then fi # Download and Install DCV +echo "Install DCV" cd ~ -wget $DCV_URL -if [[ $(md5sum $DCV_TGZ | awk '{print $1}') != $DCV_HASH ]]; then - echo -e "FATAL ERROR: Checksum for DCV failed. File may be compromised." > /etc/motd - exit 1 +machine=$(uname -m) +if [[ $machine == "x86_64" ]]; then + wget $DCV_X86_64_URL + if [[ $(md5sum $DCV_X86_64_TGZ | awk '{print $1}') != $DCV_X86_64_HASH ]]; then + echo -e "FATAL ERROR: Checksum for DCV failed. File may be compromised." > /etc/motd + exit 1 + fi + tar zxvf $DCV_X86_64_TGZ + cd nice-dcv-$DCV_X86_64_VERSION +elif [[ $machine == "aarch64" ]]; then + wget $DCV_AARCH64_URL + if [[ $(md5sum $DCV_AARCH64_TGZ | awk '{print $1}') != $DCV_AARCH64_HASH ]]; then + echo -e "FATAL ERROR: Checksum for DCV failed. File may be compromised." 
> /etc/motd + exit 1 + fi + tar zxvf $DCV_AARCH64_TGZ + cd nice-dcv-$DCV_AARCH64_VERSION fi - -# Install DCV server and Xdcv -tar zxvf $DCV_TGZ -cd nice-dcv-$DCV_VERSION -rpm -ivh nice-xdcv-*.rpm --nodeps -rpm -ivh nice-dcv-server*.rpm --nodeps +rpm -ivh nice-xdcv-*.${machine}.rpm --nodeps +rpm -ivh nice-dcv-server*.${machine}.rpm --nodeps # Enable DCV support for USB remotization yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm diff --git a/source/soca/cluster_node_bootstrap/windows/ComputeNodeInstallDCVWindows.ps b/source/soca/cluster_node_bootstrap/windows/ComputeNodeInstallDCVWindows.ps index 57d12a22..6c9b17e5 100644 Binary files a/source/soca/cluster_node_bootstrap/windows/ComputeNodeInstallDCVWindows.ps and b/source/soca/cluster_node_bootstrap/windows/ComputeNodeInstallDCVWindows.ps differ diff --git a/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/job.py b/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/job.py index ae2e1243..b33a3bce 100644 --- a/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/job.py +++ b/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/job.py @@ -64,7 +64,7 @@ def get(self): return {"success": True, "message": job_info["Jobs"][job_key]}, 200 except Exception as err: - return {"succes": False, "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)"}, 210 + return {"success": False, "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)"}, 210 except Exception as err: return {"success": False, "message": "Unknown error: " + str(err)}, 500 @@ -165,7 +165,7 @@ def post(self): return {"success": True, "message": "Your Linux command has been executed successfully. Output (if any) can be accessed on "+job_output_path+""}, 200 except subprocess.CalledProcessError as e: - return {"succes": False, + return {"success": False, "message": { "error": "Unable to submit the job. 
Please verify your script file (eg: malformed inputs, syntax error, extra space in the PBS variables ...) or refer to the 'stderr' message.", "stderr": '{}'.format(e.stderr.decode(sys.getfilesystemencoding())), @@ -174,7 +174,7 @@ def post(self): }, 500 except Exception as err: - return {"succes": False, "message": {"error": "Unable to run Qsub command.", + return {"success": False, "message": {"error": "Unable to run Qsub command.", "trace": str(err), "job_script": str(payload)}}, 500 @@ -233,7 +233,7 @@ def delete(self): delete_job = subprocess.check_output(shlex.split(qdel_command)) return {"success": True, "message": "Job deleted"} except Exception as err: - return {"succes": False, "message": "Unable to execute qsub command: " + str(err)}, 500 + return {"success": False, "message": "Unable to execute qdel command: " + str(err)}, 500 except Exception as err: return {"success": False, "message": "Unknown error: " + str(err)}, 500 diff --git a/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/jobs.py b/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/jobs.py index 04f07fa6..41cfe080 100644 --- a/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/jobs.py +++ b/source/soca/cluster_web_ui/api/v1/scheduler/pbspro/jobs.py @@ -52,7 +52,7 @@ def get(self): return {"success": True, "message": job_for_user["Jobs"]}, 200 except Exception as err: - return {"succes": False, "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)"}, 500 + return {"success": False, "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)"}, 500 except Exception as err: return {"success": False, "message": "Unknown error: " + str(err)}, 500 diff --git a/source/soca/cluster_web_ui/app.py b/source/soca/cluster_web_ui/app.py index 0db5ee6f..94a48107 100644 --- a/source/soca/cluster_web_ui/app.py +++ b/source/soca/cluster_web_ui/app.py @@ -37,6 +37,7 @@ from views.my_files import my_files from views.submit_job import 
submit_job from scheduled_tasks.clean_tmp_folders import clean_tmp_folders +from scheduled_tasks.validate_db_permissions import validate_db_permissions from scheduled_tasks.manage_dcv_instances_lifecycle import auto_terminate_stopped_instance, schedule_auto_start, schedule_auto_stop from flask_wtf.csrf import CSRFProtect from config import app_config @@ -47,6 +48,8 @@ from flask_apscheduler import APScheduler #from apscheduler.schedulers.background import BackgroundScheduler from models import db +import os +import stat app = Flask(__name__) @@ -139,6 +142,12 @@ def page_not_found(e): class Config(object): JOBS = [ + { + 'id': 'validate_db_permissions', + 'func': validate_db_permissions, + 'trigger': 'interval', + 'minutes': 60 + }, { 'id': 'auto_terminate_stopped_instance', 'func': auto_terminate_stopped_instance, @@ -225,6 +234,8 @@ class Config(object): db.app = app db.init_app(app) db.create_all() + basedir = os.path.abspath(os.path.dirname(__file__)) + os.chmod(os.path.join(basedir, "db.sqlite"), stat.S_IWUSR + stat.S_IRUSR) app_session = Session(app) app_session.app.session_interface.db.create_all() app.config.from_object(Config()) diff --git a/source/soca/cluster_web_ui/dcv_cloudformation_builder.py b/source/soca/cluster_web_ui/dcv_cloudformation_builder.py index c3b10eec..19ef8d5a 100644 --- a/source/soca/cluster_web_ui/dcv_cloudformation_builder.py +++ b/source/soca/cluster_web_ui/dcv_cloudformation_builder.py @@ -38,7 +38,7 @@ def main(**launch_parameters): 'Ebs': { 'DeleteOnTermination': True, 'VolumeSize': 30 if launch_parameters["disk_size"] is False else int(launch_parameters["disk_size"]), - 'VolumeType': 'gp2', + 'VolumeType': 'gp3', 'Encrypted': True} }] instance.ImageId = launch_parameters["image_id"] diff --git a/source/soca/cluster_web_ui/decorators.py b/source/soca/cluster_web_ui/decorators.py index 7703b9be..aa2c7f76 100644 --- a/source/soca/cluster_web_ui/decorators.py +++ b/source/soca/cluster_web_ui/decorators.py @@ -131,4 +131,16 @@ def 
check_admin(): else: return redirect('/login') - return check_admin \ No newline at end of file + return check_admin + + +def disabled(f): + @wraps(f) + def disable_feature(*args, **kwargs): + if "api" in request.path: + return {"success": False, "message": "This API has been disabled by your Administrator"}, 401 + else: + flash("Sorry this feature has been disabled by your Administrator.", "error") + return redirect("/") + + return disable_feature \ No newline at end of file diff --git a/source/soca/cluster_web_ui/models.py b/source/soca/cluster_web_ui/models.py index 4837878e..4ef4baff 100644 --- a/source/soca/cluster_web_ui/models.py +++ b/source/soca/cluster_web_ui/models.py @@ -110,8 +110,8 @@ def as_dict(self): class AmiList(db.Model): id = db.Column(db.Integer, primary_key=True, autoincrement=True) - ami_id = db.Column(db.String(17), nullable=False) - ami_type = db.Column(db.String(7), nullable=False) + ami_id = db.Column(db.String(255), nullable=False) + ami_type = db.Column(db.String(255), nullable=False) ami_label = db.Column(db.Text, nullable=False) created_on = db.Column(db.DateTime) is_active = db.Column(db.Boolean, nullable=False) diff --git a/source/soca/cluster_web_ui/scheduled_tasks/validate_db_permissions.py b/source/soca/cluster_web_ui/scheduled_tasks/validate_db_permissions.py new file mode 100644 index 00000000..68153c58 --- /dev/null +++ b/source/soca/cluster_web_ui/scheduled_tasks/validate_db_permissions.py @@ -0,0 +1,16 @@ +import os +import stat +import logging +logger = logging.getLogger("scheduled_tasks") + + +def validate_db_permissions(): + # Ensure db.sqlite permissions are always 600 + logger.info(f"validate_db_permissions") + db_sqlite = os.path.abspath(os.path.dirname(__file__) + "/../db.sqlite") + check_stat = os.stat(db_sqlite) + oct_perm = oct(check_stat.st_mode) + logger.info(f"validate_db_permissions: Detected permission {oct_perm} for {db_sqlite} with last 3 digits {oct_perm[-3:]}") + if oct_perm[-3:] != '600': + 
logger.info("validate_db_permissions: Updated permission back to 600") + os.chmod(db_sqlite, stat.S_IWUSR + stat.S_IRUSR) \ No newline at end of file diff --git a/source/soca/cluster_web_ui/static/css/jquery-ui-lightness.-precss b/source/soca/cluster_web_ui/static/css/jquery-ui-lightness.-precss deleted file mode 100644 index ca6f5c72..00000000 --- a/source/soca/cluster_web_ui/static/css/jquery-ui-lightness.-precss +++ /dev/null @@ -1,1179 +0,0 @@ -/*! jQuery UI - v1.10.4 - 2014-01-17 -* http://jqueryui.com -* Includes: jquery.ui.core.css, jquery.ui.accordion.css, jquery.ui.autocomplete.css, jquery.ui.button.css, jquery.ui.datepicker.css, jquery.ui.dialog.css, jquery.ui.menu.css, jquery.ui.progressbar.css, jquery.ui.resizable.css, jquery.ui.selectable.css, jquery.ui.slider.css, jquery.ui.spinner.css, jquery.ui.tabs.css, jquery.ui.tooltip.css, jquery.ui.theme.css -* To view and modify this theme, visit http://jqueryui.com/themeroller/?ffDefault=Trebuchet%20MS%2CTahoma%2CVerdana%2CArial%2Csans-serif&fwDefault=bold&fsDefault=1.1em&cornerRadius=4px&bgColorHeader=f6a828&bgTextureHeader=gloss_wave&bgImgOpacityHeader=35&borderColorHeader=e78f08&fcHeader=ffffff&iconColorHeader=ffffff&bgColorContent=eeeeee&bgTextureContent=highlight_soft&bgImgOpacityContent=100&borderColorContent=dddddd&fcContent=333333&iconColorContent=222222&bgColorDefault=f6f6f6&bgTextureDefault=glass&bgImgOpacityDefault=100&borderColorDefault=cccccc&fcDefault=1c94c4&iconColorDefault=ef8c08&bgColorHover=fdf5ce&bgTextureHover=glass&bgImgOpacityHover=100&borderColorHover=fbcb09&fcHover=c77405&iconColorHover=ef8c08&bgColorActive=ffffff&bgTextureActive=glass&bgImgOpacityActive=65&borderColorActive=fbd850&fcActive=eb8f00&iconColorActive=ef8c08&bgColorHighlight=ffe45c&bgTextureHighlight=highlight_soft&bgImgOpacityHighlight=75&borderColorHighlight=fed22f&fcHighlight=363636&iconColorHighlight=228ef1&bgColorError=b81900&bgTextureError=diagonals_thick&bgImgOpacityError=18&borderColorError=cd0a0a&fcError=ffffff&icon
ColorError=ffd27a&bgColorOverlay=666666&bgTextureOverlay=diagonals_thick&bgImgOpacityOverlay=20&opacityOverlay=50&bgColorShadow=000000&bgTextureShadow=flat&bgImgOpacityShadow=10&opacityShadow=20&thicknessShadow=5px&offsetTopShadow=-5px&offsetLeftShadow=-5px&cornerRadiusShadow=5px -* Copyright 2014 jQuery Foundation and other contributors; Licensed MIT */ - -/* Layout helpers -----------------------------------*/ -.ui-helper-hidden { - display: none; -} -.ui-helper-hidden-accessible { - border: 0; - clip: rect(0 0 0 0); - height: 1px; - margin: -1px; - overflow: hidden; - padding: 0; - position: absolute; - width: 1px; -} -.ui-helper-reset { - margin: 0; - padding: 0; - border: 0; - outline: 0; - line-height: 1.3; - text-decoration: none; - font-size: 100%; - list-style: none; -} -.ui-helper-clearfix:before, -.ui-helper-clearfix:after { - content: ""; - display: table; - border-collapse: collapse; -} -.ui-helper-clearfix:after { - clear: both; -} -.ui-helper-clearfix { - min-height: 0; /* support: IE7 */ -} -.ui-helper-zfix { - width: 100%; - height: 100%; - top: 0; - left: 0; - position: absolute; - opacity: 0; - filter:Alpha(Opacity=0); -} - -.ui-front { - z-index: 100; -} - - -/* Interaction Cues -----------------------------------*/ -.ui-state-disabled { - cursor: default !important; -} - - -/* Icons -----------------------------------*/ - -/* states and images */ -.ui-icon { - display: block; - text-indent: -99999px; - overflow: hidden; - background-repeat: no-repeat; -} - - -/* Misc visuals -----------------------------------*/ - -/* Overlays */ -.ui-widget-overlay { - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; -} -.ui-accordion .ui-accordion-header { - display: block; - cursor: pointer; - position: relative; - margin-top: 2px; - padding: .5em .5em .5em .7em; - min-height: 0; /* support: IE7 */ -} -.ui-accordion .ui-accordion-icons { - padding-left: 2.2em; -} -.ui-accordion .ui-accordion-noicons { - padding-left: .7em; -} 
-.ui-accordion .ui-accordion-icons .ui-accordion-icons { - padding-left: 2.2em; -} -.ui-accordion .ui-accordion-header .ui-accordion-header-icon { - position: absolute; - left: .5em; - top: 50%; - margin-top: -8px; -} -.ui-accordion .ui-accordion-content { - padding: 1em 2.2em; - border-top: 0; - overflow: auto; -} -.ui-autocomplete { - position: absolute; - top: 0; - left: 0; - cursor: default; -} -.ui-button { - display: inline-block; - position: relative; - padding: 0; - line-height: normal; - margin-right: .1em; - cursor: pointer; - vertical-align: middle; - text-align: center; - overflow: visible; /* removes extra width in IE */ -} -.ui-button, -.ui-button:link, -.ui-button:visited, -.ui-button:hover, -.ui-button:active { - text-decoration: none; -} -/* to make room for the icon, a width needs to be set here */ -.ui-button-icon-only { - width: 2.2em; -} -/* button elements seem to need a little more width */ -button.ui-button-icon-only { - width: 2.4em; -} -.ui-button-icons-only { - width: 3.4em; -} -button.ui-button-icons-only { - width: 3.7em; -} - -/* button text element */ -.ui-button .ui-button-text { - display: block; - line-height: normal; -} -.ui-button-text-only .ui-button-text { - padding: .4em 1em; -} -.ui-button-icon-only .ui-button-text, -.ui-button-icons-only .ui-button-text { - padding: .4em; - text-indent: -9999999px; -} -.ui-button-text-icon-primary .ui-button-text, -.ui-button-text-icons .ui-button-text { - padding: .4em 1em .4em 2.1em; -} -.ui-button-text-icon-secondary .ui-button-text, -.ui-button-text-icons .ui-button-text { - padding: .4em 2.1em .4em 1em; -} -.ui-button-text-icons .ui-button-text { - padding-left: 2.1em; - padding-right: 2.1em; -} -/* no icon support for input elements, provide padding by default */ -input.ui-button { - padding: .4em 1em; -} - -/* button icon element(s) */ -.ui-button-icon-only .ui-icon, -.ui-button-text-icon-primary .ui-icon, -.ui-button-text-icon-secondary .ui-icon, -.ui-button-text-icons .ui-icon, 
-.ui-button-icons-only .ui-icon { - position: absolute; - top: 50%; - margin-top: -8px; -} -.ui-button-icon-only .ui-icon { - left: 50%; - margin-left: -8px; -} -.ui-button-text-icon-primary .ui-button-icon-primary, -.ui-button-text-icons .ui-button-icon-primary, -.ui-button-icons-only .ui-button-icon-primary { - left: .5em; -} -.ui-button-text-icon-secondary .ui-button-icon-secondary, -.ui-button-text-icons .ui-button-icon-secondary, -.ui-button-icons-only .ui-button-icon-secondary { - right: .5em; -} - -/* button sets */ -.ui-buttonset { - margin-right: 7px; -} -.ui-buttonset .ui-button { - margin-left: 0; - margin-right: -.3em; -} - -/* workarounds */ -/* reset extra padding in Firefox, see h5bp.com/l */ -input.ui-button::-moz-focus-inner, -button.ui-button::-moz-focus-inner { - border: 0; - padding: 0; -} -.ui-datepicker { - width: 17em; - padding: .2em .2em 0; - display: none; -} -.ui-datepicker .ui-datepicker-header { - position: relative; - padding: .2em 0; -} -.ui-datepicker .ui-datepicker-prev, -.ui-datepicker .ui-datepicker-next { - position: absolute; - top: 2px; - width: 1.8em; - height: 1.8em; -} -.ui-datepicker .ui-datepicker-prev-hover, -.ui-datepicker .ui-datepicker-next-hover { - top: 1px; -} -.ui-datepicker .ui-datepicker-prev { - left: 2px; -} -.ui-datepicker .ui-datepicker-next { - right: 2px; -} -.ui-datepicker .ui-datepicker-prev-hover { - left: 1px; -} -.ui-datepicker .ui-datepicker-next-hover { - right: 1px; -} -.ui-datepicker .ui-datepicker-prev span, -.ui-datepicker .ui-datepicker-next span { - display: block; - position: absolute; - left: 50%; - margin-left: -8px; - top: 50%; - margin-top: -8px; -} -.ui-datepicker .ui-datepicker-title { - margin: 0 2.3em; - line-height: 1.8em; - text-align: center; -} -.ui-datepicker .ui-datepicker-title select { - font-size: 1em; - margin: 1px 0; -} -.ui-datepicker select.ui-datepicker-month, -.ui-datepicker select.ui-datepicker-year { - width: 49%; -} -.ui-datepicker table { - width: 100%; - font-size: 
.9em; - border-collapse: collapse; - margin: 0 0 .4em; -} -.ui-datepicker th { - padding: .7em .3em; - text-align: center; - font-weight: bold; - border: 0; -} -.ui-datepicker td { - border: 0; - padding: 1px; -} -.ui-datepicker td span, -.ui-datepicker td a { - display: block; - padding: .2em; - text-align: right; - text-decoration: none; -} -.ui-datepicker .ui-datepicker-buttonpane { - background-image: none; - margin: .7em 0 0 0; - padding: 0 .2em; - border-left: 0; - border-right: 0; - border-bottom: 0; -} -.ui-datepicker .ui-datepicker-buttonpane button { - float: right; - margin: .5em .2em .4em; - cursor: pointer; - padding: .2em .6em .3em .6em; - width: auto; - overflow: visible; -} -.ui-datepicker .ui-datepicker-buttonpane button.ui-datepicker-current { - float: left; -} - -/* with multiple calendars */ -.ui-datepicker.ui-datepicker-multi { - width: auto; -} -.ui-datepicker-multi .ui-datepicker-group { - float: left; -} -.ui-datepicker-multi .ui-datepicker-group table { - width: 95%; - margin: 0 auto .4em; -} -.ui-datepicker-multi-2 .ui-datepicker-group { - width: 50%; -} -.ui-datepicker-multi-3 .ui-datepicker-group { - width: 33.3%; -} -.ui-datepicker-multi-4 .ui-datepicker-group { - width: 25%; -} -.ui-datepicker-multi .ui-datepicker-group-last .ui-datepicker-header, -.ui-datepicker-multi .ui-datepicker-group-middle .ui-datepicker-header { - border-left-width: 0; -} -.ui-datepicker-multi .ui-datepicker-buttonpane { - clear: left; -} -.ui-datepicker-row-break { - clear: both; - width: 100%; - font-size: 0; -} - -/* RTL support */ -.ui-datepicker-rtl { - direction: rtl; -} -.ui-datepicker-rtl .ui-datepicker-prev { - right: 2px; - left: auto; -} -.ui-datepicker-rtl .ui-datepicker-next { - left: 2px; - right: auto; -} -.ui-datepicker-rtl .ui-datepicker-prev:hover { - right: 1px; - left: auto; -} -.ui-datepicker-rtl .ui-datepicker-next:hover { - left: 1px; - right: auto; -} -.ui-datepicker-rtl .ui-datepicker-buttonpane { - clear: right; -} -.ui-datepicker-rtl 
.ui-datepicker-buttonpane button { - float: left; -} -.ui-datepicker-rtl .ui-datepicker-buttonpane button.ui-datepicker-current, -.ui-datepicker-rtl .ui-datepicker-group { - float: right; -} -.ui-datepicker-rtl .ui-datepicker-group-last .ui-datepicker-header, -.ui-datepicker-rtl .ui-datepicker-group-middle .ui-datepicker-header { - border-right-width: 0; - border-left-width: 1px; -} -.ui-dialog { - overflow: hidden; - position: absolute; - top: 0; - left: 0; - padding: .2em; - outline: 0; -} -.ui-dialog .ui-dialog-titlebar { - padding: .4em 1em; - position: relative; -} -.ui-dialog .ui-dialog-title { - float: left; - margin: .1em 0; - white-space: nowrap; - width: 90%; - overflow: hidden; - text-overflow: ellipsis; -} -.ui-dialog .ui-dialog-titlebar-close { - position: absolute; - right: .3em; - top: 50%; - width: 20px; - margin: -10px 0 0 0; - padding: 1px; - height: 20px; -} -.ui-dialog .ui-dialog-content { - position: relative; - border: 0; - padding: .5em 1em; - background: none; - overflow: auto; -} -.ui-dialog .ui-dialog-buttonpane { - text-align: left; - border-width: 1px 0 0 0; - background-image: none; - margin-top: .5em; - padding: .3em 1em .5em .4em; -} -.ui-dialog .ui-dialog-buttonpane .ui-dialog-buttonset { - float: right; -} -.ui-dialog .ui-dialog-buttonpane button { - margin: .5em .4em .5em 0; - cursor: pointer; -} -.ui-dialog .ui-resizable-se { - width: 12px; - height: 12px; - right: -5px; - bottom: -5px; - background-position: 16px 16px; -} -.ui-draggable .ui-dialog-titlebar { - cursor: move; -} -.ui-menu { - list-style: none; - padding: 2px; - margin: 0; - display: block; - outline: none; -} -.ui-menu .ui-menu { - margin-top: -3px; - position: absolute; -} -.ui-menu .ui-menu-item { - margin: 0; - padding: 0; - width: 100%; - /* support: IE10, see #8844 */ - list-style-image: url(data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7); -} -.ui-menu .ui-menu-divider { - margin: 5px -2px 5px -2px; - height: 0; - font-size: 0; 
- line-height: 0; - border-width: 1px 0 0 0; -} -.ui-menu .ui-menu-item a { - text-decoration: none; - display: block; - padding: 2px .4em; - line-height: 1.5; - min-height: 0; /* support: IE7 */ - font-weight: normal; -} -.ui-menu .ui-menu-item a.ui-state-focus, -.ui-menu .ui-menu-item a.ui-state-active { - font-weight: normal; - margin: -1px; -} - -.ui-menu .ui-state-disabled { - font-weight: normal; - margin: .4em 0 .2em; - line-height: 1.5; -} -.ui-menu .ui-state-disabled a { - cursor: default; -} - -/* icon support */ -.ui-menu-icons { - position: relative; -} -.ui-menu-icons .ui-menu-item a { - position: relative; - padding-left: 2em; -} - -/* left-aligned */ -.ui-menu .ui-icon { - position: absolute; - top: .2em; - left: .2em; -} - -/* right-aligned */ -.ui-menu .ui-menu-icon { - position: static; - float: right; -} -.ui-progressbar { - height: 2em; - text-align: left; - overflow: hidden; -} -.ui-progressbar .ui-progressbar-value { - margin: -1px; - height: 100%; -} -.ui-progressbar .ui-progressbar-overlay { - background: url("images/animated-overlay.gif"); - height: 100%; - filter: alpha(opacity=25); - opacity: 0.25; -} -.ui-progressbar-indeterminate .ui-progressbar-value { - background-image: none; -} -.ui-resizable { - position: relative; -} -.ui-resizable-handle { - position: absolute; - font-size: 0.1px; - display: block; -} -.ui-resizable-disabled .ui-resizable-handle, -.ui-resizable-autohide .ui-resizable-handle { - display: none; -} -.ui-resizable-n { - cursor: n-resize; - height: 7px; - width: 100%; - top: -5px; - left: 0; -} -.ui-resizable-s { - cursor: s-resize; - height: 7px; - width: 100%; - bottom: -5px; - left: 0; -} -.ui-resizable-e { - cursor: e-resize; - width: 7px; - right: -5px; - top: 0; - height: 100%; -} -.ui-resizable-w { - cursor: w-resize; - width: 7px; - left: -5px; - top: 0; - height: 100%; -} -.ui-resizable-se { - cursor: se-resize; - width: 12px; - height: 12px; - right: 1px; - bottom: 1px; -} -.ui-resizable-sw { - cursor: 
sw-resize; - width: 9px; - height: 9px; - left: -5px; - bottom: -5px; -} -.ui-resizable-nw { - cursor: nw-resize; - width: 9px; - height: 9px; - left: -5px; - top: -5px; -} -.ui-resizable-ne { - cursor: ne-resize; - width: 9px; - height: 9px; - right: -5px; - top: -5px; -} -.ui-selectable-helper { - position: absolute; - z-index: 100; - border: 1px dotted black; -} -.ui-slider { - position: relative; - text-align: left; -} -.ui-slider .ui-slider-handle { - position: absolute; - z-index: 2; - width: 1.2em; - height: 1.2em; - cursor: default; -} -.ui-slider .ui-slider-range { - position: absolute; - z-index: 1; - font-size: .7em; - display: block; - border: 0; - background-position: 0 0; -} - -/* For IE8 - See #6727 */ -.ui-slider.ui-state-disabled .ui-slider-handle, -.ui-slider.ui-state-disabled .ui-slider-range { - filter: inherit; -} - -.ui-slider-horizontal { - height: .8em; -} -.ui-slider-horizontal .ui-slider-handle { - top: -.3em; - margin-left: -.6em; -} -.ui-slider-horizontal .ui-slider-range { - top: 0; - height: 100%; -} -.ui-slider-horizontal .ui-slider-range-min { - left: 0; -} -.ui-slider-horizontal .ui-slider-range-max { - right: 0; -} - -.ui-slider-vertical { - width: .8em; - height: 100px; -} -.ui-slider-vertical .ui-slider-handle { - left: -.3em; - margin-left: 0; - margin-bottom: -.6em; -} -.ui-slider-vertical .ui-slider-range { - left: 0; - width: 100%; -} -.ui-slider-vertical .ui-slider-range-min { - bottom: 0; -} -.ui-slider-vertical .ui-slider-range-max { - top: 0; -} -.ui-spinner { - position: relative; - display: inline-block; - overflow: hidden; - padding: 0; - vertical-align: middle; -} -.ui-spinner-input { - border: none; - background: none; - color: inherit; - padding: 0; - margin: .2em 0; - vertical-align: middle; - margin-left: .4em; - margin-right: 22px; -} -.ui-spinner-button { - width: 16px; - height: 50%; - font-size: .5em; - padding: 0; - margin: 0; - text-align: center; - position: absolute; - cursor: default; - display: block; - 
overflow: hidden; - right: 0; -} -/* more specificity required here to override default borders */ -.ui-spinner a.ui-spinner-button { - border-top: none; - border-bottom: none; - border-right: none; -} -/* vertically center icon */ -.ui-spinner .ui-icon { - position: absolute; - margin-top: -8px; - top: 50%; - left: 0; -} -.ui-spinner-up { - top: 0; -} -.ui-spinner-down { - bottom: 0; -} - -/* TR overrides */ -.ui-spinner .ui-icon-triangle-1-s { - /* need to fix icons sprite */ - background-position: -65px -16px; -} -.ui-tabs { - position: relative;/* position: relative prevents IE scroll bug (element with position: relative inside container with overflow: auto appear as "fixed") */ - padding: .2em; -} -.ui-tabs .ui-tabs-nav { - margin: 0; - padding: .2em .2em 0; -} -.ui-tabs .ui-tabs-nav li { - list-style: none; - float: left; - position: relative; - top: 0; - margin: 1px .2em 0 0; - border-bottom-width: 0; - padding: 0; - white-space: nowrap; -} -.ui-tabs .ui-tabs-nav .ui-tabs-anchor { - float: left; - padding: .5em 1em; - text-decoration: none; -} -.ui-tabs .ui-tabs-nav li.ui-tabs-active { - margin-bottom: -1px; - padding-bottom: 1px; -} -.ui-tabs .ui-tabs-nav li.ui-tabs-active .ui-tabs-anchor, -.ui-tabs .ui-tabs-nav li.ui-state-disabled .ui-tabs-anchor, -.ui-tabs .ui-tabs-nav li.ui-tabs-loading .ui-tabs-anchor { - cursor: text; -} -.ui-tabs-collapsible .ui-tabs-nav li.ui-tabs-active .ui-tabs-anchor { - cursor: pointer; -} -.ui-tabs .ui-tabs-panel { - display: block; - border-width: 0; - padding: 1em 1.4em; - background: none; -} -/* -.ui-tooltip { - padding: 8px; - position: absolute; - z-index: 9999; - max-width: 300px; - -webkit-box-shadow: 0 0 5px #aaa; - box-shadow: 0 0 5px #aaa; -} -body .ui-tooltip { - border-width: 2px; -}*/ - -/* Component containers -----------------------------------*/ -.ui-widget { - font-family: Trebuchet MS,Tahoma,Verdana,Arial,sans-serif; - font-size: 1.1em; -} -.ui-widget .ui-widget { - font-size: 1em; -} -.ui-widget input, 
-.ui-widget select, -.ui-widget textarea, -.ui-widget button { - font-family: Trebuchet MS,Tahoma,Verdana,Arial,sans-serif; - font-size: 1em; -} -.ui-widget-content { - border: 1px solid #dddddd; - background: #eeeeee url(images/ui-bg_highlight-soft_100_eeeeee_1x100.png) 50% top repeat-x; - color: #333333; -} -.ui-widget-content a { - color: #333333; -} -.ui-widget-header { - border: 1px solid #e78f08; - background: #f6a828 url(images/ui-bg_gloss-wave_35_f6a828_500x100.png) 50% 50% repeat-x; - color: #ffffff; - font-weight: bold; -} -.ui-widget-header a { - color: #ffffff; -} - -/* Interaction states -----------------------------------*/ -.ui-state-default, -.ui-widget-content .ui-state-default, -.ui-widget-header .ui-state-default { - border: 1px solid #cccccc; - background: #f6f6f6 url(images/ui-bg_glass_100_f6f6f6_1x400.png) 50% 50% repeat-x; - font-weight: bold; - color: #1c94c4; -} -.ui-state-default a, -.ui-state-default a:link, -.ui-state-default a:visited { - color: #1c94c4; - text-decoration: none; -} -.ui-state-hover, -.ui-widget-content .ui-state-hover, -.ui-widget-header .ui-state-hover, -.ui-state-focus, -.ui-widget-content .ui-state-focus, -.ui-widget-header .ui-state-focus { - border: 1px solid #fbcb09; - background: #fdf5ce url(images/ui-bg_glass_100_fdf5ce_1x400.png) 50% 50% repeat-x; - font-weight: bold; - color: #c77405; -} -.ui-state-hover a, -.ui-state-hover a:hover, -.ui-state-hover a:link, -.ui-state-hover a:visited, -.ui-state-focus a, -.ui-state-focus a:hover, -.ui-state-focus a:link, -.ui-state-focus a:visited { - color: #c77405; - text-decoration: none; -} -.ui-state-active, -.ui-widget-content .ui-state-active, -.ui-widget-header .ui-state-active { - border: 1px solid #fbd850; - background: #ffffff url(images/ui-bg_glass_65_ffffff_1x400.png) 50% 50% repeat-x; - font-weight: bold; - color: #eb8f00; -} -.ui-state-active a, -.ui-state-active a:link, -.ui-state-active a:visited { - color: #eb8f00; - text-decoration: none; -} - -/* 
Interaction Cues -----------------------------------*/ -.ui-state-highlight, -.ui-widget-content .ui-state-highlight, -.ui-widget-header .ui-state-highlight { - border: 1px solid #fed22f; - background: #ffe45c url(images/ui-bg_highlight-soft_75_ffe45c_1x100.png) 50% top repeat-x; - color: #363636; -} -.ui-state-highlight a, -.ui-widget-content .ui-state-highlight a, -.ui-widget-header .ui-state-highlight a { - color: #363636; -} -.ui-state-error, -.ui-widget-content .ui-state-error, -.ui-widget-header .ui-state-error { - border: 1px solid #cd0a0a; - background: #b81900 url(images/ui-bg_diagonals-thick_18_b81900_40x40.png) 50% 50% repeat; - color: #ffffff; -} -.ui-state-error a, -.ui-widget-content .ui-state-error a, -.ui-widget-header .ui-state-error a { - color: #ffffff; -} -.ui-state-error-text, -.ui-widget-content .ui-state-error-text, -.ui-widget-header .ui-state-error-text { - color: #ffffff; -} -.ui-priority-primary, -.ui-widget-content .ui-priority-primary, -.ui-widget-header .ui-priority-primary { - font-weight: bold; -} -.ui-priority-secondary, -.ui-widget-content .ui-priority-secondary, -.ui-widget-header .ui-priority-secondary { - opacity: .7; - filter:Alpha(Opacity=70); - font-weight: normal; -} -.ui-state-disabled, -.ui-widget-content .ui-state-disabled, -.ui-widget-header .ui-state-disabled { - opacity: .35; - filter:Alpha(Opacity=35); - background-image: none; -} -.ui-state-disabled .ui-icon { - filter:Alpha(Opacity=35); /* For IE8 - See #6059 */ -} - -/* Icons -----------------------------------*/ - -/* states and images */ -.ui-icon { - width: 16px; - height: 16px; -} -.ui-icon, -.ui-widget-content .ui-icon { - background-image: url(images/ui-icons_222222_256x240.png); -} -.ui-widget-header .ui-icon { - background-image: url(images/ui-icons_ffffff_256x240.png); -} -.ui-state-default .ui-icon { - background-image: url(images/ui-icons_ef8c08_256x240.png); -} -.ui-state-hover .ui-icon, -.ui-state-focus .ui-icon { - background-image: 
url(images/ui-icons_ef8c08_256x240.png); -} -.ui-state-active .ui-icon { - background-image: url(images/ui-icons_ef8c08_256x240.png); -} -.ui-state-highlight .ui-icon { - background-image: url(images/ui-icons_228ef1_256x240.png); -} -.ui-state-error .ui-icon, -.ui-state-error-text .ui-icon { - background-image: url(images/ui-icons_ffd27a_256x240.png); -} - -/* positioning */ -.ui-icon-blank { background-position: 16px 16px; } -.ui-icon-carat-1-n { background-position: 0 0; } -.ui-icon-carat-1-ne { background-position: -16px 0; } -.ui-icon-carat-1-e { background-position: -32px 0; } -.ui-icon-carat-1-se { background-position: -48px 0; } -.ui-icon-carat-1-s { background-position: -64px 0; } -.ui-icon-carat-1-sw { background-position: -80px 0; } -.ui-icon-carat-1-w { background-position: -96px 0; } -.ui-icon-carat-1-nw { background-position: -112px 0; } -.ui-icon-carat-2-n-s { background-position: -128px 0; } -.ui-icon-carat-2-e-w { background-position: -144px 0; } -.ui-icon-triangle-1-n { background-position: 0 -16px; } -.ui-icon-triangle-1-ne { background-position: -16px -16px; } -.ui-icon-triangle-1-e { background-position: -32px -16px; } -.ui-icon-triangle-1-se { background-position: -48px -16px; } -.ui-icon-triangle-1-s { background-position: -64px -16px; } -.ui-icon-triangle-1-sw { background-position: -80px -16px; } -.ui-icon-triangle-1-w { background-position: -96px -16px; } -.ui-icon-triangle-1-nw { background-position: -112px -16px; } -.ui-icon-triangle-2-n-s { background-position: -128px -16px; } -.ui-icon-triangle-2-e-w { background-position: -144px -16px; } -.ui-icon-arrow-1-n { background-position: 0 -32px; } -.ui-icon-arrow-1-ne { background-position: -16px -32px; } -.ui-icon-arrow-1-e { background-position: -32px -32px; } -.ui-icon-arrow-1-se { background-position: -48px -32px; } -.ui-icon-arrow-1-s { background-position: -64px -32px; } -.ui-icon-arrow-1-sw { background-position: -80px -32px; } -.ui-icon-arrow-1-w { background-position: -96px -32px; } 
-.ui-icon-arrow-1-nw { background-position: -112px -32px; } -.ui-icon-arrow-2-n-s { background-position: -128px -32px; } -.ui-icon-arrow-2-ne-sw { background-position: -144px -32px; } -.ui-icon-arrow-2-e-w { background-position: -160px -32px; } -.ui-icon-arrow-2-se-nw { background-position: -176px -32px; } -.ui-icon-arrowstop-1-n { background-position: -192px -32px; } -.ui-icon-arrowstop-1-e { background-position: -208px -32px; } -.ui-icon-arrowstop-1-s { background-position: -224px -32px; } -.ui-icon-arrowstop-1-w { background-position: -240px -32px; } -.ui-icon-arrowthick-1-n { background-position: 0 -48px; } -.ui-icon-arrowthick-1-ne { background-position: -16px -48px; } -.ui-icon-arrowthick-1-e { background-position: -32px -48px; } -.ui-icon-arrowthick-1-se { background-position: -48px -48px; } -.ui-icon-arrowthick-1-s { background-position: -64px -48px; } -.ui-icon-arrowthick-1-sw { background-position: -80px -48px; } -.ui-icon-arrowthick-1-w { background-position: -96px -48px; } -.ui-icon-arrowthick-1-nw { background-position: -112px -48px; } -.ui-icon-arrowthick-2-n-s { background-position: -128px -48px; } -.ui-icon-arrowthick-2-ne-sw { background-position: -144px -48px; } -.ui-icon-arrowthick-2-e-w { background-position: -160px -48px; } -.ui-icon-arrowthick-2-se-nw { background-position: -176px -48px; } -.ui-icon-arrowthickstop-1-n { background-position: -192px -48px; } -.ui-icon-arrowthickstop-1-e { background-position: -208px -48px; } -.ui-icon-arrowthickstop-1-s { background-position: -224px -48px; } -.ui-icon-arrowthickstop-1-w { background-position: -240px -48px; } -.ui-icon-arrowreturnthick-1-w { background-position: 0 -64px; } -.ui-icon-arrowreturnthick-1-n { background-position: -16px -64px; } -.ui-icon-arrowreturnthick-1-e { background-position: -32px -64px; } -.ui-icon-arrowreturnthick-1-s { background-position: -48px -64px; } -.ui-icon-arrowreturn-1-w { background-position: -64px -64px; } -.ui-icon-arrowreturn-1-n { background-position: -80px 
-64px; } -.ui-icon-arrowreturn-1-e { background-position: -96px -64px; } -.ui-icon-arrowreturn-1-s { background-position: -112px -64px; } -.ui-icon-arrowrefresh-1-w { background-position: -128px -64px; } -.ui-icon-arrowrefresh-1-n { background-position: -144px -64px; } -.ui-icon-arrowrefresh-1-e { background-position: -160px -64px; } -.ui-icon-arrowrefresh-1-s { background-position: -176px -64px; } -.ui-icon-arrow-4 { background-position: 0 -80px; } -.ui-icon-arrow-4-diag { background-position: -16px -80px; } -.ui-icon-extlink { background-position: -32px -80px; } -.ui-icon-newwin { background-position: -48px -80px; } -.ui-icon-refresh { background-position: -64px -80px; } -.ui-icon-shuffle { background-position: -80px -80px; } -.ui-icon-transfer-e-w { background-position: -96px -80px; } -.ui-icon-transferthick-e-w { background-position: -112px -80px; } -.ui-icon-folder-collapsed { background-position: 0 -96px; } -.ui-icon-folder-open { background-position: -16px -96px; } -.ui-icon-document { background-position: -32px -96px; } -.ui-icon-document-b { background-position: -48px -96px; } -.ui-icon-note { background-position: -64px -96px; } -.ui-icon-mail-closed { background-position: -80px -96px; } -.ui-icon-mail-open { background-position: -96px -96px; } -.ui-icon-suitcase { background-position: -112px -96px; } -.ui-icon-comment { background-position: -128px -96px; } -.ui-icon-person { background-position: -144px -96px; } -.ui-icon-print { background-position: -160px -96px; } -.ui-icon-trash { background-position: -176px -96px; } -.ui-icon-locked { background-position: -192px -96px; } -.ui-icon-unlocked { background-position: -208px -96px; } -.ui-icon-bookmark { background-position: -224px -96px; } -.ui-icon-tag { background-position: -240px -96px; } -.ui-icon-home { background-position: 0 -112px; } -.ui-icon-flag { background-position: -16px -112px; } -.ui-icon-calendar { background-position: -32px -112px; } -.ui-icon-cart { background-position: -48px -112px; } 
-.ui-icon-pencil { background-position: -64px -112px; } -.ui-icon-clock { background-position: -80px -112px; } -.ui-icon-disk { background-position: -96px -112px; } -.ui-icon-calculator { background-position: -112px -112px; } -.ui-icon-zoomin { background-position: -128px -112px; } -.ui-icon-zoomout { background-position: -144px -112px; } -.ui-icon-search { background-position: -160px -112px; } -.ui-icon-wrench { background-position: -176px -112px; } -.ui-icon-gear { background-position: -192px -112px; } -.ui-icon-heart { background-position: -208px -112px; } -.ui-icon-star { background-position: -224px -112px; } -.ui-icon-link { background-position: -240px -112px; } -.ui-icon-cancel { background-position: 0 -128px; } -.ui-icon-plus { background-position: -16px -128px; } -.ui-icon-plusthick { background-position: -32px -128px; } -.ui-icon-minus { background-position: -48px -128px; } -.ui-icon-minusthick { background-position: -64px -128px; } -.ui-icon-close { background-position: -80px -128px; } -.ui-icon-closethick { background-position: -96px -128px; } -.ui-icon-key { background-position: -112px -128px; } -.ui-icon-lightbulb { background-position: -128px -128px; } -.ui-icon-scissors { background-position: -144px -128px; } -.ui-icon-clipboard { background-position: -160px -128px; } -.ui-icon-copy { background-position: -176px -128px; } -.ui-icon-contact { background-position: -192px -128px; } -.ui-icon-image { background-position: -208px -128px; } -.ui-icon-video { background-position: -224px -128px; } -.ui-icon-script { background-position: -240px -128px; } -.ui-icon-alert { background-position: 0 -144px; } -.ui-icon-info { background-position: -16px -144px; } -.ui-icon-notice { background-position: -32px -144px; } -.ui-icon-help { background-position: -48px -144px; } -.ui-icon-check { background-position: -64px -144px; } -.ui-icon-bullet { background-position: -80px -144px; } -.ui-icon-radio-on { background-position: -96px -144px; } -.ui-icon-radio-off { 
background-position: -112px -144px; } -.ui-icon-pin-w { background-position: -128px -144px; } -.ui-icon-pin-s { background-position: -144px -144px; } -.ui-icon-play { background-position: 0 -160px; } -.ui-icon-pause { background-position: -16px -160px; } -.ui-icon-seek-next { background-position: -32px -160px; } -.ui-icon-seek-prev { background-position: -48px -160px; } -.ui-icon-seek-end { background-position: -64px -160px; } -.ui-icon-seek-start { background-position: -80px -160px; } -/* ui-icon-seek-first is deprecated, use ui-icon-seek-start instead */ -.ui-icon-seek-first { background-position: -80px -160px; } -.ui-icon-stop { background-position: -96px -160px; } -.ui-icon-eject { background-position: -112px -160px; } -.ui-icon-volume-off { background-position: -128px -160px; } -.ui-icon-volume-on { background-position: -144px -160px; } -.ui-icon-power { background-position: 0 -176px; } -.ui-icon-signal-diag { background-position: -16px -176px; } -.ui-icon-signal { background-position: -32px -176px; } -.ui-icon-battery-0 { background-position: -48px -176px; } -.ui-icon-battery-1 { background-position: -64px -176px; } -.ui-icon-battery-2 { background-position: -80px -176px; } -.ui-icon-battery-3 { background-position: -96px -176px; } -.ui-icon-circle-plus { background-position: 0 -192px; } -.ui-icon-circle-minus { background-position: -16px -192px; } -.ui-icon-circle-close { background-position: -32px -192px; } -.ui-icon-circle-triangle-e { background-position: -48px -192px; } -.ui-icon-circle-triangle-s { background-position: -64px -192px; } -.ui-icon-circle-triangle-w { background-position: -80px -192px; } -.ui-icon-circle-triangle-n { background-position: -96px -192px; } -.ui-icon-circle-arrow-e { background-position: -112px -192px; } -.ui-icon-circle-arrow-s { background-position: -128px -192px; } -.ui-icon-circle-arrow-w { background-position: -144px -192px; } -.ui-icon-circle-arrow-n { background-position: -160px -192px; } -.ui-icon-circle-zoomin { 
background-position: -176px -192px; } -.ui-icon-circle-zoomout { background-position: -192px -192px; } -.ui-icon-circle-check { background-position: -208px -192px; } -.ui-icon-circlesmall-plus { background-position: 0 -208px; } -.ui-icon-circlesmall-minus { background-position: -16px -208px; } -.ui-icon-circlesmall-close { background-position: -32px -208px; } -.ui-icon-squaresmall-plus { background-position: -48px -208px; } -.ui-icon-squaresmall-minus { background-position: -64px -208px; } -.ui-icon-squaresmall-close { background-position: -80px -208px; } -.ui-icon-grip-dotted-vertical { background-position: 0 -224px; } -.ui-icon-grip-dotted-horizontal { background-position: -16px -224px; } -.ui-icon-grip-solid-vertical { background-position: -32px -224px; } -.ui-icon-grip-solid-horizontal { background-position: -48px -224px; } -.ui-icon-gripsmall-diagonal-se { background-position: -64px -224px; } -.ui-icon-grip-diagonal-se { background-position: -80px -224px; } - - -/* Misc visuals -----------------------------------*/ - -/* Corner radius */ -.ui-corner-all, -.ui-corner-top, -.ui-corner-left, -.ui-corner-tl { - border-top-left-radius: 4px; -} -.ui-corner-all, -.ui-corner-top, -.ui-corner-right, -.ui-corner-tr { - border-top-right-radius: 4px; -} -.ui-corner-all, -.ui-corner-bottom, -.ui-corner-left, -.ui-corner-bl { - border-bottom-left-radius: 4px; -} -.ui-corner-all, -.ui-corner-bottom, -.ui-corner-right, -.ui-corner-br { - border-bottom-right-radius: 4px; -} - -/* Overlays */ -.ui-widget-overlay { - background: #666666 url(images/ui-bg_diagonals-thick_20_666666_40x40.png) 50% 50% repeat; - opacity: .5; - filter: Alpha(Opacity=50); -} -.ui-widget-shadow { - margin: -5px 0 0 -5px; - padding: 5px; - background: #000000 url(images/ui-bg_flat_10_000000_40x100.png) 50% 50% repeat-x; - opacity: .2; - filter: Alpha(Opacity=20); - border-radius: 5px; -} diff --git a/source/soca/cluster_web_ui/static/css/jquery-ui-slider-pips.min.css 
b/source/soca/cluster_web_ui/static/css/jquery-ui-slider-pips.min.css deleted file mode 100644 index 7badd717..00000000 --- a/source/soca/cluster_web_ui/static/css/jquery-ui-slider-pips.min.css +++ /dev/null @@ -1,4 +0,0 @@ -/*! jQuery-ui-Slider-Pips - v1.11.4 - 2016-09-04 -* Copyright (c) 2016 Simon Goellner ; Licensed MIT */ - -.ui-slider-horizontal.ui-slider-pips{margin-bottom:1.4em}.ui-slider-pips .ui-slider-label,.ui-slider-pips .ui-slider-pip-hide{display:none}.ui-slider-pips .ui-slider-pip-label .ui-slider-label{display:block}.ui-slider-pips .ui-slider-pip{width:2em;height:1em;line-height:1em;position:absolute;font-size:0.8em;color:#999;overflow:visible;text-align:center;top:20px;left:20px;margin-left:-1em;cursor:pointer;-webkit-touch-callout:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.ui-state-disabled.ui-slider-pips .ui-slider-pip{cursor:default}.ui-slider-pips .ui-slider-line{background:#999;width:1px;height:3px;position:absolute;left:50%}.ui-slider-pips .ui-slider-label{position:absolute;top:5px;left:50%;margin-left:-1em;width:2em}.ui-slider-pips:not(.ui-slider-disabled) .ui-slider-pip:hover .ui-slider-label{color:black;font-weight:bold}.ui-slider-vertical.ui-slider-pips{margin-bottom:1em;margin-right:2em}.ui-slider-vertical.ui-slider-pips .ui-slider-pip{text-align:left;top:auto;left:20px;margin-left:0;margin-bottom:-0.5em}.ui-slider-vertical.ui-slider-pips .ui-slider-line{width:3px;height:1px;position:absolute;top:50%;left:0}.ui-slider-vertical.ui-slider-pips .ui-slider-label{top:50%;left:0.5em;margin-left:0;margin-top:-0.5em;width:2em}.ui-slider-float .ui-slider-handle:focus,.ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip-label,.ui-slider-float .ui-slider-handle:focus .ui-slider-tip,.ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip-label,.ui-slider-float .ui-slider-handle:focus .ui-slider-tip-label .ui-slider-float .ui-slider-handle.ui-state-focus 
.ui-slider-tip-label{outline:none}.ui-slider-float .ui-slider-tip,.ui-slider-float .ui-slider-tip-label{position:absolute;visibility:hidden;top:-40px;display:block;width:34px;margin-left:-18px;left:50%;height:20px;line-height:20px;background:white;border-radius:3px;border:1px solid #888;text-align:center;font-size:12px;opacity:0;color:#333;-webkit-transition-property:opacity, top, visibility;transition-property:opacity, top, visibility;-webkit-transition-timing-function:ease-in;transition-timing-function:ease-in;-webkit-transition-duration:200ms, 200ms, 0ms;transition-duration:200ms, 200ms, 0ms;-webkit-transition-delay:0ms, 0ms, 200ms;transition-delay:0ms, 0ms, 200ms}.ui-slider-float .ui-slider-handle:hover .ui-slider-tip,.ui-slider-float .ui-slider-handle.ui-state-hover .ui-slider-tip,.ui-slider-float .ui-slider-handle:focus .ui-slider-tip,.ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip,.ui-slider-float .ui-slider-handle.ui-state-active .ui-slider-tip,.ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label{opacity:1;top:-30px;visibility:visible;-webkit-transition-timing-function:ease-out;transition-timing-function:ease-out;-webkit-transition-delay:200ms, 200ms, 0ms;transition-delay:200ms, 200ms, 0ms}.ui-slider-float .ui-slider-pip .ui-slider-tip-label{top:42px}.ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label{top:32px;font-weight:normal}.ui-slider-float .ui-slider-tip:after,.ui-slider-float .ui-slider-pip .ui-slider-tip-label:after{content:" ";width:0;height:0;border:5px solid rgba(255,255,255,0);border-top-color:#fff;position:absolute;bottom:-10px;left:50%;margin-left:-5px}.ui-slider-float .ui-slider-tip:before,.ui-slider-float .ui-slider-pip .ui-slider-tip-label:before{content:" ";width:0;height:0;border:5px solid rgba(255,255,255,0);border-top-color:#888;position:absolute;bottom:-11px;left:50%;margin-left:-5px}.ui-slider-float .ui-slider-pip .ui-slider-tip-label:after{border:5px solid 
rgba(255,255,255,0);border-bottom-color:#fff;top:-10px}.ui-slider-float .ui-slider-pip .ui-slider-tip-label:before{border:5px solid rgba(255,255,255,0);border-bottom-color:#888;top:-11px}.ui-slider-vertical.ui-slider-float .ui-slider-tip,.ui-slider-vertical.ui-slider-float .ui-slider-tip-label{top:50%;margin-top:-11px;width:34px;margin-left:0px;left:-60px;color:#333;-webkit-transition-duration:200ms, 200ms, 0;transition-duration:200ms, 200ms, 0;-webkit-transition-property:opacity, left, visibility;transition-property:opacity, left, visibility;-webkit-transition-delay:0, 0, 200ms;transition-delay:0, 0, 200ms}.ui-slider-vertical.ui-slider-float .ui-slider-handle:hover .ui-slider-tip,.ui-slider-vertical.ui-slider-float .ui-slider-handle.ui-state-hover .ui-slider-tip,.ui-slider-vertical.ui-slider-float .ui-slider-handle:focus .ui-slider-tip,.ui-slider-vertical.ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip,.ui-slider-vertical.ui-slider-float .ui-slider-handle.ui-state-active .ui-slider-tip,.ui-slider-vertical.ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label{top:50%;margin-top:-11px;left:-50px}.ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label{left:47px}.ui-slider-vertical.ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label{left:37px}.ui-slider-vertical.ui-slider-float .ui-slider-tip:after,.ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label:after{border:5px solid rgba(255,255,255,0);border-left-color:#fff;border-top-color:transparent;position:absolute;bottom:50%;margin-bottom:-5px;right:-10px;margin-left:0;top:auto;left:auto}.ui-slider-vertical.ui-slider-float .ui-slider-tip:before,.ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label:before{border:5px solid rgba(255,255,255,0);border-left-color:#888;border-top-color:transparent;position:absolute;bottom:50%;margin-bottom:-5px;right:-11px;margin-left:0;top:auto;left:auto}.ui-slider-vertical.ui-slider-float .ui-slider-pip 
.ui-slider-tip-label:after{border:5px solid rgba(255,255,255,0);border-right-color:#fff;right:auto;left:-10px}.ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label:before{border:5px solid rgba(255,255,255,0);border-right-color:#888;right:auto;left:-11px}.ui-slider-pips [class*=ui-slider-pip-initial]{font-weight:bold;color:#14CA82}.ui-slider-pips .ui-slider-pip-initial-2{color:#1897C9}.ui-slider-pips [class*=ui-slider-pip-selected]{font-weight:bold;color:#FF7A00}.ui-slider-pips .ui-slider-pip-inrange{color:black}.ui-slider-pips .ui-slider-pip-selected-2{color:#E70081}.ui-slider-pips [class*=ui-slider-pip-selected] .ui-slider-line,.ui-slider-pips .ui-slider-pip-inrange .ui-slider-line{background:black} diff --git a/source/soca/cluster_web_ui/static/js/jquery-ui-slider-pips.min.js b/source/soca/cluster_web_ui/static/js/jquery-ui-slider-pips.min.js deleted file mode 100644 index a866ead0..00000000 --- a/source/soca/cluster_web_ui/static/js/jquery-ui-slider-pips.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! 
jQuery-ui-Slider-Pips - v1.11.4 - 2016-09-04 -* Copyright (c) 2016 Simon Goellner ; Licensed MIT */ - -!function(e){"use strict";var i={pips:function(i){function l(i){var l,s,t,a,n,r=[],o=0;if(u.values()&&u.values().length){for(t=u.values(),a=e.map(t,function(e){return Math.abs(e-i)}),n=Math.min.apply(Math,a),l=0;lt[1]?o=r[1]:iv[0]&&tt||"max"===u.options.range&&t>f)&&(p+=" ui-slider-pip-inrange");return d="horizontal"===u.options.orientation?"left: "+s:"bottom: "+s,''+g.formatLabel(l)+""}var n,r,o,p,d,u=this,f="",c=u._valueMin(),v=u._valueMax(),h=(v-c)/u.options.step,m=u.element.find(".ui-slider-handle"),g={first:"label",last:"label",rest:"pip",labels:!1,prefix:"",suffix:"",step:h>100?Math.floor(.05*h):1,formatLabel:function(e){return this.prefix+e+this.suffix}};if("object"!==e.type(i)&&"undefined"!==e.type(i))return void("destroy"===i?s():"refresh"===i&&u.element.slider("pips",u.element.data("pips-options")));e.extend(g,i),u.element.data("pips-options",g),u.options.pipStep=Math.abs(Math.round(g.step))||1,u.element.off(".selectPip").addClass("ui-slider-pips").find(".ui-slider-pip").remove();var b={single:function(i){this.resetClasses(),d.filter(".ui-slider-pip-"+this.classLabel(i)).addClass("ui-slider-pip-selected"),u.options.range&&d.each(function(l,s){var t=e(s).children(".ui-slider-label").data("value");("min"===u.options.range&&i>t||"max"===u.options.range&&t>i)&&e(s).addClass("ui-slider-pip-inrange")})},range:function(i){for(this.resetClasses(),n=0;ni[0]&&to;o+=u.options.pipStep)f+=a(o);for(f+=a("last"),u.element.append(f),d=u.element.find(".ui-slider-pip"),p=e._data(u.element.get(0),"events").mousedown&&e._data(u.element.get(0),"events").mousedown.length?e._data(u.element.get(0),"events").mousedown:u.element.data("mousedown-handlers"),u.element.data("mousedown-handlers",p.slice()),r=0;ro&&(o=n),o>r&&(o=r),p&&p.length)for(t=0;tr&&(p[t]=r);if(a.element.addClass("ui-slider-float").find(".ui-slider-tip, 
.ui-slider-tip-label").remove(),f.handle)for(d=s(a.values()&&a.values().length?p:[o]),t=0;t'+f.formatLabel(d[t])+""));f.pips&&a.element.find(".ui-slider-label").each(function(i,l){var t,a,n=e(l),r=[n.data("value")];t=f.formatLabel(s(r)[0]),a=e(''+t+"").insertAfter(n)}),"slide"!==f.event&&"slidechange"!==f.event&&"slide slidechange"!==f.event&&"slidechange slide"!==f.event&&(f.event="slidechange slide"),a.element.off(".sliderFloat").on(f.event+".sliderFloat",function(i,l){var t="array"===e.type(l.value)?l.value:[l.value],a=f.formatLabel(s(t)[0]);e(l.handle).find(".ui-slider-tip").html(a)})}};e.extend(!0,e.ui.slider.prototype,i)}(jQuery); \ No newline at end of file diff --git a/source/soca/cluster_web_ui/templates/common/horizontal_menu_bar.html b/source/soca/cluster_web_ui/templates/common/horizontal_menu_bar.html index b99149f1..99fca71b 100644 --- a/source/soca/cluster_web_ui/templates/common/horizontal_menu_bar.html +++ b/source/soca/cluster_web_ui/templates/common/horizontal_menu_bar.html @@ -1,5 +1,5 @@
-

Scale-Out Computing on AWS (version 2.6.0)

+

Scale-Out Computing on AWS (version 2.6.1)

Source Code Help and Support diff --git a/source/soca/cluster_web_ui/templates/remote_desktop.html b/source/soca/cluster_web_ui/templates/remote_desktop.html index fe4806d5..ca920316 100644 --- a/source/soca/cluster_web_ui/templates/remote_desktop.html +++ b/source/soca/cluster_web_ui/templates/remote_desktop.html @@ -184,7 +184,7 @@
{% if session_data.session_state == 'pending' %} diff --git a/source/soca/cluster_web_ui/templates/submit_job.html b/source/soca/cluster_web_ui/templates/submit_job.html index c904e594..4f4fd10c 100644 --- a/source/soca/cluster_web_ui/templates/submit_job.html +++ b/source/soca/cluster_web_ui/templates/submit_job.html @@ -41,7 +41,7 @@
- +

diff --git a/source/soca/cluster_web_ui/templates/submit_job_selected_application.html b/source/soca/cluster_web_ui/templates/submit_job_selected_application.html index f55ce5ca..56b10966 100644 --- a/source/soca/cluster_web_ui/templates/submit_job_selected_application.html +++ b/source/soca/cluster_web_ui/templates/submit_job_selected_application.html @@ -87,7 +87,7 @@

Theses numbers are just an estimate based on: -
  • Does not reflect any additional charges such as network or storage transfer or usage of io1 volume (default to gp2)
  • +
  • Does not reflect any additional charges such as network or storage transfer or usage of io1 volume (default to gp3)
  • Compute rate retrieved for your running region
  • FSx Persistent Baseline: (50 MB/s/TiB baseline, up to 1.3 GB/s/TiB burst)
  • FSx Scratch Baseline: (200 MB/s/TiB baseline, up to 1.3 GB/s/TiB burst)
  • diff --git a/source/soca/cluster_web_ui/views/dashboard.py b/source/soca/cluster_web_ui/views/dashboard.py index 3028d7e8..1056935a 100644 --- a/source/soca/cluster_web_ui/views/dashboard.py +++ b/source/soca/cluster_web_ui/views/dashboard.py @@ -11,6 +11,6 @@ @dashboard.route('/dashboard', methods=['GET']) @login_required def index(): - elastic_search_endpoint = read_secretmanager.get_soca_configuration()['ESDomainEndpoint'] - kibana_url = "https://" + elastic_search_endpoint + "/_plugin/kibana" - return render_template("dashboard.html", kibana_url=kibana_url) \ No newline at end of file + loadbalancer_dns_name = read_secretmanager.get_soca_configuration()['LoadBalancerDNSName'] + kibana_url = "https://" + loadbalancer_dns_name + "/_plugin/kibana" + return render_template("dashboard.html", kibana_url=kibana_url) diff --git a/source/soca/cluster_web_ui/views/my_activity.py b/source/soca/cluster_web_ui/views/my_activity.py index 66421e57..aeba29f9 100644 --- a/source/soca/cluster_web_ui/views/my_activity.py +++ b/source/soca/cluster_web_ui/views/my_activity.py @@ -23,7 +23,7 @@ def index(): start = (datetime.datetime.utcnow() - datetime.timedelta(days=timedelta)).strftime('%Y-%m-%d') user_kibana_url = False - elastic_search_endpoint = read_secretmanager.get_soca_configuration()['ESDomainEndpoint'] + loadbalancer_dns_name = read_secretmanager.get_soca_configuration()['LoadBalancerDNSName'] job_index = "https://" + elastic_search_endpoint + "/_search?q=type:index-pattern%20AND%20index-pattern.title:" + config.Config.KIBANA_JOB_INDEX get_job_index = get(job_index, verify=False) index_id = False @@ -43,9 +43,9 @@ def index(): if index_id is False: flash("Unable to retrieve index ID for {}. 
To do the initial setup, follow instructions available on https://awslabs.github.io/scale-out-computing-on-aws/analytics/monitor-cluster-activity/".format(config.Config.KIBANA_JOB_INDEX)) - user_kibana_url = "https://" + elastic_search_endpoint + "/_plugin/kibana/" + user_kibana_url = "https://" + loadbalancer_dns_name + "/_plugin/kibana/" else: - user_kibana_url = "https://"+elastic_search_endpoint+"/_plugin/kibana/app/kibana#/discover?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:'"+start+"T00:00:00.000Z',to:'"+end+"T23:59:59.000Z'))&_a=(columns:!(_source),filters:!(),index:'"+index_id+"',interval:auto,query:(language:kuery,query:'user:"+user+"'),sort:!(!(start_iso,desc)))" + user_kibana_url = "https://"+loadbalancer_dns_name+"/_plugin/kibana/app/kibana#/discover?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:'"+start+"T00:00:00.000Z',to:'"+end+"T23:59:59.000Z'))&_a=(columns:!(_source),filters:!(),index:'"+index_id+"',interval:auto,query:(language:kuery,query:'user:"+user+"'),sort:!(!(start_iso,desc)))" return render_template('my_activity.html', user_kibana_url=user_kibana_url, diff --git a/source/soca/cluster_web_ui/views/my_files.py b/source/soca/cluster_web_ui/views/my_files.py index fdf67ca3..3de5165b 100644 --- a/source/soca/cluster_web_ui/views/my_files.py +++ b/source/soca/cluster_web_ui/views/my_files.py @@ -245,13 +245,17 @@ def index(): try: for entry in os.scandir(path): if not entry.name.startswith("."): - filesystem[entry.name] = {"path": path + "/" + entry.name, - "uid": encrypt(path + "/" + entry.name, entry.stat().st_size)["message"], - "type": "folder" if entry.is_dir() else "file", - "st_size": convert_size(entry.stat().st_size), - "st_size_default": entry.stat().st_size, - "st_mtime": entry.stat().st_mtime - } + try: + filesystem[entry.name] = {"path": path + "/" + entry.name, + "uid": encrypt(path + "/" + entry.name, entry.stat().st_size)["message"], + "type": "folder" if entry.is_dir() else "file", + "st_size": 
convert_size(entry.stat().st_size), + "st_size_default": entry.stat().st_size, + "st_mtime": entry.stat().st_mtime + } + except Exception as err: + # most likely symbolic link pointing to wrong location + flash("{} returned an error and cannot be displayed: {}".format(entry.name, err)) cache[CACHE_FOLDER_CONTENT_PREFIX + path] = filesystem except Exception as err: diff --git a/source/soca/cluster_web_ui/views/remote_desktop.py b/source/soca/cluster_web_ui/views/remote_desktop.py index 3f7f61d5..7a20ae35 100644 --- a/source/soca/cluster_web_ui/views/remote_desktop.py +++ b/source/soca/cluster_web_ui/views/remote_desktop.py @@ -54,7 +54,7 @@ def can_launch_instance(launch_parameters): 'Ebs': { 'DeleteOnTermination': True, 'VolumeSize': 30 if launch_parameters["disk_size"] is False else int(launch_parameters["disk_size"]), - 'VolumeType': 'gp2', + 'VolumeType': 'gp3', 'Encrypted': True }, }, @@ -334,6 +334,12 @@ def create(): echo export "SOCA_INSTANCE_TYPE=$GET_INSTANCE_TYPE" >> /etc/environment echo export "SOCA_HOST_SYSTEM_LOG="/apps/soca/''' + str(soca_configuration['ClusterId']) + '''/cluster_node_bootstrap/logs/desktop/''' + str(session["user"]) + '''/''' + session_name + '''/$(hostname -s)"" >> /etc/environment echo export "AWS_DEFAULT_REGION="'''+region+'''"" >> /etc/environment +# Required for proper EBS tagging +echo export "SOCA_JOB_ID="''' + str(session_name) +'''"" >> /etc/environment +echo export "SOCA_JOB_OWNER="''' + str(session["user"]) + '''"" >> /etc/environment +echo export "SOCA_JOB_PROJECT="dcv"" >> /etc/environment +echo export "SOCA_JOB_QUEUE="dcv"" >> /etc/environment + source /etc/environment AWS=$(which aws) # Give yum permission to the user on this specific machine @@ -394,26 +400,24 @@ def create(): /bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNode.sh ''' + soca_configuration['SchedulerPrivateDnsName'] + ''' >> $SOCA_HOST_SYSTEM_LOG/ComputeNode.sh.log 2>&1''' - - check_hibernation_support = 
client_ec2.describe_instance_types( - InstanceTypes=[instance_type], - Filters=[ - {"Name": "hibernation-supported", - "Values": ["true"]}] - ) - logger.info("Checking in {} support Hibernation : {}".format(instance_type, check_hibernation_support)) - if len(check_hibernation_support["InstanceTypes"]) == 0: - if config.Config.DCV_FORCE_INSTANCE_HIBERNATE_SUPPORT is True: - flash("Sorry your administrator limited DCV to instances that support hibernation mode
    Please choose a different type of instance.") - return redirect("/remote_desktop") - else: - hibernate_support = False - else: - hibernate_support = True - - if parameters["hibernate"] and not hibernate_support: - flash("Sorry you have selected {} with hibernation support, but this instance type does not support it. Either disable hibernation support or pick a different instance type".format(instance_type), "error") - return redirect("/remote_desktop") + if parameters["hibernate"]: + try: + check_hibernation_support = client_ec2.describe_instance_types( + InstanceTypes=[instance_type], + Filters=[ + {"Name": "hibernation-supported", + "Values": ["true"]}] + ) + logger.info("Checking in {} support Hibernation : {}".format(instance_type, check_hibernation_support)) + if len(check_hibernation_support["InstanceTypes"]) == 0: + if config.Config.DCV_FORCE_INSTANCE_HIBERNATE_SUPPORT is True: + flash("Sorry your administrator limited DCV to instances that support hibernation mode
    Please choose a different type of instance.") + return redirect("/remote_desktop") + else: + flash("Sorry you have selected {} with hibernation support, but this instance type does not support it. Either disable hibernation support or pick a different instance type".format(instance_type), "error") + return redirect("/remote_desktop") + except ClientError as e: + logger.error(f"Error while checking hibernation support due to {e}") launch_parameters = {"security_group_id": security_group_id, "instance_profile": instance_profile, diff --git a/source/soca/cluster_web_ui/views/remote_desktop_windows.py b/source/soca/cluster_web_ui/views/remote_desktop_windows.py index 934e9f67..ced4ebeb 100644 --- a/source/soca/cluster_web_ui/views/remote_desktop_windows.py +++ b/source/soca/cluster_web_ui/views/remote_desktop_windows.py @@ -46,7 +46,7 @@ def can_launch_instance(launch_parameters): 'Ebs': { 'DeleteOnTermination': True, 'VolumeSize': 30 if launch_parameters["disk_size"] is False else int(launch_parameters["disk_size"]), - 'VolumeType': 'gp2', + 'VolumeType': 'gp3', 'Encrypted': True }, }, @@ -319,33 +319,37 @@ def create(): user_data = user_data.replace("%SOCA_LoadBalancerDNSName%", soca_configuration['LoadBalancerDNSName']) user_data = user_data.replace("%SOCA_LOCAL_USER%", session["user"]) + # required for EBS tagging + user_data = user_data.replace("%SOCA_JOB_ID%", str(session_name)) + user_data = user_data.replace("%SOCA_JOB_OWNER%", session["user"]) + user_data = user_data.replace("%SOCA_JOB_PROJECT%", "dcv") + + if config.Config.DCV_WINDOWS_AUTOLOGON is True: user_data = user_data.replace("%SOCA_WINDOWS_AUTOLOGON%", "true") else: user_data = user_data.replace("%SOCA_WINDOWS_AUTOLOGON%", "false") + if parameters["hibernate"]: + try: + check_hibernation_support = client_ec2.describe_instance_types( + InstanceTypes=[instance_type], + Filters=[ + {"Name": "hibernation-supported", + "Values": ["true"]}] + ) + logger.info("Checking in {} support Hibernation : 
{}".format(instance_type, check_hibernation_support)) + if len(check_hibernation_support["InstanceTypes"]) == 0: + if config.Config.DCV_FORCE_INSTANCE_HIBERNATE_SUPPORT is True: + flash("Sorry your administrator limited DCV to instances that support hibernation mode
    Please choose a different type of instance.") + return redirect("/remote_desktop_windows") + else: + flash("Sorry you have selected {} with hibernation support, but this instance type does not support it. Either disable hibernation support or pick a different instance type".format(instance_type), "error") + return redirect("/remote_desktop_windows") + except ClientError as e: + logger.error(f"Error while checking hibernation support due to {e}") - check_hibernation_support = client_ec2.describe_instance_types( - InstanceTypes=[instance_type], - Filters=[ - {"Name": "hibernation-supported", - "Values": ["true"]}] - ) - logger.info("Checking in {} support Hibernation : {}".format(instance_type, check_hibernation_support)) - if len(check_hibernation_support["InstanceTypes"]) == 0: - if config.Config.DCV_FORCE_INSTANCE_HIBERNATE_SUPPORT is True: - flash("Sorry your administrator limited DCV to instances that support hibernation mode
    Please choose a different type of instance.") - return redirect("/remote_desktop_windows") - else: - hibernate_support = False - else: - hibernate_support = True - - if parameters["hibernate"] and not hibernate_support: - flash("Sorry you have selected {} with hibernation support, but this instance type does not support it. Either disable hibernation support or pick a different instance type".format(instance_type), "error") - return redirect("/remote_desktop_windows") - launch_parameters = {"security_group_id": security_group_id, "instance_profile": instance_profile, "instance_type": instance_type, diff --git a/source/templates/Analytics.template b/source/templates/Analytics.template index d7270222..c7092d60 100644 --- a/source/templates/Analytics.template +++ b/source/templates/Analytics.template @@ -1,22 +1,19 @@ AWSTemplateFormatVersion: 2010-09-09 Description: (SOCA) - Manage ELK stack Parameters: - SchedulerSecurityGroup: + ComputeNodeSecurityGroup: Type: String - PublicSubnet1: + VpcId: Type: String - ClusterId: - Type: String - - SchedulerPublicIP: + PrivateSubnet1: Type: String - EIPNat: + PrivateSubnet2: Type: String - ClientIp: + ClusterId: Type: String Resources: @@ -28,8 +25,10 @@ Resources: reason: "Domain Name is required if we want to restrict AccessPolicies to this resource only" Type: AWS::Elasticsearch::Domain Properties: - ElasticsearchVersion: 7.4 + ElasticsearchVersion: 7.9 DomainName: !Sub ${ClusterId} + DomainEndpointOptions: + EnforceHTTPS: True NodeToNodeEncryptionOptions: Enabled: True EncryptionAtRestOptions: @@ -51,12 +50,6 @@ Resources: Principal: AWS: '*' Action: 'es:ESHttp*' - Condition: - IpAddress: - aws:SourceIp: - - !Ref ClientIp - - !Sub ${SchedulerPublicIP}/32 - - !Sub ${EIPNat}/32 Resource: !Sub 'arn:${AWS::Partition}:es:${AWS::Region}:${AWS::AccountId}:domain/${ClusterId}/*' AdvancedOptions: @@ -68,25 +61,122 @@ Resources: - Key: soca:ClusterId Value: !Ref ClusterId - # Cloudformation does not support bind of 
ElasticSearchServiceLinkedRole to ElasticsearchDomain - # Because of this limitation we are restricted to non VPC only + VPCOptions: + SubnetIds: + - !Ref PrivateSubnet1 + - !Ref PrivateSubnet2 + SecurityGroupIds: + - !Ref ComputeNodeSecurityGroup + + GetESPrivateIPLambdaRole: + Metadata: + cfn_nag: + rules_to_suppress: + - id: W11 + reason: "DescribeNetworkInterfaces requires * resource-level permissions" + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - sts:AssumeRole + Policies: + - PolicyName: PreRequisite + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:DeleteLogStream + - logs:PutLogEvents + Resource: + - !Join [ "", [ "arn:", !Ref "AWS::Partition", ":logs:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":log-group:/aws/lambda/", !Ref ClusterId, "*"] ] - #VPCOptions: - # SubnetIds: - # - !Ref PublicSubnet1 - # SecurityGroupIds: - # - !Ref SchedulerSecurityGroup + - Effect: Allow + Action: + - ec2:DescribeNetworkInterfaces + Resource: + - '*' + Condition: + "ForAllValues:ArnEqualsIfExists": + "ec2:Vpc": !Sub "arn:${AWS::Partition}:ec2:${AWS::Region}:*:vpc/${VpcId}" - #ElasticSearchServiceLinkedRole: - # Type: AWS::IAM::ServiceLinkedRole - ## Properties: - # AWSServiceName: es.amazonaws.com - # Description: !Sub Service Link Role for ${ClusterId}-analytics + GetESPrivateIPLambda: + Type: AWS::Lambda::Function + Properties: + Description: Get ES private ip addresses + FunctionName: !Sub "${ClusterId}-GetESPrivateIP" + Handler: index.lambda_handler + MemorySize: 128 + Role: !GetAtt GetESPrivateIPLambdaRole.Arn + Runtime: python3.7 + Timeout: 180 + Tags: + - Key: soca:ClusterId + Value: !Ref ClusterId + Code: + ZipFile: !Sub | + import cfnresponse + import boto3 + import logging + ''' + Get prefix list id + ''' + 
logging.getLogger().setLevel(logging.INFO) + def lambda_handler(event, context): + try: + logging.info("event: {}".format(event)) + requestType = event['RequestType'] + if requestType == 'Delete': + cfnresponse.send(event, context, cfnresponse.SUCCESS, {}, '') + return + ClusterId = event['ResourceProperties']['ClusterId'] + logging.info("ClusterId: " + ClusterId) + ec2_client = boto3.client('ec2') + response = ec2_client.describe_network_interfaces(Filters=[ + {'Name': 'description', 'Values': ['ES ' + ClusterId]}, + {'Name': 'requester-id', 'Values': ['amazon-elasticsearch']}]) + ipAddresses = [] + for networkInterface in response['NetworkInterfaces']: + logging.debug(networkInterface) + az = networkInterface['AvailabilityZone'] + logging.info("AZ: " + az) + for privateIpAddress in networkInterface['PrivateIpAddresses']: + logging.debug(privateIpAddress) + ipAddress = privateIpAddress['PrivateIpAddress'] + logging.info("ipAddress:" + ipAddress) + ipAddresses.append(ipAddress) + if len(ipAddresses) == 0: + msg = "No IP addresses found" + logging.error(msg) + cfnresponse.send(event, context, cfnresponse.FAILED, {'error': msg}, msg) + else: + ipAddressesStr = ",".join(ipAddresses) + cfnresponse.send(event, context, cfnresponse.SUCCESS, {'IpAddresses': ipAddressesStr}, str(ipAddresses)) + except: + logging.exception("Caught exception") + error_message = 'Exception getting private IP addresses for ES soca-{}'.format(ClusterId) + cfnresponse.send(event, context, cfnresponse.FAILED, {'error': error_message}, error_message) + ESCustomResource: + DependsOn: ElasticsearchDomain + Type: AWS::CloudFormation::CustomResource + Properties: + ServiceToken: !GetAtt GetESPrivateIPLambda.Arn + ClusterId: !Ref ClusterId Outputs: ESDomainArn: Value: !GetAtt ElasticsearchDomain.DomainArn ESDomainEndpoint: Value: !GetAtt ElasticsearchDomain.DomainEndpoint + ESDomainIPAddresses: + Value: !GetAtt ESCustomResource.IpAddresses diff --git a/source/templates/Network.template 
b/source/templates/Network.template index a1684ec3..6c71ef1f 100644 --- a/source/templates/Network.template +++ b/source/templates/Network.template @@ -127,6 +127,11 @@ Resources: Type: AWS::EC2::EIP Properties: Domain: vpc + Tags: + - Key: Name + Value: !Sub ${ClusterId}-EIPNat + - Key: soca:ClusterId + Value: !Ref ClusterId EIPScheduler: Type: AWS::EC2::EIP @@ -134,7 +139,7 @@ Resources: Domain: vpc Tags: - Key: Name - Value: !Ref ClusterId + Value: !Sub ${ClusterId}-EIPScheduler - Key: soca:ClusterId Value: !Ref ClusterId @@ -261,5 +266,3 @@ Outputs: Value: !Ref EIPNat SchedulerPublicIP: Value: !Ref EIPScheduler - SchedulerPublicIPAllocation: - Value: !GetAtt EIPScheduler.AllocationId diff --git a/source/templates/Scheduler.template b/source/templates/Scheduler.template index 0501cdb7..682964b6 100644 --- a/source/templates/Scheduler.template +++ b/source/templates/Scheduler.template @@ -50,10 +50,6 @@ Parameters: SchedulerPublicIP: Type: String - SchedulerPublicIPAllocation: - Type: String - - Conditions: UseAmazonLinux: !Equals [ !Ref BaseOS, 'amazonlinux2'] @@ -70,7 +66,7 @@ Resources: - DeviceName: !If [UseAmazonLinux, "/dev/xvda", "/dev/sda1"] Ebs: VolumeSize: 150 - VolumeType: gp2 + VolumeType: gp3 Encrypted: true KeyName: !Ref SSHKeyPair diff --git a/source/templates/Security.template b/source/templates/Security.template index e0ba920e..29cc3a36 100644 --- a/source/templates/Security.template +++ b/source/templates/Security.template @@ -22,6 +22,12 @@ Parameters: S3InstallFolder: Type: String + CreateESServiceRole: + Type: String + +Conditions: + CreateESServiceRoleCondition: !Equals [!Ref 'CreateESServiceRole', 'True'] + Resources: SchedulerSecurityGroup: Metadata: @@ -621,6 +627,13 @@ Resources: - acm:AddTagsToCertificate Resource: "*" + ESServiceLinkedRole: + Type: AWS::IAM::ServiceLinkedRole + Condition: CreateESServiceRoleCondition + Properties: + AWSServiceName: es.amazonaws.com + Description: 'ES Role to access resources in SOCA VPC' + Outputs: 
SchedulerIAMRole: Value: !Ref SchedulerIAMRole diff --git a/source/templates/Viewer.template b/source/templates/Viewer.template index d3cbcb7d..a5a08dc0 100644 --- a/source/templates/Viewer.template +++ b/source/templates/Viewer.template @@ -28,6 +28,9 @@ Parameters: LambdaACMIAMRoleArn: Type: String + ESDomainIPAddresses: + Type: String + Resources: SchedulerELBPolicy: @@ -197,6 +200,23 @@ Resources: - Type: forward TargetGroupArn: !Ref TargetGroupSocaWebUI + ESLoadBalancerListenerRule: + DependsOn: + - HTTPSLoadBalancerListener + - TargetGroupES + Type: AWS::ElasticLoadBalancingV2::ListenerRule + Properties: + Actions: + - Type: forward + TargetGroupArn: !Ref TargetGroupES + Conditions: + - Field: path-pattern + PathPatternConfig: + Values: + - "/_plugin/kibana/*" + ListenerArn: !Ref HTTPSLoadBalancerListener + Priority: 1 + HTTPLoadBalancerListener: Metadata: cfn_nag: @@ -233,6 +253,24 @@ Resources: - Id: !Ref SchedulerInstanceId HealthCheckPath: "/ping" + TargetGroupES: + DependsOn: LoadBalancer + Type: AWS::ElasticLoadBalancingV2::TargetGroup + Properties: + Name: !Sub ${ClusterId}-ES + VpcId: !Ref VpcId + Port: 443 + Protocol: HTTPS + TargetType: ip + Targets: + - Id: !Select [0, !Split [ ",", !Ref ESDomainIPAddresses ] ] + - Id: !Select [1, !Split [ ",", !Ref ESDomainIPAddresses ] ] + - Id: !Select [2, !Split [ ",", !Ref ESDomainIPAddresses ] ] + - Id: !Select [3, !Split [ ",", !Ref ESDomainIPAddresses ] ] + - Id: !Select [4, !Split [ ",", !Ref ESDomainIPAddresses ] ] + - Id: !Select [5, !Split [ ",", !Ref ESDomainIPAddresses ] ] + HealthCheckPath: "/" + Outputs: LoadBalancerArn: Value: !Ref LoadBalancer