diff --git a/.bandit.yaml b/.bandit.yaml
new file mode 100644
index 0000000..544f267
--- /dev/null
+++ b/.bandit.yaml
@@ -0,0 +1,20 @@
+---
+# Bandit config file.
+# BEWARE: Bandit does not use any configuration file by default,
+# so you need to specify it using -c.
+# If you have lines in your code triggering vulnerability errors
+# and you are certain that this is acceptable, they can be individually
+# silenced by appending # nosec to the line.
+exclude_dirs:
+  - .tox
+  - .git
+  - .mypy_cache
+  - .pytest_cache
+  - .github
+  - venv
+  - tests
+
+# Skip assert inside test files.
+assert_used:
+  skips:
+    - "*/test_*.py"
diff --git a/.github/ISSUE_TEMPLATE/config.yaml b/.github/ISSUE_TEMPLATE/config.yaml
new file mode 100644
index 0000000..e944c32
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yaml
@@ -0,0 +1,45 @@
+name: Bug Report
+description: File a bug report
+title: "[Bug]: "
+labels: ["bug", "triage"]
+body:
+  - type: textarea
+    id: what-happened
+    attributes:
+      label: What happened?
+      placeholder: Tell us what you see!
+      value: |-
+
+        ## When (optional)
+
+        -
+        -
+
+        ## I Expect
+
+        -
+        -
+
+        ## Instead
+
+        -
+
+        ## Notes
+
+        -
+    validations:
+      required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: Relevant log output
+      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
+      render: shell
+  - type: checkboxes
+    id: terms
+    attributes:
+      label: Code of Conduct
+      description: By submitting this issue, you agree to follow our [Code of Conduct](https://example.com)
+      options:
+        - label: I agree to follow this project's Code of Conduct
+          required: true
diff --git a/.github/ISSUE_TEMPLATE/issue-simple.md b/.github/ISSUE_TEMPLATE/issue-simple.md
new file mode 100644
index 0000000..cc764d5
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/issue-simple.md
@@ -0,0 +1,29 @@
+---
+name: Minimal issue template
+about: Minimal Issue Template
+title: '[bug]: '
+labels: ''
+assignees: ''
+---
+
+## When I (optional)
+
+1. xx
+1. xx
+1. xx
+
+## I expect
+
+- [ ] yy
+- [ ] zz
+
+## Instead
+
+-
+-
+-
+
+## Notes
+
+Attach sanitized logs, screenshots, outputs.
+CC folks
diff --git a/.github/POSTMORTEM.md b/.github/POSTMORTEM.md
new file mode 100644
index 0000000..b398461
--- /dev/null
+++ b/.github/POSTMORTEM.md
@@ -0,0 +1,142 @@
+---
+# This is a template for postmortem reports, inspired by
+# the one teamdigitale published on medium.com.
+# For the original version, see the references section.
+title: Fake Postmortem - Cloud connectivity incident
+date: 2018-05-23
+summary: >-
+  Fake Postmortem inspired by the following: The Digital Team's websites were unreachable for 28 hours due to a cloud provider
+  outage.
+authors:
+- name: Mario Rossi
+- name: Franco Bianchi
+references:
+- https://medium.com/team-per-la-trasformazione-digitale/document-postmortem-technology-italian-government-public-administration-99639a0a7877
+- https://abseil.io/resources/swe-book/html/ch02.html#blameless_postmortem_culture
+glossary: {}
+keywords: []
+...
+---
+# Postmortem - Template for a postmortem report
+
+## Summary
+
+**Impact**:
+
+The following services cannot be reached:
+
+- Dashboard Team
+- Three-Year ICT Plan
+- Designers Italia
+- Developers Italia
+- Docs Italia
+- Forum Italia
+
+**Duration**:
+28 hours
+
+**Cause**:
+OpenStack network outage - cloud provider _Cloud SPC Lotto 1_
+
+## Context
+
+The Digital Team's websites are based mainly on static HTML generated from the source content of the repositories on GitHub.
+The HTML code is published via a web server (nginx) and served over the HTTPS protocol.
+Forum Italia (http://forum.italia.it) is the only exception to this deployment model, and is managed separately via Docker containers.
+At any given time, one or more web servers can be deployed on the cloud provider's (Cloud SPC Lotto 1) OpenStack virtual machines, using the API provided by the platform.
+
+Cloud resources (virtual machines and data volumes) are allocated to services according to the Agency for Digital Italy's Cloud SPC contract.
+
+## Impact and damage assessment
+
+On 2018-05-19, the following services became unreachable due to an internal connectivity issue of the Cloud Service Provider "Cloud SPC":
+
+- Dashboard Team
+- Three-Year ICT Plan
+- Designers Italia
+- Developers Italia
+- Docs Italia
+- Forum Italia
+
+## Causes and Contributing Factors
+
+According to a postmortem document released by the supplier on 2018-06-07, the interruption of connectivity experienced by the 31 users (tenants) of the SPC Cloud service was triggered by a planned update of the OpenStack platform carried out on the night of Thursday 2018-05-17.
+
+### Detection
+
+The problem was detected the following morning (2018-05-18), thanks to reports from users who were no longer able to access the services provided on the Cloud SPC platform.
+
+### Causes
+
+The document states that a restart of the control nodes of the OpenStack platform (nodes that handle OpenStack's management services: neutron, glance, cinder, etc.) caused “an anomaly” in the network infrastructure, blocking the traffic on several compute nodes (nodes where virtual instances are executed) and causing virtual machines belonging to 31 users to become unreachable.
+The postmortem document also explains how a bug in the playbook (update script) blocked network activities by modifying the permissions of the lock file `/var/run/neutron/lock/neutron-iptables` indicated in the platform's official documentation.
+
+Again according to the supplier, restarting the nodes was necessary to apply the security updates for Meltdown and Spectre (CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754).
+
+The unavailability of the Cloud SPC infrastructure was undoubtedly the root cause of the problem, but the lack of an application-level protection mechanism for the Digital Team's services prolonged their unavailability.
+Because the possibility of the entire cloud provider becoming unreachable had not been taken into account during the design phase of the services, it was not possible to respond adequately to this event.
+Despite the SPC Cloud provider's failover mechanisms, the web services were not protected from generalized outages capable of undermining the entire infrastructure of the only cloud provider at our disposal.
+
+## Actions taken
+
+WRITEME: A list of action items taken to mitigate/fix the problem
+
+- Action 1
+  - Owner
+- Action 2
+  - Owner
+...
+
+## Preventive actions
+
+WRITEME: A list of action items to prevent this from happening again
+
+
+## Lessons learned
+
+### What went wrong
+
+The Cloud SPC platform cannot currently distribute virtual machines across different data centers or regions (OpenStack regions).
+It would have been useful to be able to distribute virtual resources across independent infrastructures, even infrastructures provided by the same supplier.
+
+### What should have been done
+
+In hindsight, the Public Administration should have access to multiple cloud providers, so as to ensure the resilience of its services even when the main cloud provider suffers an outage.
+
+### Where we got lucky
+
+WRITEME: What things went right that could have gone wrong
+
+### What should we do differently next time
+
+The most important lesson we learned from this experience is the need to continue investing in the development of a cross-platform, multi-supplier cloud model.
+This model would guarantee the reliability of Public Administration services even when the main cloud provider is affected by problems that make it unreachable for a long period of time.
+
+## Timeline
+
+A timeline of the event, from discovery through investigation to resolution.
+All times are in CEST.
+
+### 2018-05-17
+
+22:30 CEST: The SPC MaaS alert service sends alerts through email indicating that several nodes can no longer be reached.
+
+### 2018-05-19
+
+06:50 CEST: The aforementioned services, available at the IP address 91.206.129.249, can no longer be reached.
+
+08:00 CEST: The problem is detected and reported to the supplier.
+
+09:30 CEST: The machines are determined to be accessible through OpenStack's administration interface (API and GUI) and internal connectivity reveals no issue. Virtual machines can communicate through the tenant's private network, but do not connect to the Internet.
+
+15:56 CEST: The Digital Team sends the supplier and CONSIP a help request via email.
+
+18:00 CEST: The supplier communicates that they have identified the problem, which turns out to be the same problem experienced by the DAF project, and commences work on a manual workaround.
+
+19:00 CEST: The supplier informs us that a fix has been produced and that it will be applied to the virtual machines belonging to the 31 public administrations (tenants) involved.
+
+### 2018-05-20
+
+11:10 CEST: The supplier restores connectivity to the VMs of the AgID tenant.
+
+11:30 CEST: The Digital Team reboots the web services and the sites are again reachable.
diff --git a/.github/PULL_REQUEST_TEMPLATE/pr-simple.md b/.github/PULL_REQUEST_TEMPLATE/pr-simple.md
new file mode 100644
index 0000000..a2f5db1
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/pr-simple.md
@@ -0,0 +1,13 @@
+## This PR
+
+- [ ]
+- [ ]
+- [ ]
+
+## It's done
+
+- Rationale of the implementation
+
+## Checks
+
+- [ ] This PR conforms to the [CONTRIBUTING.md](CONTRIBUTING.md) guidelines
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..a135621
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,32 @@
+# Run the SuperLinter action with some custom setup.
+
+name: Lint
+
+on:
+  push:
+    branches: ["main"]
+  pull_request:
+    branches: ["main"]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  build:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v2
+
+      - name: Super-Linter
+        uses: github/super-linter@v3.15.5
+        env:
+          VALIDATE_MARKDOWN: false
+          # Disabled for conflicts with the isort version used in pre-commit;
+          # you can re-enable it if you align your local isort with
+          # the one in the super-linter image.
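+          # A sketch of that alignment (the rev below is this repository's
+          # .pre-commit-config.yaml pin; the version actually shipped in the
+          # super-linter image is an assumption to verify, e.g. with
+          #   docker run --rm --entrypoint isort github/super-linter:v3.15.5 --version
+          # ):
+          #   - repo: https://github.com/pycqa/isort
+          #     rev: 5.13.2  # keep in sync with the super-linter image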
+          VALIDATE_PYTHON_ISORT: false
+          VALIDATE_XML: false
+          VALIDATE_NATURAL_LANGUAGE: false
diff --git a/.github/workflows/security-bandit.yml b/.github/workflows/security-bandit.yml
new file mode 100644
index 0000000..e357e36
--- /dev/null
+++ b/.github/workflows/security-bandit.yml
@@ -0,0 +1,54 @@
+# This is a basic workflow to help you get started with Actions
+
+name: "security-bandit"
+
+# Controls when the action will run. Triggers the workflow on push or pull request
+# events, but only for the main branch.
+on:
+  push:
+    branches: ["main"]
+    paths-ignore:
+      - "ISSUE_TEMPLATE/**"
+  pull_request:
+    branches: ["main"]
+    paths-ignore:
+      - "ISSUE_TEMPLATE/**"
+
+permissions: read-all
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v2
+
+      # Runs a single command using the runner's shell
+      - name: Python security check using Bandit
+        uses: ioggstream/bandit-report-artifacts@v1.7.4
+        with:
+          project_path: .
+          config_file: .bandit.yaml
+
+  super-sast:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@v3
+      - name: Test
+        run: |
+          echo UID=$(id -u) >> .env
+          docker run --rm --user=$(id -u) \
+            -v $PWD:/code \
+            -w /code \
+            -e MAVEN_OPTS=" -ntp " \
+            -e RUN_OWASP_DEPENDENCY_CHECK=false \
+            -e RUN_SPOTBUGS_CHECK=false \
+            -e RUN_SPOTLESS_CHECK=false \
+            -e RUN_SPOTLESS_APPLY=true \
+            -e HOME=/tmp \
+            -e USER=nobody \
+            -e BANDIT_CONFIG_FILE=/code/.bandit.yaml \
+            ghcr.io/par-tec/super-sast:latest
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..f19c9fb
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,77 @@
+# This workflow template runs:
+# - a tox container with tests
+# - a service container (e.g. a database) to be used by tox tests.
+
+name: Test
+
+# Controls when the action will run.
+on:
+  # Triggers the workflow on push or pull request events, but only for the main branch
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+
+  test-tox-job:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+    container: python:3.9-slim
+
+    # This stanza deploys a service container with
+    # the "rabbit" hostname. It is commented out
+    # to save build time. Uncomment it if you need
+    # it!
+    # services:
+    #   rabbit:
+    #     image: rabbitmq:3-management
+    #     ports:
+    #       - 5672:5672
+
+    # ...then run the tox jobs referencing it.
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it.
+      # IMPORTANT!! By default `actions/checkout` just checks out HEAD, so if you want
+      # to checkout tags and branches too (e.g. to auto-version your deployments)
+      # you need to pass the `fetch-depth: 0` option, e.g.
+      #
+      #   uses: actions/checkout@v2
+      #   with:
+      #     fetch-depth: 0
+      - uses: actions/checkout@v2
+
+      - name: Run tests.
+        run: |
+          pip3 install tox
+          tox
+
+  test-pre-commit:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+    container: python:3.9
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Run commit hooks.
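+        # Install pre-commit in the job container, mark the workspace as a
+        # safe git directory, then run every hook against the whole tree.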
+        run: |
+          pip3 --no-cache-dir install pre-commit
+          git --version
+          pwd
+          ls -la
+          id
+          git config --global --add safe.directory $PWD
+          pre-commit install
+          pre-commit run -a
+
+      # Store (expiring) logs on failure.
+      # Retrieve artifacts via `gh run download`.
+      - uses: actions/upload-artifact@v3
+        if: failure()
+        with:
+          name: pre-commit.log
+          path: /github/home/.cache/pre-commit/pre-commit.log
+          retention-days: 5
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f872e93
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,132 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# IDE
+.vscode/
diff --git a/.gitlab/POSTMORTEM.md b/.gitlab/POSTMORTEM.md
new file mode 100644
index 0000000..b398461
--- /dev/null
+++ b/.gitlab/POSTMORTEM.md
@@ -0,0 +1,142 @@
+---
+# This is a template for postmortem reports, inspired by
+# the one teamdigitale published on medium.com.
+# For the original version, see the references section.
+title: Fake Postmortem - Cloud connectivity incident
+date: 2018-05-23
+summary: >-
+  Fake Postmortem inspired by the following: The Digital Team's websites were unreachable for 28 hours due to a cloud provider
+  outage.
+authors:
+- name: Mario Rossi
+- name: Franco Bianchi
+references:
+- https://medium.com/team-per-la-trasformazione-digitale/document-postmortem-technology-italian-government-public-administration-99639a0a7877
+- https://abseil.io/resources/swe-book/html/ch02.html#blameless_postmortem_culture
+glossary: {}
+keywords: []
+...
+---
+# Postmortem - Template for a postmortem report
+
+## Summary
+
+**Impact**:
+
+The following services cannot be reached:
+
+- Dashboard Team
+- Three-Year ICT Plan
+- Designers Italia
+- Developers Italia
+- Docs Italia
+- Forum Italia
+
+**Duration**:
+28 hours
+
+**Cause**:
+OpenStack network outage - cloud provider _Cloud SPC Lotto 1_
+
+## Context
+
+The Digital Team's websites are based mainly on static HTML generated from the source content of the repositories on GitHub.
+The HTML code is published via a web server (nginx) and served over the HTTPS protocol.
+Forum Italia (http://forum.italia.it) is the only exception to this deployment model, and is managed separately via Docker containers.
+At any given time, one or more web servers can be deployed on the cloud provider's (Cloud SPC Lotto 1) OpenStack virtual machines, using the API provided by the platform.
+
+Cloud resources (virtual machines and data volumes) are allocated to services according to the Agency for Digital Italy's Cloud SPC contract.
+
+## Impact and damage assessment
+
+On 2018-05-19, the following services became unreachable due to an internal connectivity issue of the Cloud Service Provider "Cloud SPC":
+
+- Dashboard Team
+- Three-Year ICT Plan
+- Designers Italia
+- Developers Italia
+- Docs Italia
+- Forum Italia
+
+## Causes and Contributing Factors
+
+According to a postmortem document released by the supplier on 2018-06-07, the interruption of connectivity experienced by the 31 users (tenants) of the SPC Cloud service was triggered by a planned update of the OpenStack platform carried out on the night of Thursday 2018-05-17.
+
+### Detection
+
+The problem was detected the following morning (2018-05-18), thanks to reports from users who were no longer able to access the services provided on the Cloud SPC platform.
+
+### Causes
+
+The document states that a restart of the control nodes of the OpenStack platform (nodes that handle OpenStack's management services: neutron, glance, cinder, etc.) caused “an anomaly” in the network infrastructure, blocking the traffic on several compute nodes (nodes where virtual instances are executed) and causing virtual machines belonging to 31 users to become unreachable.
+The postmortem document also explains how a bug in the playbook (update script) blocked network activities by modifying the permissions of the lock file `/var/run/neutron/lock/neutron-iptables` indicated in the platform's official documentation.
+
+Again according to the supplier, restarting the nodes was necessary to apply the security updates for Meltdown and Spectre (CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754).
+
+The unavailability of the Cloud SPC infrastructure was undoubtedly the root cause of the problem, but the lack of an application-level protection mechanism for the Digital Team's services prolonged their unavailability.
+Because the possibility of the entire cloud provider becoming unreachable had not been taken into account during the design phase of the services, it was not possible to respond adequately to this event.
+Despite the SPC Cloud provider's failover mechanisms, the web services were not protected from generalized outages capable of undermining the entire infrastructure of the only cloud provider at our disposal.
+
+## Actions taken
+
+WRITEME: A list of action items taken to mitigate/fix the problem
+
+- Action 1
+  - Owner
+- Action 2
+  - Owner
+...
+
+## Preventive actions
+
+WRITEME: A list of action items to prevent this from happening again
+
+
+## Lessons learned
+
+### What went wrong
+
+The Cloud SPC platform cannot currently distribute virtual machines across different data centers or regions (OpenStack regions).
+It would have been useful to be able to distribute virtual resources across independent infrastructures, even infrastructures provided by the same supplier.
+
+### What should have been done
+
+In hindsight, the Public Administration should have access to multiple cloud providers, so as to ensure the resilience of its services even when the main cloud provider suffers an outage.
+
+### Where we got lucky
+
+WRITEME: What things went right that could have gone wrong
+
+### What should we do differently next time
+
+The most important lesson we learned from this experience is the need to continue investing in the development of a cross-platform, multi-supplier cloud model.
+This model would guarantee the reliability of Public Administration services even when the main cloud provider is affected by problems that make it unreachable for a long period of time.
+
+## Timeline
+
+A timeline of the event, from discovery through investigation to resolution.
+All times are in CEST.
+
+### 2018-05-17
+
+22:30 CEST: The SPC MaaS alert service sends alerts through email indicating that several nodes can no longer be reached.
+
+### 2018-05-19
+
+06:50 CEST: The aforementioned services, available at the IP address 91.206.129.249, can no longer be reached.
+
+08:00 CEST: The problem is detected and reported to the supplier.
+
+09:30 CEST: The machines are determined to be accessible through OpenStack's administration interface (API and GUI) and internal connectivity reveals no issue. Virtual machines can communicate through the tenant's private network, but do not connect to the Internet.
+
+15:56 CEST: The Digital Team sends the supplier and CONSIP a help request via email.
+
+18:00 CEST: The supplier communicates that they have identified the problem, which turns out to be the same problem experienced by the DAF project, and commences work on a manual workaround.
+
+19:00 CEST: The supplier informs us that a fix has been produced and that it will be applied to the virtual machines belonging to the 31 public administrations (tenants) involved.
+
+### 2018-05-20
+
+11:10 CEST: The supplier restores connectivity to the VMs of the AgID tenant.
+
+11:30 CEST: The Digital Team reboots the web services and the sites are again reachable.
diff --git a/.gitlab/issue_templates/issue-simple.md b/.gitlab/issue_templates/issue-simple.md
new file mode 100644
index 0000000..4c53049
--- /dev/null
+++ b/.gitlab/issue_templates/issue-simple.md
@@ -0,0 +1,21 @@
+## When I (optional)
+
+1. xx
+1. xx
+1. xx
+
+## I expect
+
+- [ ] yy
+- [ ] zz
+
+## Instead
+
+-
+-
+-
+
+## Notes
+
+Attach sanitized logs, screenshots, outputs.
+CC folks
diff --git a/.gitlab/merge_request_templates/pr-simple.md b/.gitlab/merge_request_templates/pr-simple.md
new file mode 100644
index 0000000..a2f5db1
--- /dev/null
+++ b/.gitlab/merge_request_templates/pr-simple.md
@@ -0,0 +1,13 @@
+## This PR
+
+- [ ]
+- [ ]
+- [ ]
+
+## It's done
+
+- Rationale of the implementation
+
+## Checks
+
+- [ ] This PR conforms to the [CONTRIBUTING.md](CONTRIBUTING.md) guidelines
diff --git a/.gitlab/workflows/lint.yml b/.gitlab/workflows/lint.yml
new file mode 100644
index 0000000..a135621
--- /dev/null
+++ b/.gitlab/workflows/lint.yml
@@ -0,0 +1,32 @@
+# Run the SuperLinter action with some custom setup.
+
+name: Lint
+
+on:
+  push:
+    branches: ["main"]
+  pull_request:
+    branches: ["main"]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  build:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v2
+
+      - name: Super-Linter
+        uses: github/super-linter@v3.15.5
+        env:
+          VALIDATE_MARKDOWN: false
+          # Disabled for conflicts with the isort version used in pre-commit;
+          # you can re-enable it if you align your local isort with
+          # the one in the super-linter image.
+          VALIDATE_PYTHON_ISORT: false
+          VALIDATE_XML: false
+          VALIDATE_NATURAL_LANGUAGE: false
diff --git a/.gitlab/workflows/security-bandit.yml b/.gitlab/workflows/security-bandit.yml
new file mode 100644
index 0000000..e357e36
--- /dev/null
+++ b/.gitlab/workflows/security-bandit.yml
@@ -0,0 +1,54 @@
+# This is a basic workflow to help you get started with Actions
+
+name: "security-bandit"
+
+# Controls when the action will run. Triggers the workflow on push or pull request
+# events, but only for the main branch.
+on:
+  push:
+    branches: ["main"]
+    paths-ignore:
+      - "ISSUE_TEMPLATE/**"
+  pull_request:
+    branches: ["main"]
+    paths-ignore:
+      - "ISSUE_TEMPLATE/**"
+
+permissions: read-all
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v2
+
+      # Runs a single command using the runner's shell
+      - name: Python security check using Bandit
+        uses: ioggstream/bandit-report-artifacts@v1.7.4
+        with:
+          project_path: .
+          config_file: .bandit.yaml
+
+  super-sast:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@v3
+      - name: Test
+        run: |
+          echo UID=$(id -u) >> .env
+          docker run --rm --user=$(id -u) \
+            -v $PWD:/code \
+            -w /code \
+            -e MAVEN_OPTS=" -ntp " \
+            -e RUN_OWASP_DEPENDENCY_CHECK=false \
+            -e RUN_SPOTBUGS_CHECK=false \
+            -e RUN_SPOTLESS_CHECK=false \
+            -e RUN_SPOTLESS_APPLY=true \
+            -e HOME=/tmp \
+            -e USER=nobody \
+            -e BANDIT_CONFIG_FILE=/code/.bandit.yaml \
+            ghcr.io/par-tec/super-sast:latest
diff --git a/.gitlab/workflows/test.yml b/.gitlab/workflows/test.yml
new file mode 100644
index 0000000..f19c9fb
--- /dev/null
+++ b/.gitlab/workflows/test.yml
@@ -0,0 +1,77 @@
+# This workflow template runs:
+# - a tox container with tests
+# - a service container (e.g. a database) to be used by tox tests.
+
+name: Test
+
+# Controls when the action will run.
+on:
+  # Triggers the workflow on push or pull request events, but only for the main branch
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+
+  test-tox-job:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+    container: python:3.9-slim
+
+    # This stanza deploys a service container with
+    # the "rabbit" hostname. It is commented out
+    # to save build time. Uncomment it if you need
+    # it!
+    # services:
+    #   rabbit:
+    #     image: rabbitmq:3-management
+    #     ports:
+    #       - 5672:5672
+
+    # ...then run the tox jobs referencing it.
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it.
+      # IMPORTANT!! By default `actions/checkout` just checks out HEAD, so if you want
+      # to checkout tags and branches too (e.g. to auto-version your deployments)
+      # you need to pass the `fetch-depth: 0` option, e.g.
+      #
+      #   uses: actions/checkout@v2
+      #   with:
+      #     fetch-depth: 0
+      - uses: actions/checkout@v2
+
+      - name: Run tests.
+        run: |
+          pip3 install tox
+          tox
+
+  test-pre-commit:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+    container: python:3.9
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Run commit hooks.
+        run: |
+          pip3 --no-cache-dir install pre-commit
+          git --version
+          pwd
+          ls -la
+          id
+          git config --global --add safe.directory $PWD
+          pre-commit install
+          pre-commit run -a
+
+      # Store (expiring) logs on failure.
+      # Retrieve artifacts via `gh run download`.
+      - uses: actions/upload-artifact@v3
+        if: failure()
+        with:
+          name: pre-commit.log
+          path: /github/home/.cache/pre-commit/pre-commit.log
+          retention-days: 5
diff --git a/.isort.cfg b/.isort.cfg
new file mode 100644
index 0000000..d82f21e
--- /dev/null
+++ b/.isort.cfg
@@ -0,0 +1,5 @@
+#
+# Align isort profile with black.
+#
+[settings]
+profile = black
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..49de7b4
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,64 @@
+#
+# Run pre-commit hooks.
+# You can run them without installing the hook with
+#
+#   $ pre-commit run --all-files
+#
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.5.0
+  hooks:
+  - id: trailing-whitespace
+  - id: end-of-file-fixer
+  - id: check-xml
+  - id: detect-private-key
+  - id: check-yaml
+    args: [--allow-multiple-documents]
+  - id: check-added-large-files
+- repo: https://github.com/myint/autoflake
+  rev: v2.2.1
+  hooks:
+  - id: autoflake
+    args:
+    - --in-place
+    - --remove-unused-variables
+    - --remove-all-unused-imports
+- repo: https://github.com/psf/black
+  rev: 24.1.1
+  hooks:
+  - id: black
+- repo: https://github.com/pycqa/isort
+  rev: 5.13.2
+  hooks:
+  - id: isort
+    name: isort (python)
+    # Use the black profile for isort to avoid conflicts;
+    # see https://github.com/PyCQA/isort/issues/1518
+    args: ["--profile", "black"]
+  - id: isort
+    name: isort (cython)
+    types: [cython]
+  - id: isort
+    name: isort (pyi)
+    types: [pyi]
+- repo: https://github.com/PyCQA/flake8
+  rev: 7.0.0
+  hooks:
+  - id: flake8
+- repo: https://github.com/PyCQA/bandit
+  rev: 1.7.7
+  hooks:
+  - id: bandit
+    name: bandit
+    args: ["-c", ".bandit.yaml"]
+    description: 'Bandit is a tool for finding common security issues in Python code'
+    entry: bandit
+    language: python
+    language_version: python3
+    types: [python]
+- repo: https://github.com/Lucas-C/pre-commit-hooks-safety
+  rev: v1.3.3
+  hooks:
+  - id: python-safety-dependencies-check
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000..d18d77b
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,8 @@
+# .pylintrc
+
+[TYPECHECK]
+#
+# Click mutates function signatures:
+# see https://click.palletsprojects.com/en/8.1.x/
+#
+signature-mutators=click.decorators.option
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 0000000..d83181d
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,15 @@
+#
+# yamllint configuration file. It disables some checks to ease the integration
+# with other YAML tools (e.g. the pre-commit autoformatter, ...).
+#
+extends: default
+
+rules:
+  document-end: disable
+  document-start: disable
+  truthy: disable
+  brackets: disable
+  line-length:
+    max: 90
+  indentation:
+    indent-sequences: consistent
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..ca2d931
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# CONTRIBUTING
+
+To contribute to this repository, please follow the guidelines below.
+
+## pre-commit
+
+Pre-commit checks your files before committing. It can lint, format, or run
+other checks on them.
+
+Once you install it via
+
+    pip3 install pre-commit --user
+
+you can run it directly via
+
+    pre-commit run --all-files
+
+or install it as a pre-commit hook:
+
+    pre-commit install
+
+## Making a PR
+
+Contributing to a repository is done via pull requests (PRs).
+It is important to keep the code base clean and consistent over time,
+in order to keep it maintainable
+and to avoid unnecessary deployments (see [CI](#ci)).
+
+A proper development process, with code reviews, is part of a correct
+shift-left strategy.
+
+Following this procedure will help you to make a clean PR.
+Each PR should be associated with an issue and a branch;
+if the PR already exists, you can just start working from it.
+
+1. If there's no issue for your PR, create one where you describe the expected behavior and the current behavior;
+1. If you are not a member of the organization, fork the repository and fetch from both your fork and the origin:
+
+       GH=ioggstream # use your github username
+       git clone -o par-tec https://github.com/par-tec/python-cookiecutter
+       cd python-cookiecutter
+       git remote add origin git@github.com:${GH}/python-cookiecutter.git
+
+1. Create a branch for your PR starting from the main branch, using your username and the issue number as the branch name.
+   Before checking out, make sure you have the latest version of the `par-tec/main` branch.
+
+       ISSUE=123 # use the issue number
+       BRANCH=${GH}-${ISSUE}
+       git fetch --all
+       git checkout -b ${BRANCH} par-tec/main
+
+   If the PR already exists, you can continue to work on it, always fetching the latest version
+   and ensuring that your working copy is up to date. Otherwise, you risk wasting time
+   resolving conflicts.
+
+       git fetch --all # Always download latest changes
+       git checkout par-tec/${BRANCH}
+
+1. Make your changes (this includes [pre-commit checks](#pre-commit)) and review them when adding.
+   This is an important and overlooked step, especially when
+   you are working alone or on a large PR. Moreover, this allows you to split your changes into multiple commits
+   or to discard some of the changes that you still want to temporarily keep in your working directory.
+
+       git add -p
+
+1. You can now commit them. If your PR fixes the issue,
+   the commit message should start with `Fix: #ISSUE` where `ISSUE` is the issue number.
+   Otherwise, a reference to the issue can be added in the commit message body.
+
+       git add .
+       git commit -m "Fix: #$ISSUE. Brief description of the changes."
+
+   If the PR does not fix the issue, you can always reference it
+   in the commit messages.
+
+       git commit -m "Brief description of the changes. See #ISSUE."
+
+1. Now you can push the branch and create the PR.
+   If your branch is published on your fork, you can create the PR directly
+   from GitHub.
+
+       git push origin ${BRANCH}
+
+   When opening the PR from the web interface, please indicate:
+
+   - if the PR is a draft one, prefixing it with the `WIP:` string
+     or using the **draft PR** functionality of GitHub;
+   - the target branch, e.g. `par-tec/main`;
+   - what has been done, including the fixed issues (e.g. `Fix: #123`);
+   - when useful, describe the solution.
+
+   If the PR is not ready to merge, you can still:
+
+   - notify your colleagues by tagging them (e.g. `CC: @ioggstream`);
+   - ask for a review if you have the associated permissions
+     (e.g. "Add reviewers" on GitHub);
+   - proof-read it from the code-hosting platform's web UI, tag colleagues,
+     or [suggest changes](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/incorporating-feedback-in-your-pull-request).
+
+   This project requires that PRs are rebased before being merged,
+   in order to ensure a clear history.
+   Further information on rebasing and merging is available on
+   the [Linux kernel website](https://docs.kernel.org/maintainer/rebasing-and-merging.html)
+   and on [Atlassian](https://www.atlassian.com/git/tutorials/merging-vs-rebasing).
+
+1. Once the PR is merged, you can delete your local and remote branches,
+   and fetch the latest version from the upstream repository.
+   The code-hosting platform can be configured to remove
+   remote branches automatically after merge.
+
+## CI
+
+Each PR is tested by a CI workflow that runs on GitHub Actions.
+The final step might include a deployment to PyPI or to an OCI image registry.
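+
+You can reproduce most of the CI checks locally before pushing;
+a minimal sketch using the tools already configured in this repository:
+
+    pre-commit run --all-files  # same hooks as the test-pre-commit job
+    tox                         # same suite as the test-tox-job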
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..16de419
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2021, Par-Tec Spa
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..56b9d40
--- /dev/null
+++ b/README.md
@@ -0,0 +1,52 @@
+# Python Cookiecutter repository
+
+Python template repository including boilerplate workflows and CI.
+
+```bash
+.bandit.yaml
+.pre-commit-config.yaml
+.github
+└── workflows
+```
+
+## Creating a new project
+
+The name of a new project should be descriptive and short.
+The repository name should be in [kebab-case](https://it.wikipedia.org/wiki/Kebab_case), e.g., `python-cookiecutter`,
+`api-onboarding`.
+Avoid CamelCase or underscores: you can use them for OOP classes or properties.
+
+## Contributing
+
+Please, see [CONTRIBUTING.md](CONTRIBUTING.md) for more details on:
+
+- using [pre-commit](CONTRIBUTING.md#pre-commit);
+- following the git flow and making good [pull requests](CONTRIBUTING.md#making-a-pr).
+
+## Using this repository
+
+You can create new projects starting from this repository,
+so you can use a consistent CI and checks for different projects.
+
+Besides all the explanations in the [CONTRIBUTING.md](CONTRIBUTING.md) file, you can use the docker-compose file
+(e.g. if you prefer to use Docker instead of installing the tools locally):
+
+```bash
+docker-compose run pre-commit
+```
+
+## Testing GitHub Actions
+
+Tune the GitHub pipelines in [.github/workflows](.github/workflows/).
+
+To speed up development, you can test the pipeline with [act](https://github.com/nektos/act).
+Installing `act` is beyond the scope of this document.
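+
+For instance, you can preview the jobs `act` would execute (a sketch; the
+output depends on your workflow files):
+
+```bash
+act -l
+```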
+
+To test the pipeline locally and ensure that secrets (e.g., service accounts and other credentials)
+are correctly configured, use:
+
+```bash
+# Run a specific job in the pipeline
+act -j test -s CI_API_TOKEN="$(cat gh-ci.json)" \
+    -s CI_ACCOUNT=my-secret-account
+```
diff --git a/TESTING.md b/TESTING.md
new file mode 100644
index 0000000..1a1ee8f
--- /dev/null
+++ b/TESTING.md
@@ -0,0 +1,48 @@
+# Considerations for the setup of the development environment
+
+When setting up a development environment, clarify
+which components should be verified during the development phase.
+
+Projects tend to grow complex, and it is not always easy to understand which components are involved in a test.
+For example, a modern application relies on many moving parts, including container orchestrators, API gateways, datastores & queues, credentials management, and so on.
+
+```mermaid
+graph LR
+
+classDef dev stroke: red
+classDef ctx stroke: lightgreen
+
+subgraph sut ["In red: the system under development"]
+direction LR
+
+load-balancer((Load\nBalancer))
+backend[[Application]]:::dev
+datastore[(Datastore)]
+backend-services[[Back End Services]]
+vault[(Vault)]
+authnz([Authorization &\n Authentication])
+
+Client --> load-balancer --> backend --> datastore & backend-services
+Client & backend --> authnz
+backend & backend-services --> vault
+
+end
+```
+
+Containerization helps in recreating environments that are similar to production,
+and avoids the need to implement mocks: see [Prefer Realism Over Isolation](https://abseil.io/resources/swe-book/html/ch13.html#prefer_realism_over_isolation).
+This is the reason why this repository contains a `docker-compose.yaml` file that can be used to run the application locally.
+
+Yet, production-like setups can be challenging to maintain in development environments.
+Some of the causes are:
+
+- the increased deployment time and latency caused by the larger number of components;
+- the number of moving parts that could lead to flaky tests or unpredictable results (e.g., network issues, shortage of computational resources, ...);
+- the need to maintain a large number of configurations and credentials.
+
+It is then key to identify the components that are relevant for the test during the development phase. At the same time, more comprehensive tests can be run in a specific continuous integration environment, where tests can be retried more easily.
+
+You may want to create different Compose files for different purposes:
+for example, a simple one for local development running a non-replicated datastore and an in-memory HashiCorp Vault instance,
+and a more complex one (or a Kubernetes manifest file) with all the bits for a more realistic setup.
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..13064b5
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,35 @@
+#
+# Run this file using
+#
+#   $ docker-compose run pre-commit
+#
+version: "3.9"
+services:
+  pre-commit:
+    build:
+      context: .
+      dockerfile: tests/Dockerfile.pre-commit
+    working_dir: /code
+    volumes:
+      - .:/code
+  super-linter:
+    image: github/super-linter:v3.15.5
+    environment:
+      - RUN_LOCAL=true
+      - VALIDATE_MARKDOWN=false
+      - VALIDATE_PYTHON_ISORT=false
+      - VALIDATE_XML=false
+      - VALIDATE_NATURAL_LANGUAGE=false
+      - IGNORE_GITIGNORED_FILES=true
+    volumes:
+      - .:/tmp/lint/
+      - ./tox.ini:/action/lib/.automation/.flake8
+  # This container will run successfully
+  # only if all dependencies return no errors.
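+  # Usage sketch: `docker-compose up test` succeeds only when both
+  # linter dependencies exit without errors.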
+  test:
+    image: busybox
+    depends_on:
+      pre-commit:
+        condition: service_completed_successfully
+      super-linter:
+        condition: service_completed_successfully
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..e1c44e2
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,2 @@
+# Development requirements, also checked by the safety env.
+pytest
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/Dockerfile.pre-commit b/tests/Dockerfile.pre-commit
new file mode 100644
index 0000000..1562a3c
--- /dev/null
+++ b/tests/Dockerfile.pre-commit
@@ -0,0 +1,10 @@
+#
+# Self-baked pre-commit docker image.
+#
+FROM python:3
+RUN useradd noop -m
+USER noop
+RUN pip3 --no-cache-dir install --user \
+    tox==4.3.5 \
+    pre-commit==3.0.0
+ENTRYPOINT ["/home/noop/.local/bin/pre-commit", "run", "-a"]
diff --git a/tests/test_noop.py b/tests/test_noop.py
new file mode 100644
index 0000000..ee76f38
--- /dev/null
+++ b/tests/test_noop.py
@@ -0,0 +1,2 @@
+def test_noop():
+    assert True
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..dcbe1fb
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,79 @@
+[tox]
+envlist = py3, safety
+
+# By default, we do not publish a module.
+skipsdist=True
+
+[testenv]
+deps =
+    -rrequirements.txt
+    -rrequirements-dev.txt
+
+# Uncomment here to set an extra PIP_INDEX_URL
+# setenv =
+#     PIP_EXTRA_INDEX_URL = https://mypypiserver.org
+
+setenv =
+    PYTHONPATH=:.:
+
+# To show pytest logs in console, use
+#   tox -- --log-cli-level=DEBUG
+commands =
+    pytest {posargs}
+
+[testenv:safety]
+# Tune up this section (e.g., if safety does not use requirement files).
+deps =
+    -rrequirements.txt
+    -rrequirements-dev.txt
+    safety
+
+setenv =
+    PYTHONPATH=:.:
+
+commands =
+    safety check --short-report -r requirements.txt
+
+[testenv:release]
+# Release with tox via:
+#
+#   tox -e release -- $PARAMETERS
+#
+# passing the repo references you would set via
+# twine, e.g.:
+#   --repository-url https://test.pypi.org/legacy/
+#
+# Credentials are passed via the TWINE_* variables below.
+deps =
+    build
+    twine
+    wheel
+
+# Limit TWINE_* to this section.
+passenv =
+    TWINE_USERNAME
+    TWINE_PASSWORD
+    TWINE_REPOSITORY_URL
+
+commands =
+#   rm dist -rf
+    python -m build # sdist, or whatever
+    twine upload {posargs} dist/*
+
+
+# Uncomment the following section if you want to
+# test the installation on the test PyPI.
+# [testenv:test-release]
+# commands =
+#     pip install --index-url=https://test.pypi.org/simple
+
+#
+# Tools configuration.
+#
+[flake8]
+# Ignore long lines in flake8 because
+# they are managed by black and we
+# want to support links.
+max-line-length = 150
+# Disable E203 because black correctly handles whitespaces before ':'.
+extend-ignore = E203
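+
+# Example invocations (a sketch):
+#   tox                            # run the py3 and safety envs
+#   tox -e safety                  # run only the dependency check
+#   tox -- --log-cli-level=DEBUG   # forward extra options to pytest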