diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..8648d98 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,149 @@ +# Contributing + +# CLI + +## Install + +```sh +➜ git clone git@github.com:calypr/backup-service + +➜ cd backup-service + +➜ python3 -m venv venv && source venv/bin/activate + +➜ pip install -r requirements.txt + +➜ pip install -e . + +➜ which bak +./venv/bin/bak + +➜ bak --help +Usage: bak [OPTIONS] COMMAND [ARGS]... + +Options: + --version Show the version and exit. + -v, --verbose, --debug Enable debug logging. + --help Show this message and exit. + +Commands: + grip (gp) Commands for GRIP backups. + pg (pg) Commands for Postgres backups. + s3 Commands for S3. +``` + +## PostgreSQL + +| Command | Example | +|-------------|------------------| +| List Tables | `bak pg ls` | +| Backup | `bak pg dump` | +| Restore | `bak pg restore` | + +## GRIP + +| Command | Example | +|-------------|--------------------| +| List Graphs | `bak pg ls` | +| Backup | `bak grip backup` | +| Restore | `bak grip restore` | + +## S3 + +| Command | Example | +|--------------|-------------------| +| List backups | `bak pg ls` | +| Upload | `bak s3 upload` | +| Download | `bak s3 download` | + +# Helm + +```sh +➜ helm repo add ohsu https://ohsu-comp-bio.github.io/helm-charts +"ohsu" has been added to your repositories + +➜ helm repo update ohsu +Update Complete. ⎈Happy Helming!⎈ + +➜ helm search repo ohsu +NAME CHART VERSION APP VERSION DESCRIPTION +ohsu/backups 0.2.5 1.13.0 A Helm chart for Kubernetes + +➜ kubectl config current-context +kind-dev + +➜ kubectl create secret generic postgres-credentials --from-literal=postgres-password= --namespace backups + +➜ kubectl create secret generic s3-credentials --from-literal=AWS_ACCESS_KEY= --from-literal=AWS_SECRET_KEY= --namespace backups + +➜ helm upgrade --install backups ohsu/backups --create-namespace --namespace backups +Release "backups" has been upgraded. Happy Helming! 
+ +➜ kubectl create job example-job --from=cronjob/backup-service-cronjob --namespace backups +job.batch/example-job created + +➜ kubectl get jobs -n backups +NAME STATUS COMPLETIONS DURATION +example-job Complete 1/1 11s + +➜ mc ls cbds/calypr-backups/calypr-dev +2025-09-12T23:10:29/ + +➜ mc ls cbds/calypr-backups/calypr-dev/2025-09-12T23:10:29/ +CALYPR.edges +CALYPR.vertices +CALYPR__schema__.edges +CALYPR__schema__.vertices +arborist_local.sql +fence_local.sql +gecko_local.sql +indexd_local.sql +postgres.sql +requestor_local.sql +``` + +* Steps to confirm backups in S3 bucket with mc + +```sh +➜ brew install minio-mc + +➜ which mc +/opt/homebrew/bin/mc + +➜ mc alias set example https://aced-storage.ohsu.edu +Enter Access Key: +Enter Secret Key: +Added `example` successfully. + +➜ mc alias ls example +cbds + URL : https://aced-storage.ohsu.edu + AccessKey : + SecretKey : + API : s3v4 + Path : auto + Src : $HOME/.mc/config.json + +➜ mc ls cbds/calypr-backups/calypr-dev/ +... +2025-09-12T02:00:01/ <---- Last timestamped backup + +➜ mc ls cbds/calypr-backups/calypr-dev/2025-09-12T02:00:01/ +160MiB CALYPR.edges <---- CALYPR edges +1.8GiB CALYPR.vertices <---- CALYPR vertices + 0B CALYPR__schema__.edges <---- Schema edges +1.4MiB CALYPR__schema__.vertices <---- Schema vertices +107KiB arborist_local.sql <---- Arborist +234KiB fence_local.sql <---- Fence +6.0KiB gecko_local.sql <---- Gecko + 21MiB indexd_local.sql <---- Indexd +9.6KiB metadata_local.sql <---- Metadata +2.9KiB postgres.sql <---- Postgres + 64KiB requestor_local.sql <---- Requestor +8.0KiB wts_local.sql <---- Workspace Token Service +``` + +# Known Limitations (Next Steps) ⚠️ + +- [ ] No clear, human-readable output of the path of the backup in S3 after a successful run +- [ ] Always uploads to calypr-dev even when using local k8s cluster diff --git a/Dockerfile b/Dockerfile index fc946cb..2530f33 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,27 +1,27 @@ -# GRIP build -# Ref: 
https://github.com/bmeg/grip/blob/develop/Dockerfile -FROM golang:1.17.2-alpine AS grip - -RUN apk add --no-cache make git bash build-base - -ENV GOPATH=/go -ENV PATH="/go/bin:${PATH}" - -WORKDIR /go/src/github.com/bmeg - -RUN git clone https://github.com/bmeg/grip +# Backup build +FROM python:slim -WORKDIR /go/src/github.com/bmeg/grip +RUN apt-get update && apt-get install -y \ + build-essential \ + gcc \ + libpq-dev -# Checkout latest GRIP tag. Example: -# $ git describe --tags --abbrev=0 -# v1.9.0 -RUN git checkout $(git describe --tags --abbrev=0) +RUN apt-get install -y postgresql-common -RUN make install +RUN YES=true /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -# Backup build -FROM python:slim +# Note: We're using Postgres 14 to match the version set in Gen3-Helm: +# +# Gen3-Helm Chart: https://github.com/calypr/gen3-helm/blob/v1.0.0/helm/gen3/Chart.yaml#L92-L94 +# +# Postgres Chart: https://github.com/bitnami/charts/blob/postgresql/11.9.13/bitnami/postgresql/Chart.yaml#L4 +# +# ``` +# ➜ kubectl exec --stdin --tty StatefulSets/cbds-postgresql -- /bin/bash +# $ psql --version +# psql (PostgreSQL) 14.5 +# ``` +RUN apt-get update && apt-get install -y postgresql-client-14 WORKDIR /app @@ -39,9 +39,4 @@ RUN mkdir -p /backups COPY entrypoint.sh ./entrypoint.sh RUN chmod +x ./entrypoint.sh -RUN apt-get update && apt-get install -y --no-install-recommends postgresql-client - -# Copy GRIP binary from build stage -COPY --from=grip /go/bin/grip /usr/local/bin/grip - ENTRYPOINT ["./entrypoint.sh"] diff --git a/Dockerfile.es b/Dockerfile.es new file mode 100644 index 0000000..6d2eeaa --- /dev/null +++ b/Dockerfile.es @@ -0,0 +1,13 @@ +# Creating a custom Docker image to include the s3 snapshot repository plugin +# Ref: https://github.com/elastic/helm-charts/blob/v7.17.3/elasticsearch/README.md#how-to-install-plugins + +# Manual build command: +# docker buildx build --platform=linux/arm64,linux/amd64 -t quay.io/ohsu-comp-bio/elasticsearch-s3:7.17.3 -f 
Dockerfile.es . --push +# TODO: Add this to GitHub Actions for automatic builds + +# Start from the official Elasticsearch image you are currently using +FROM docker.elastic.co/elasticsearch/elasticsearch:7.17.3 + +# Install the S3 repository plugin +# The 'install' command runs at build time, and is baked into the final image +RUN bin/elasticsearch-plugin install --batch repository-s3 diff --git a/README.md b/README.md index 3351be9..84368d9 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,26 @@ Data backup and recovery service for the CALYPR systems 🔄 # 2. Quick Start ⚡ +> [!TIP] +> The recommended use of the backup-service is through deploying to a K8s cluster for automated daily backups. + ```sh +➜ helm repo add ohsu https://ohsu-comp-bio.github.io/helm-charts + +➜ helm upgrade --install backups ohsu/backups +``` + +# 3. CLI + +> [!TIP] +> Manual backups (and restorations) can be done through the CLI + +```sh +➜ git clone git@github.com:calypr/backup-service.git +Cloning into 'backup-service'... + +➜ cd backup-service + ➜ python3 -m venv venv && source venv/bin/activate ➜ pip install -r requirements.txt @@ -49,40 +68,7 @@ Commands: upload local ➜ S3 ``` -# 3. 
Design + Examples 📐 - -```mermaid -sequenceDiagram - participant Backup as Backup Service - participant Database - participant S3 as S3 Bucket - - Title: Gen3 Backups - - Backup-->>Database: Database Credentials - - Note over Database: `pg_dump` - - Database-->>Backup: Database Backup - - Backup-->>S3: Database Backup -``` - -| Service | Postgres Database | Database Backup Name | Description | -| ---------------------- | ------------------- | ----------------------------- | ------------------------------------------------ | -| [Arborist][arborist] | `arborist-EXAMPLE` | `arborist-EXAMPLE-TIMESTAMP` | Gen3 policy engine | -| [Fence][fence] | `fence-EXAMPLE` | `fence-EXAMPLE-TIMESTAMP` | AuthN/AuthZ OIDC service | -| [Gecko][gecko] | `gecko-EXAMPLE` | `gecko-EXAMPLE-TIMESTAMP` | Frontend configurations for dynamic data loading | -| [Indexd][indexd] | `indexd-EXAMPLE` | `indexd-EXAMPLE-TIMESTAMP` | Data indexing and tracking service | -| [Requestor][requestor] | `requestor-EXAMPLE` | `requestor-EXAMPLE-TIMESTAMP` | Data access manager | - -[arborist]: https://github.com/uc-cdis/arborist -[fence]: https://github.com/uc-cdis/fence -[gecko]: https://github.com/aced-idp/gecko -[indexd]: https://github.com/uc-cdis/indexd -[requestor]: https://github.com/uc-cdis/requestor - -## Backup ⬆️ +## Backup ⬆ ### Postgres Dump: @@ -95,12 +81,6 @@ sequenceDiagram --dir DIR ``` -## ElasticSearch Backup: - -``` -➜ bak es backup -``` - ## GRIP Backup: ```sh @@ -118,7 +98,7 @@ sequenceDiagram --secret SECRET ``` -## Restore ⬇️ +## Restore ⬇ ### Postgres Restore: @@ -131,12 +111,6 @@ sequenceDiagram --dir DIR ``` -## ElasticSearch Restore: - -``` -➜ bak es restore -``` - ## GRIP Restore: ```sh @@ -154,7 +128,40 @@ sequenceDiagram --secret SECRET ``` -# 4. Alternatives 📖 +# 4. 
Design 📐 + +```mermaid +sequenceDiagram + participant Backup as Backup Service + participant Database + participant S3 as S3 Bucket + + Title: Gen3 Backups + + Backup-->>Database: Database Credentials + + Note over Database: `pg_dump` + + Database-->>Backup: Database Backup + + Backup-->>S3: Database Backup +``` + +| Service | Postgres Database | Database Backup Name | Description | +| ---------------------- | ------------------- | ----------------------------- | ------------------------------------------------ | +| [Arborist][arborist] | `arborist-EXAMPLE` | `arborist-EXAMPLE-TIMESTAMP` | Gen3 policy engine | +| [Fence][fence] | `fence-EXAMPLE` | `fence-EXAMPLE-TIMESTAMP` | AuthN/AuthZ OIDC service | +| [Gecko][gecko] | `gecko-EXAMPLE` | `gecko-EXAMPLE-TIMESTAMP` | Frontend configurations for dynamic data loading | +| [Indexd][indexd] | `indexd-EXAMPLE` | `indexd-EXAMPLE-TIMESTAMP` | Data indexing and tracking service | +| [Requestor][requestor] | `requestor-EXAMPLE` | `requestor-EXAMPLE-TIMESTAMP` | Data access manager | + +[arborist]: https://github.com/uc-cdis/arborist +[fence]: https://github.com/uc-cdis/fence +[gecko]: https://github.com/aced-idp/gecko +[indexd]: https://github.com/uc-cdis/indexd +[requestor]: https://github.com/uc-cdis/requestor + +# 5. Alternatives 📖 > [!TIP] > The alternative options below work on the K8s resources themseleves (e.g. PVC/PV) as opposed to database resources (e.g. 
#!/bin/bash
# Nightly backup driver: dumps Postgres and GRIP, uploads the dumps to S3,
# then triggers an Elasticsearch snapshot into its own S3 repository.
#
# FIX: was `set -eio pipefail` -- `-i` forces an interactive shell and is
# certainly unintended; use -e, -u and -o pipefail explicitly so unset
# required env vars fail loudly (caught by the ERR trap below).
set -euo pipefail

trap 'echo "Backup failed."; exit 1' ERR

# Backup Overview/Structure:
#
# ENDPOINT/BUCKET/TIMESTAMP
# │
# ├─ grip
# │   ├─ CALYPR.edges
# │   └─ CALYPR.vertices
# │
# └─ postgres
#     ├─ arborist_local.sql
#     ├─ fence_local.sql
#     ├─ gecko_local.sql
#     ├─ indexd_local.sql
#     ├─ metadata_local.sql
#     ├─ postgres.sql
#     ├─ requestor_local.sql
#     └─ wts_local.sql
#
# Elasticsearch snapshots are kept separate from the Postgres/GRIP dumps to
# conform to the standard snapshot behavior/structure (e.g. incremental diffs)
# Ref: https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/self-managed

TIMESTAMP=$(date +"%Y-%m-%dT%H:%M:%S")
export DIR="${DIR}/${TIMESTAMP}"

# 1. Postgres Dump (PGPASSWORD is read from the environment by pg_dump)
bak --debug pg dump \
  --dir "${DIR}" \
  --host "${PGHOST}" \
  --user "${PGUSER}"

# 2. GRIP Backup
bak --debug grip backup \
  --dir "${DIR}" \
  --host "${GRIP_HOST}" \
  --graph "${GRIP_GRAPH}" \
  --vertex \
  --edge

# 3. S3 Upload
bak --debug s3 upload \
  --dir "${DIR}" \
  --endpoint "${ENDPOINT}" \
  --bucket "${BUCKET}" \
  --key "${ACCESS_KEY}" \
  --secret "${SECRET_KEY}"

# 4. Elasticsearch Snapshot
# FIX: the CLI defines `bak es backup` (there is no `es dump`), and it takes
# --repo/--snapshot only; --endpoint/--bucket belong to `bak es repo init`.
# ES_HOST/ES_PORT are picked up from the environment by the CLI.
# NOTE(review): Elasticsearch snapshot names may not contain `/`, so pass the
# bare TIMESTAMP rather than DIR -- confirm the cluster accepts the name.
# Also removed the stray trailing `\` that used to follow the last flag.
bak --debug es backup \
  --repo "${ES_REPO}" \
  --snapshot "${TIMESTAMP}"

echo "Backup Complete:"
echo "- ENDPOINT: ${ENDPOINT}"
echo "- BUCKET: ${BUCKET}"
echo "- DIR: ${DIR}"
from dataclasses import dataclass
import logging
import sys
import click
from elasticsearch import Elasticsearch


# ElasticSearch Flags: click decorator adding the shared connection options.
def es_flags(fn):
    options = [
        click.option(
            "--host",
            "-H",
            envvar="ES_HOST",
            default="localhost",
            show_default=True,
            help="ElasticSearch host ($ES_HOST)",
        ),
        click.option(
            "--port",
            "-p",
            envvar="ES_PORT",
            default=9200,
            show_default=True,
            help="ElasticSearch port ($ES_PORT)",
        ),
    ]
    # Apply in reverse so the flags appear in declaration order in --help.
    for option in reversed(options):
        fn = option(fn)
    return fn


@dataclass
class ESConfig:
    """ElasticSearch config"""

    host: str
    port: int

    # Backup repo
    # https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/self-managed
    repo: str = ""
    bucket: str = ""
    endpoint: str = ""


def _connect(esConfig: ESConfig) -> Elasticsearch:
    """
    Connects to a given ElasticSearch instance.

    Raises on connection errors after logging them.
    """
    assert esConfig.host, "Host must not be empty"
    assert esConfig.port, "Port must not be empty"

    try:
        elastic = Elasticsearch(
            hosts=[{"host": esConfig.host, "port": esConfig.port, "scheme": "http"}],
        )
    except Exception as err:
        logging.error(f"Error connecting to Elasticsearch: {err}")
        raise

    return elastic


def _getIndices(esConfig: ESConfig) -> list[str]:
    """
    Utility function to list all indices.
    """
    elastic = _connect(esConfig)

    # Get all indices using the cat.indices() method
    indices = elastic.cat.indices(h="index").splitlines()

    # Remove unused '.geoip_databases' to avoid `400` error during snapshot
    # https://www.elastic.co/docs/reference/enrich-processor/geoip-processor
    if ".geoip_databases" in indices:
        indices.remove(".geoip_databases")

    return indices


def _getRepos(esConfig: ESConfig) -> list[str]:
    """
    Utility function to list all repos.
    """
    elastic = _connect(esConfig)

    repos = elastic.cat.repositories().splitlines()

    return repos


def _getSnapshots(esConfig: ESConfig, repo: str) -> list[str]:
    """
    Utility function to list all snapshots in a given repository.
    """
    elastic = _connect(esConfig)

    snapshots = elastic.snapshot.get(
        repository=repo,
        snapshot="_all",
    )["snapshots"]

    snapshot_names = [snap["snapshot"] for snap in snapshots]

    return snapshot_names


def _getSnapshotIndices(esConfig: ESConfig, repo: str, snapshot: str) -> list[str]:
    """
    Utility function to list all indices in all snapshots in a given repository.
    """
    elastic = _connect(esConfig)

    snapshots = elastic.snapshot.get(
        repository=repo,
        snapshot=snapshot,
    )["snapshots"]

    indices = []
    for snap in snapshots:
        indices.extend(snap.get("indices", []))

    # Remove unused '.geoip_databases' to avoid `400` error during snapshot
    # https://www.elastic.co/docs/reference/enrich-processor/geoip-processor
    if ".geoip_databases" in indices:
        indices.remove(".geoip_databases")

    return indices


def _snapshot(esConfig: ESConfig, indices: list[str], snapshot: str) -> str | None:
    """
    Creates a snapshot of indices using Elasticsearch Snapshot API.

    Returns the snapshot name on success, None on failure.
    """
    elastic = _connect(esConfig)

    response = elastic.snapshot.create(
        # Snapshot repo
        repository=esConfig.repo,
        # Timestamp
        snapshot=snapshot,
        # Indices to backup
        indices=indices,
        # Block until complete
        wait_for_completion=True,
    )

    logging.debug(f"Snapshot response: {response}")

    if response["snapshot"]["state"] == "SUCCESS":
        # TODO: Return more useful info here?
        return response["snapshot"]["snapshot"]
    else:
        logging.error(f"Snapshot error: {response}")


def _restore(esConfig: ESConfig, indices: list[str], snapshot: str) -> str | None:
    """
    Restores indices from a snapshot using Elasticsearch Snapshot API.
    If the indices do not exist, they are created first so the close() call
    below does not fail with index_not_found; restore then repopulates them.

    Returns the snapshot name on success, None on failure.
    """
    elastic = _connect(esConfig)

    # Check if indices exist
    existing_indices = _getIndices(esConfig)

    for index in indices:
        if index not in existing_indices:
            # Create the index if it doesn't exist
            logging.info(f"Index '{index}' does not exist. Creating it before restore.")
            elastic.indices.create(index=index)

    # Close indices before restore (open indices cannot be restored over)
    elastic.indices.close(index=",".join(indices))

    response = elastic.snapshot.restore(
        repository=esConfig.repo,
        snapshot=snapshot,
        indices=indices,
        wait_for_completion=True,
    )

    logging.debug(f"Restore response: {response}")

    # FIX: unlike snapshot *create*, the restore response carries no "state"
    # field; with wait_for_completion=True success is reported via the shard
    # summary: {"snapshot": {"snapshot": ..., "indices": [...], "shards":
    # {"total": N, "failed": 0, "successful": N}}}. The old check on
    # response["snapshot"]["state"] raised KeyError even on success.
    snap = response["snapshot"]
    if snap.get("shards", {}).get("failed", 1) == 0:
        # TODO: Return more useful info here?
        return snap["snapshot"]

    logging.error(f"Snapshot '{snapshot}' error: {response}")
from datetime import datetime
from backup.es import (
    ESConfig,
    es_flags,
    _getIndices,
    _getRepos,
    _getSnapshots,
    _getSnapshotIndices,
    _snapshot,
    _restore,
)
from .repo.cli import repo, es_repo_flags
import click
import logging

# NOTE: the previous version imported ESConfig twice (once from backup.es and
# once from `.`); merged into a single import above.


@click.group()
def es():
    """Commands for ElasticSearch backups."""
    pass


@es.command()
@es_flags
@click.option(
    "--repos",
    is_flag=True,
    default=False,
    help="List all snapshot repositories instead of indices",
)
@click.option(
    "--repo",
    default=None,
    help="Specify a repository to list its snapshots",
)
@click.option(
    "--snapshot",
    default=None,
    help="Specify a snapshot to list its indices",
)
def ls(host: str, port: int, repos: bool, repo: str, snapshot: str):
    """List live indices, snapshot repositories, snapshots of a repository, or indices in a snapshot"""
    esConfig = ESConfig(host=host, port=port)

    # Exactly one listing is chosen, most specific flags first.
    if repos:
        # Repos in the current cluster
        names = _getRepos(esConfig)
    elif repo and snapshot:
        # Indices in a given snapshot
        names = _getSnapshotIndices(esConfig, repo, snapshot)
    elif repo:
        # Snapshots in a given repo
        names = _getSnapshots(esConfig, repo)
    else:
        # Indices in the current cluster
        names = _getIndices(esConfig)

    for name in names:
        click.echo(name)


@es.command()
@es_flags
@es_repo_flags
@click.option(
    "--snapshot",
    "-s",
    # NOTE: `required=True` was dropped -- the default below always supplies a
    # value, so `required` could never take effect.
    default=lambda: datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
    help="Snapshot name (will be created under the repository)",
)
def backup(host: str, port: int, repo: str, snapshot: str):
    """elasticsearch ➜ snapshot"""
    esConfig = ESConfig(host=host, port=port, repo=repo)

    indices = _getIndices(esConfig)
    if not indices:
        logging.warning(f"No indices found at {esConfig.host}:{esConfig.port}")
        return

    logging.debug(f"Backing up indices '{indices}' to snapshot '{snapshot}'")
    resp = _snapshot(esConfig, indices, snapshot)
    if resp:
        logging.info(f"Snapshot created: {resp}")
    else:
        logging.error("Snapshot creation failed")


@es.command()
@es_flags
@es_repo_flags
@click.option(
    "--snapshot",
    "-s",
    required=True,
    help="Snapshot name to restore from",
)
def restore(host: str, port: int, repo: str, snapshot: str):
    """snapshot ➜ elasticsearch"""
    esConfig = ESConfig(host=host, port=port, repo=repo)

    indices = _getSnapshotIndices(esConfig, repo, snapshot)
    if not indices:
        logging.warning(
            f"No indices found to restore at {esConfig.host}:{esConfig.port}."
        )
        return

    # Restore indices
    _ = _restore(esConfig, indices, snapshot)


# Elasticsearch snapshot repository commands
es.add_command(repo)
import logging
from backup.es import ESConfig, _connect


def _getRepos(esConfig: ESConfig) -> list[str] | None:
    """
    Utility function to connect to ElasticSearch and list all snapshot repositories.

    Returns None (rather than raising) when the listing fails.
    """
    elastic = _connect(esConfig)

    try:
        repos = elastic.snapshot.get_repository(name="_all")  # Get all repositories
        repo_names = list(repos.keys())  # Extract just the names
        return repo_names
    except Exception as err:
        logging.error(f"Error listing Elasticsearch repositories: {err}")
        return None


def _initRepo(esConfig: ESConfig) -> bool:
    """
    Initializes an S3-backed snapshot repository in ElasticSearch.
    """
    elastic = _connect(esConfig)

    # Create the repository.
    # NOTE(review): esConfig.endpoint is accepted by callers but never used
    # here -- for a non-AWS endpoint (e.g. MinIO) the S3 client endpoint must
    # be configured on the Elasticsearch nodes; confirm the cluster's s3
    # client settings cover it, or wire it into the settings below.
    elastic.snapshot.create_repository(
        name=esConfig.repo,
        repository={
            "type": "s3",
            "settings": {
                "bucket": esConfig.bucket,
                "base_path": esConfig.repo,
            },
        },
    )

    logging.info(f"Repository '{esConfig.repo}' created successfully.")

    return True


def _deleteRepo(esConfig: ESConfig, force: bool) -> bool:
    """
    Deletes a snapshot repository in ElasticSearch.

    Refuses to act unless force is True. (FIX: docstring/comment previously
    said "Initializes"/"Create", copied from _initRepo.)
    """
    if not force:
        logging.error("Deletion of repository requires --force flag.")
        return False

    elastic = _connect(esConfig)

    # Delete the repository (snapshot data in the bucket is left in place).
    elastic.snapshot.delete_repository(
        name=esConfig.repo,
    )

    logging.info(f"Repository '{esConfig.repo}' deleted successfully.")

    return True
import logging
import click
from .. import ESConfig, es_flags
from ..repo import _deleteRepo, _getRepos, _initRepo
from backup.s3.cli import s3_flags


# Click decorator adding the snapshot-repository flag (--repo) to a command.
def es_repo_flags(fn):
    options = [
        click.option(
            "--repo",
            "-r",
            envvar="ES_REPO",
            show_default=True,
            help="ElasticSearch snapshot repository name ($ES_REPO)",
        ),
    ]
    for option in reversed(options):
        fn = option(fn)
    return fn


@click.group()
def repo():
    """Commands for managing snapshot repositories."""
    pass


@repo.command(name="ls")
@es_flags
def listRepos(host: str, port: int):
    """List snapshot repositories"""
    esConfig = ESConfig(host=host, port=port)

    names = _getRepos(esConfig)
    if not names:
        logging.warning(
            f"No snapshot repositories found at {esConfig.host}:{esConfig.port}"
        )
        return

    # List repositories (loop var renamed so it doesn't shadow the `repo` group)
    for name in names:
        click.echo(name)


@repo.command(name="init")
@es_flags
@es_repo_flags
@s3_flags
def initRepo(host: str, port: int, repo: str, endpoint: str, bucket: str):
    """Initialize a snapshot repository"""

    # Config includes the S3 endpoint and bucket needed for repository creation
    esConfig = ESConfig(
        host=host,
        port=port,
        repo=repo,
        endpoint=endpoint,
        bucket=bucket,
    )

    # TODO: Add readonly flag to for restore operations
    success = _initRepo(esConfig)
    if success:
        click.echo(f"Repository '{repo}' initialized successfully.")
    else:
        logging.error(f"Failed to initialize repository '{repo}'.")


@repo.command(name="rm")
@es_flags
@es_repo_flags
@click.option(
    "--force",
    is_flag=True,
    default=False,
    help="Force/confirm deletion of the repository.",
)
def deleteRepo(host: str, port: int, repo: str, force: bool):
    """Delete a snapshot repository"""
    # FIX: the --help text above previously said "Initialize a snapshot
    # repository", copied from initRepo.
    esConfig = ESConfig(
        host=host,
        port=port,
        repo=repo,
    )

    success = _deleteRepo(esConfig, force)
    if success:
        click.echo(f"Repository '{repo}' deleted successfully.")
    else:
        logging.error(f"Failed to delete repository '{repo}'.")
def _getGraphs(grip: GripConfig) -> list[str]:
    """
    Utility function to connect to Grip and list all graphs.
    """
    # Connect and materialize the graph listing into a plain list.
    conn = _connect(grip)
    return [name for name in conn.listGraphs()]


def _getEdges(grip: GripConfig, graph: str) -> list[str]:
    """
    Utility function to connect to Grip and list all edges.
    """
    conn = _connect(grip)
    g = conn.graph(graph)

    # Outbound edges only; bothE() would report each edge twice.
    # NOTE(review): assumes the graph handle supports G.V().outE() directly
    # (older gripql required G.query().V()) -- confirm against the pinned
    # gripql version.
    return [edge for edge in g.V().outE()]
""" @@ -62,7 +79,7 @@ def _getVertices(grip: GripConfig, graph: str, limit: int) -> list[str]: G = c.graph(graph) - for i in G.query().V().limit(limit): + for i in G.V(): vertices.append(i) return vertices @@ -83,32 +100,43 @@ def _connect(grip: GripConfig) -> gripql.Connection: return client -def _dump(grip: GripConfig, graph: str, limit: int, vertex: bool, edge: bool, out: Path) -> None: +def _dump(grip: GripConfig, graph: str, vertex: bool, edge: bool, out: Path) -> None: # Dump conn = _connect(grip) G = conn.graph(graph) + # Run single query to get all vertices + # Rather than G.V() and G.V().outE() + vertices = G.V() + # write vertex and edge objects from grip DB to file if vertex: with open(out / f"{graph}.vertices", "wb") as f: - for i in G.query().V().limit(limit): + for i in vertices: f.write(orjson.dumps(i, option=orjson.OPT_APPEND_NEWLINE)) if edge: with open(out / f"{graph}.edges", "wb") as f: - for i in G.query().E().limit(limit): + # Note: + # Using G.V().outE() here to return all edges + # G.V().BothE() would return duplicate edges (outbound and inbound) + # Ref: https://github.com/bmeg/grip/blob/0.8.0/conformance/tests/ot_basic.py#L129-L140 + for i in vertices.outE(): f.write(orjson.dumps(i, option=orjson.OPT_APPEND_NEWLINE)) # TODO: At this point you will need to reconnect to the new grip instance to load the data that was dumped def _restore(grip: GripConfig, graph: str, dir: Path): + ## Clean/Delete existing graph + ## GRIP initdb job (templates/post-install) + ## Load conn = _connect(grip) G = conn.graph(graph) bulkV = G.bulkAdd() - with open("grip.vertices", "rb") as f: + with open(dir / f"{graph}.vertices", "rb") as f: count = 0 for i in f: data = orjson.loads(i) @@ -123,7 +151,7 @@ def _restore(grip: GripConfig, graph: str, dir: Path): print("Vertices load res: ", str(err)) bulkE = G.bulkAdd() - with open("grip.edges", "rb") as f: + with open(dir / f"{graph}.edges", "rb") as f: count = 0 for i in f: data = orjson.loads(i) @@ -132,7 +160,7 @@ 
def _restore(grip: GripConfig, graph: str, dir: Path): _to = data["_to"] _from = data["_from"] del data["_id"], data["_label"], data["_to"], data["_from"] - bulkE.addEdge(_to, _from, _label, data=data, gid=_id) + bulkE.addEdge(_to, _from, _label, data=data, id=_id) count += 1 if count % 10000 == 0: print("loaded %d edges" % count) diff --git a/src/backup/grip/cli.py b/src/backup/grip/cli.py new file mode 100644 index 0000000..ba6665f --- /dev/null +++ b/src/backup/grip/cli.py @@ -0,0 +1,112 @@ +from backup.grip import ( + GripConfig, + _getGraphs, + _getEdges, + _getVertices, + _dump as _gripDump, + _restore as _gripRestore, +) +from backup.options import ( + dir_flags, +) +from pathlib import Path +import click +import logging +import json + + +# GRIP Flags +def grip_host_flags(fn): + options = [ + click.option( + "--host", + "-H", + envvar="GRIP_HOST", + default="localhost", + show_default=True, + help="GRIP host ($GRIPHOST)", + ), + click.option( + "--port", + "-p", + envvar="GRIP_PORT", + default=8201, + show_default=True, + help="GRIP port ($GRIP_PORT)", + ), + ] + for option in reversed(options): + fn = option(fn) + return fn + +# GRIP Graph Flags +def grip_flags(fn): + options = [ + click.option("--graph", "-g", default="CALYPR", help="Name of the GRIP graph."), + # click.option( + # "--edge", + # "--edges", + # "-e", + # is_flag=True, + # default=False, + # help="Output GRIP edges.", + # ), + # click.option("--graph", "-g", default="CALYPR", help="Name of the GRIP graph."), + # click.option( + # "--vertex", + # "--vertices", + # "-v", + # is_flag=True, + # default=False, + # help="Output GRIP vertices.", + # ), + ] + for option in reversed(options): + fn = option(fn) + return fn + + +@click.group() +def grip(): + """Commands for GRIP backups.""" + pass + + +@grip.command() +@grip_host_flags +def ls(host: str, port: int): + """list GRIP vertices and/or edges""" + conf = GripConfig(host=host, port=port) + + for v in _getGraphs(conf): + 
click.echo(json.dumps(v, indent=2)) + + +@grip.command() +@grip_host_flags +@dir_flags +def backup(host: str, port: int, graph: str, vertex: bool, edge: bool, dir: Path): + """grip ➜ local""" + conf = GripConfig(host=host, port=port) + + # Set timestamp + dir.mkdir(parents=True, exist_ok=True) + + logging.debug(f"Backing up GRIP graph '{graph}' to directory '{dir}'") + _gripDump(conf, graph, vertex, edge, dir) + + # TODO: Better way to handle GRIP graph schemas? + schema = f"{graph}__schema__" + logging.debug(f"Backing up GRIP graph '{schema}' to directory '{dir}'") + _gripDump(conf, schema, vertex, edge, dir) + + +@grip.command() +@grip_host_flags +@grip_flags +@dir_flags +def restore(host: str, port: int, graph: str, dir: Path): + """local ➜ grip""" + conf = GripConfig(host=host, port=port) + + _ = _gripRestore(conf, graph, dir) diff --git a/src/backup/main.py b/src/backup/main.py index b0e1608..074888f 100644 --- a/src/backup/main.py +++ b/src/backup/main.py @@ -1,35 +1,14 @@ -from backup.grip import ( - GripConfig, - _getEdges, - _getVertices, - _dump as _gripDump, - _restore as _gripRestore, -) -from backup.postgres import ( - PGConfig, - _getDbs, - _dump as _pgDump, - _restore as _pgRestore, -) -from backup.s3 import ( - S3Config, - _download, - _upload, -) -from backup.options import ( - dir_options, - grip_options, - pg_options, - s3_options, -) from click_aliases import ClickAliasedGroup -from datetime import datetime from elasticsearch.exceptions import ElasticsearchWarning -from pathlib import Path import click import logging import warnings -import json + +# Import command groups from subpackages +from .es.cli import es as es_command +from .grip.cli import grip as grip_command +from .postgres.cli import pg as pg_command +from .s3.cli import s3 as s3_command @click.group(cls=ClickAliasedGroup) @@ -56,148 +35,16 @@ def cli(verbose: bool): ) # Avoid INFO and ElasticsearchWarning logging from the elasticsearch logger - # https://stackoverflow.com/a/47157553 
+ # ref: https://stackoverflow.com/a/47157553 logging.getLogger("elastic_transport.transport").setLevel(logging.CRITICAL) warnings.simplefilter("ignore", ElasticsearchWarning) +# register subcommands +cli.add_command(es_command) +cli.add_command(grip_command) +cli.add_command(pg_command) +cli.add_command(s3_command) + if __name__ == "__main__": cli() - - -@cli.group(aliases=["gp"]) -def grip(): - """Commands for GRIP backups.""" - pass - - -@grip.command(name="ls") -@grip_options -def list_grip(host: str, port: int, graph: str, limit: int, vertex: bool, edge: bool): - """list GRIP vertices and/or edges""" - conf = GripConfig(host=host, port=port) - - if vertex: - for v in _getVertices(conf, graph, limit): - click.echo(json.dumps(v, indent=2)) - - if edge: - for e in _getEdges(conf, graph, limit): - click.echo(json.dumps(e, indent=2)) - - -@grip.command(name="backup") -@grip_options -@dir_options -def backup_grip(host: str, port: int, graph: str, limit: int, vertex: bool, edge: bool, dir: Path): - """grip ➜ local""" - conf = GripConfig(host=host, port=port) - - # Set timestamp - dir.mkdir(parents=True, exist_ok=True) - - logging.debug(f"Backing up GRIP graph '{graph}' to directory '{dir}'") - _gripDump(conf, graph, limit, vertex, edge, dir) - - # TODO: Better way to handle GRIP graph schemas? 
- schema = f"{graph}__schema__" - logging.debug(f"Backing up GRIP graph '{schema}' to directory '{dir}'") - _gripDump(conf, schema, limit, vertex, edge, dir) - - -@grip.command(name="restore") -@grip_options -@dir_options -def restore_grip(host: str, port: int, graph: str, dir: Path): - """local ➜ grip""" - conf = GripConfig(host=host, port=port) - - _ = _gripRestore(conf, graph, dir) - - -@cli.group(aliases=["pg"]) -def pg(): - """Commands for Postgres backups.""" - pass - - -@pg.command(name="ls") -@pg_options -def listDbs(host: str, port: int, user: str, password: str): - """list databases""" - conf = PGConfig(host=host, port=port, user=user, password=password) - - dbs = _getDbs(conf) - if not dbs: - logging.warning(f"No databases found at {conf.host}:{conf.port}.") - return - - # List databases - for database in dbs: - click.echo(database) - - -@pg.command(name="dump") -@pg_options -@dir_options -def dump_postgres(host: str, port: int, user: str, password: str, dir: Path): - """postgres ➜ local""" - conf = PGConfig(host=host, port=port, user=user, password=password) - - # Dump directory - dir.mkdir(parents=True, exist_ok=True) - - dbs = _getDbs(conf) - if not dbs: - logging.warning(f"No databases found to dump at {conf.host}:{conf.port}.") - return - - # Dump databases - for database in dbs: - dump = _pgDump(conf, database, dir) - logging.debug(f"Dumped {database} to {dump}") - - -@pg.command(name="restore") -@pg_options -@dir_options -def restore_postgres(host: str, port: int, user: str, password: str, dir: Path): - """local ➜ postgres""" - conf = PGConfig(host=host, port=port, user=user, password=password) - - dbs = _getDbs(conf) - if not dbs: - logging.warning(f"No databases found to restore at {conf.host}:{conf.port}.") - return - - # Restore databases - for database in dbs: - _ = _pgRestore(conf, database, dir) - - -@cli.group() -def s3(): - """Commands for S3.""" - pass - - -@s3.command() -@s3_options -@dir_options -def download(endpoint: str, bucket: 
str, key: str, secret: str, dir: Path): - """s3 ➜ local""" - conf = S3Config(endpoint=endpoint, bucket=bucket, key=key, secret=secret) - - # Download from S3 - _ = _download(conf, dir) - - -@s3.command() -@s3_options -@dir_options -def upload(endpoint: str, bucket: str, key: str, secret: str, dir: Path): - """local ➜ s3""" - s3 = S3Config(endpoint=endpoint, bucket=bucket, key=key, secret=secret) - - # Upload to S3 - _ = _upload(s3, dir) diff --git a/src/backup/options.py b/src/backup/options.py index 4dfc2b2..900a024 100644 --- a/src/backup/options.py +++ b/src/backup/options.py @@ -1,114 +1,10 @@ from pathlib import Path import click - -# GRIP Flags -def grip_options(fn): - options = [ - click.option( - "--edge", "--edges", "-e", is_flag=True, help="Output GRIP edges." - ), - click.option("--graph", "-g", default="CALYPR", help="Name of the GRIP graph."), - click.option( - "--host", - "-H", - envvar="GRIP_HOST", - default="localhost", - show_default=True, - help="GRIP host ($GRIPHOST)", - ), - click.option( - "--limit", - "-l", - envvar="GRIP_LIMIT", - type=int, - default=10000, - help="Limit number of items listed.", - ), - click.option( - "--port", - "-p", - envvar="GRIP_PORT", - default=8201, - show_default=True, - help="GRIP port ($GRIP_PORT)", - ), - click.option( - "--vertex", - "--vertices", - "-v", - is_flag=True, - help="Output GRIP vertices.", - ), - - ] - for option in reversed(options): - fn = option(fn) - return fn - - -# Postgres Flags -def pg_options(fn): - options = [ - click.option( - "--host", - "-H", - envvar="PGHOST", - default="localhost", - show_default=True, - help="Postgres host ($PGHOST)", - ), - click.option( - "--port", - "-p", - envvar="PGPORT", - default=5432, - show_default=True, - help="Postgres port ($PGPORT)", - ), - click.option( - "--user", - "-u", - envvar="PGUSER", - default="postgres", - show_default=True, - help="Postgres username ($PGUSER)", - ), - click.option( - "--password", - "-P", - envvar="PGPASSWORD", - help="Postgres 
password ($PGPASSWORD)", - ), - ] - for option in reversed(options): - fn = option(fn) - return fn - - -# S3 Flags -def s3_options(fn): - options = [ - click.option( - "--endpoint", - "-e", - default="https://s3.amazonaws.com", - show_default=True, - help="S3 endpoint URL", - ), - click.option("--bucket", "-b", required=True, help="S3 bucket name"), - # TODO: Support env variables for S3 credentials - # ref: https://github.com/minio/minio-go/blob/v7.0.95/pkg/credentials/env_aws.go#L27-L28 - click.option("--key", "-k", help="S3 access key ID ($AWS_ACCESS_KEY)"), - click.option("--secret", "-s", help="S3 secret access key ($AWS_SECRET_KEY)"), - ] - for option in options: - fn = option(fn) - return fn - +# Common CLI Options for all subcommand # Output/intput directory flags -dir_options = click.option( +dir_flags = click.option( "--dir", "-d", default=Path("."), diff --git a/src/backup/postgres/__init__.py b/src/backup/postgres/__init__.py index d4d80fa..3954cf1 100644 --- a/src/backup/postgres/__init__.py +++ b/src/backup/postgres/__init__.py @@ -2,6 +2,7 @@ from pathlib import Path from psycopg2.extensions import connection import logging +import os import psycopg2 import shutil import subprocess @@ -14,7 +15,6 @@ class PGConfig: host: str port: int user: str - password: str def _connect(pgConfig: PGConfig) -> connection: @@ -24,14 +24,13 @@ def _connect(pgConfig: PGConfig) -> connection: assert pgConfig.host, "Host must not be empty" assert pgConfig.port, "Port must not be empty" assert pgConfig.user, "User must not be empty" - assert pgConfig.password, "Password must not be empty" try: connection = psycopg2.connect( user=pgConfig.user, - password=pgConfig.password, host=pgConfig.host, port=pgConfig.port, + password=os.getenv("PGPASSWORD"), ) except Exception as err: logging.error(f"Error connecting to Postgres: {err}") @@ -80,9 +79,6 @@ def _dump(pgConfig: PGConfig, db: str, dir: Path) -> Path: "--no-password", ] - # Set the environment variable for the password 
- env = {"PGPASSWORD": pgConfig.password} - # Dump File dump = Path(f"{dir}/{db}.sql") @@ -96,10 +92,12 @@ def _dump(pgConfig: PGConfig, db: str, dir: Path) -> Path: stdout=out, stderr=subprocess.PIPE, check=True, - env=env, + env=os.environ.copy(), ) except subprocess.CalledProcessError as e: - logging.error(f"Error dumping database '{db}': {e}, stderr: {e.stderr.decode() if e.stderr else ''}") + logging.error( + f"Error dumping database '{db}': {e}, stderr: {e.stderr.decode() if e.stderr else ''}" + ) raise return dump @@ -115,12 +113,11 @@ def _restore(pgConfig: PGConfig, db: str, dir: Path) -> Path: logging.error(f"Dump file {dump} does not exist") raise FileNotFoundError(f"Dump file {dump} does not exist") - if not shutil.which("pg_restore"): logging.error("pg_restore not found in PATH") command = [ - "pg_restore", + "psql", "-U", pgConfig.user, "-h", @@ -129,7 +126,7 @@ def _restore(pgConfig: PGConfig, db: str, dir: Path) -> Path: str(pgConfig.port), "-d", db, - "--no-password", + "-f", dump.as_posix(), ] @@ -145,4 +142,11 @@ def _restore(pgConfig: PGConfig, db: str, dir: Path) -> Path: return dump except subprocess.CalledProcessError as e: - raise + stdout = e.stdout.decode(errors="replace") if e.stdout else "" + stderr = e.stderr.decode(errors="replace") if e.stderr else "" + logging.error( + f"Error restoring database '{db}': returncode={e.returncode}; stdout={stdout}; stderr={stderr}" + ) + raise RuntimeError( + f"pg_restore failed for '{db}': returncode={e.returncode}; stdout={stdout}; stderr={stderr}" + ) from e diff --git a/src/backup/postgres/cli.py b/src/backup/postgres/cli.py new file mode 100644 index 0000000..9065214 --- /dev/null +++ b/src/backup/postgres/cli.py @@ -0,0 +1,108 @@ +from backup.postgres import ( + PGConfig, + _getDbs, + _dump as _pgDump, + _restore as _pgRestore, +) +from backup.options import ( + dir_flags, +) +import click +import logging +from pathlib import Path + + +# Postgres Flags +def pg_flags(fn): + options = [ + 
click.option( + "--host", + "-H", + envvar="PGHOST", + default="localhost", + show_default=True, + help="Postgres host ($PGHOST)", + ), + click.option( + "--port", + "-p", + envvar="PGPORT", + default=5432, + show_default=True, + help="Postgres port ($PGPORT)", + ), + click.option( + "--user", + "-u", + envvar="PGUSER", + default="postgres", + show_default=True, + help="Postgres username ($PGUSER)", + ), + ] + for option in reversed(options): + fn = option(fn) + return fn + + +@click.group() +def pg(): + """Postgres-related commands (moved from main.py).""" + pass + + +@pg.command() +@pg_flags +def ls(host: str, port: int, user: str): + """list databases""" + conf = PGConfig(host=host, port=port, user=user) + + dbs = _getDbs(conf) + if not dbs: + logging.warning(f"No databases found at {conf.host}:{conf.port}.") + return + + # List databases + for database in dbs: + click.echo(database) + + +@pg.command() +@pg_flags +@dir_flags +def dump(host: str, port: int, user: str, dir: Path): + """postgres ➜ local""" + conf = PGConfig(host=host, port=port, user=user) + + # Dump directory + dir.mkdir(parents=True, exist_ok=True) + + dbs = _getDbs(conf) + if not dbs: + logging.warning(f"No databases found to dump at {conf.host}:{conf.port}.") + return + + # Dump databases + for database in dbs: + dump = _pgDump(conf, database, dir) + logging.debug(f"Dumped {database} to {dump}") + + +@pg.command() +@pg_flags +@dir_flags +def restore(host: str, port: int, user: str, dir: Path): + """local ➜ postgres""" + conf = PGConfig(host=host, port=port, user=user) + + dbs = _getDbs(conf) + if not dbs: + logging.warning(f"No databases found to restore at {conf.host}:{conf.port}.") + return + + # Restore databases + for database in dbs: + if database == "gecko_cbds" or database == "metadata_cbds": + logging.debug("Skipping restore of gecko and metadata databases...") + continue + _ = _pgRestore(conf, database, dir) diff --git a/src/backup/s3/cli.py b/src/backup/s3/cli.py new file mode 100644 
index 0000000..248879a --- /dev/null +++ b/src/backup/s3/cli.py @@ -0,0 +1,61 @@ +from backup.s3 import ( + S3Config, + _download, + _upload, +) +from backup.options import ( + dir_flags, +) +import click +from pathlib import Path + + +# S3 Flags +def s3_flags(fn): + options = [ + click.option( + "--endpoint", + "-e", + show_default=True, + help="S3 endpoint URL", + ), + click.option("--bucket", "-b", required=True, help="S3 bucket name"), + ] + for option in options: + fn = option(fn) + return fn + + +@click.group() +def s3(): + """Commands for S3.""" + pass + + +@s3.command() +@s3_flags +def ls(endpoint: str, bucket: str, key: str, secret: str): + # TODO: Implement + pass + + +@s3.command() +@s3_flags +@dir_flags +def download(endpoint: str, bucket: str, key: str, secret: str, dir: Path): + """s3 ➜ local""" + conf = S3Config(endpoint=endpoint, bucket=bucket, key=key, secret=secret) + + # Download from S3 + _ = _download(conf, dir) + + +@s3.command() +@s3_flags +@dir_flags +def upload(endpoint: str, bucket: str, key: str, secret: str, dir: Path): + """local ➜ s3""" + s3 = S3Config(endpoint=endpoint, bucket=bucket, key=key, secret=secret) + + # Upload to S3 + _ = _upload(s3, dir) diff --git a/tests/conftest.py b/tests/conftest.py index bf7be8a..e3f5865 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,4 @@ +import os from backup.postgres import PGConfig from backup.s3 import S3Config from minio import Minio @@ -26,11 +27,12 @@ def testPostgres(): with PostgresContainer("postgres") as postgres: logging.debug(f"Postgres ready at {postgres.get_connection_url}") + # Set PGPASSWORD environment variable for authentication + os.environ["PGPASSWORD"] = postgres.password + yield PGConfig( # Default: test user=postgres.username, - # Default: test - password=postgres.password, host="localhost", port=postgres.get_exposed_port(5432), ) diff --git a/tests/elasticsearch/test_elasticsearch.py b/tests/elasticsearch/test_elasticsearch.py new file mode 100644 index 
0000000..7a337e6 --- /dev/null +++ b/tests/elasticsearch/test_elasticsearch.py @@ -0,0 +1,2 @@ +def testExample(): + assert True is not False diff --git a/tests/grip/test_grip.py b/tests/grip/test_grip.py new file mode 100644 index 0000000..7a337e6 --- /dev/null +++ b/tests/grip/test_grip.py @@ -0,0 +1,2 @@ +def testExample(): + assert True is not False diff --git a/tests/postgres/test_postgres.py b/tests/postgres/test_postgres.py new file mode 100644 index 0000000..7a337e6 --- /dev/null +++ b/tests/postgres/test_postgres.py @@ -0,0 +1,2 @@ +def testExample(): + assert True is not False diff --git a/tests/s3/test_s3.py b/tests/s3/test_s3.py new file mode 100644 index 0000000..7a337e6 --- /dev/null +++ b/tests/s3/test_s3.py @@ -0,0 +1,2 @@ +def testExample(): + assert True is not False diff --git a/tests/test_backups.py b/tests/test_backups.py index e1313f2..518dd93 100644 --- a/tests/test_backups.py +++ b/tests/test_backups.py @@ -19,11 +19,12 @@ _upload, ) from backup.options import ( - dir_options, - pg_options, - s3_options, + dir_flags, ) +# TODO: This "end-to-end" test file still needs to be implemented with: +# - Backups +# - Restores def testConnect(testPostgres): """