diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml new file mode 100644 index 0000000..51be54c --- /dev/null +++ b/.github/workflows/format.yml @@ -0,0 +1,29 @@ +# Based on https://docs.astral.sh/uv/guides/integration/github/#multiple-python-versions +name: Format + +# trigger on PRs and main branches +on: + pull_request: + push: + branches: + - master + - develop + +jobs: + build: + runs-on: ubuntu-24.04 + + steps: + - uses: actions/checkout@v5 + + - name: Install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.9.5" + + - name: Install dependencies + run: uv sync --dev + + - name: Check format + run: | + uv run ruff format --check diff --git a/.github/workflows/code_style.yml b/.github/workflows/lint.yml similarity index 96% rename from .github/workflows/code_style.yml rename to .github/workflows/lint.yml index e0870d6..952e0b3 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/lint.yml @@ -1,5 +1,5 @@ # Based on https://docs.astral.sh/uv/guides/integration/github/#multiple-python-versions -name: Code Style +name: Lint # trigger on PRs and main branches on: diff --git a/README.md b/README.md index abfb38f..65344bb 100644 --- a/README.md +++ b/README.md @@ -147,6 +147,9 @@ make html ```bash # Lint uv run ruff check + +# Format code +uv run ruff format ``` ## Contact diff --git a/datacrunch/InferenceClient/inference_client.py b/datacrunch/InferenceClient/inference_client.py index c5f5bfa..ec808e2 100644 --- a/datacrunch/InferenceClient/inference_client.py +++ b/datacrunch/InferenceClient/inference_client.py @@ -9,14 +9,15 @@ class InferenceClientError(Exception): """Base exception for InferenceClient errors.""" + pass class AsyncStatus(str, Enum): - Initialized = "Initialized" - Queue = "Queue" - Inference = "Inference" - Completed = "Completed" + Initialized = 'Initialized' + Queue = 'Queue' + Inference = 'Inference' + Completed = 'Completed' @dataclass_json(undefined=Undefined.EXCLUDE) @@ -38,28 +39,29 @@ def _is_stream_response(self, headers: CaseInsensitiveDict[str]) -> bool: bool: True if the response is likely a stream, False otherwise """ # Standard chunked transfer encoding - is_chunked_transfer = headers.get( - 'Transfer-Encoding', '').lower() == 'chunked' + is_chunked_transfer = headers.get('Transfer-Encoding', '').lower() == 'chunked' # Server-Sent Events content type - is_event_stream = headers.get( - 'Content-Type', '').lower() == 'text/event-stream' + is_event_stream = headers.get('Content-Type', '').lower() == 'text/event-stream' # NDJSON - is_ndjson = headers.get( - 'Content-Type', '').lower() == 'application/x-ndjson' + is_ndjson = headers.get('Content-Type', '').lower() == 'application/x-ndjson' # Stream JSON - is_stream_json = headers.get( - 'Content-Type', '').lower() == 'application/stream+json' + is_stream_json = headers.get('Content-Type', '').lower() == 'application/stream+json' # Keep-alive - is_keep_alive = headers.get( - 'Connection', '').lower() == 'keep-alive' + is_keep_alive = headers.get('Connection', '').lower() == 'keep-alive' # No content length has_no_content_length = 'Content-Length' not in headers # No Content-Length with keep-alive often suggests streaming (though not definitive) is_keep_alive_and_no_content_length = is_keep_alive and has_no_content_length - return (self._stream or is_chunked_transfer or is_event_stream or is_ndjson or - is_stream_json or is_keep_alive_and_no_content_length) + return ( + self._stream + or is_chunked_transfer + or is_event_stream + or is_ndjson + or is_stream_json + or 
is_keep_alive_and_no_content_length + ) def output(self, is_text: bool = False) -> Any: try: @@ -70,9 +72,9 @@ def output(self, is_text: bool = False) -> Any: # if the response is a stream (check headers), raise relevant error if self._is_stream_response(self._original_response.headers): raise InferenceClientError( - "Response might be a stream, use the stream method instead") - raise InferenceClientError( - f"Failed to parse response as JSON: {str(e)}") + 'Response might be a stream, use the stream method instead' + ) + raise InferenceClientError(f'Failed to parse response as JSON: {str(e)}') def stream(self, chunk_size: int = 512, as_text: bool = True) -> Generator[Any, None, None]: """Stream the response content. @@ -95,7 +97,9 @@ def stream(self, chunk_size: int = 512, as_text: bool = True) -> Generator[Any, class InferenceClient: - def __init__(self, inference_key: str, endpoint_base_url: str, timeout_seconds: int = 60 * 5) -> None: + def __init__( + self, inference_key: str, endpoint_base_url: str, timeout_seconds: int = 60 * 5 + ) -> None: """ Initialize the InferenceClient. @@ -108,23 +112,21 @@ def __init__(self, inference_key: str, endpoint_base_url: str, timeout_seconds: InferenceClientError: If the parameters are invalid """ if not inference_key: - raise InferenceClientError("inference_key cannot be empty") + raise InferenceClientError('inference_key cannot be empty') parsed_url = urlparse(endpoint_base_url) if not parsed_url.scheme or not parsed_url.netloc: - raise InferenceClientError("endpoint_base_url must be a valid URL") + raise InferenceClientError('endpoint_base_url must be a valid URL') self.inference_key = inference_key self.endpoint_base_url = endpoint_base_url.rstrip('/') - self.base_domain = self.endpoint_base_url[:self.endpoint_base_url.rindex( - '/')] - self.deployment_name = self.endpoint_base_url[self.endpoint_base_url.rindex( - '/')+1:] + self.base_domain = self.endpoint_base_url[: self.endpoint_base_url.rindex('/')] + self.deployment_name = self.endpoint_base_url[self.endpoint_base_url.rindex('/') + 1 :] self.timeout_seconds = timeout_seconds self._session = requests.Session() self._global_headers = { 'Authorization': f'Bearer {inference_key}', - 'Content-Type': 'application/json' + 'Content-Type': 'application/json', } def __enter__(self): @@ -174,9 +176,11 @@ def remove_global_header(self, key: str) -> None: def _build_url(self, path: str) -> str: """Construct the full URL by joining the base URL with the path.""" - return f"{self.endpoint_base_url}/{path.lstrip('/')}" + return f'{self.endpoint_base_url}/{path.lstrip("/")}' - def _build_request_headers(self, request_headers: Optional[Dict[str, str]] = None) -> Dict[str, str]: + def _build_request_headers( + self, request_headers: Optional[Dict[str, str]] = None + ) -> Dict[str, str]: """ Build the final headers by merging global headers with request-specific headers. 
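For review context, a minimal usage sketch of the client pieces reformatted above — constructing an `InferenceClient` and reading a response either as JSON or as a stream. The inference key, endpoint URL, and payload are placeholders, and `run_sync` is the method touched in the next hunk.

```python
from datacrunch.InferenceClient.inference_client import InferenceClient

# Placeholders: key, endpoint URL, and payload are illustrative only.
client = InferenceClient(
    inference_key='example-key',
    endpoint_base_url='https://containers.example.test/my-deployment',
)

# Synchronous call; output() parses the JSON body.
response = client.run_sync({'prompt': 'hello'})
print(response.output())

# Streaming call; stream() yields decoded chunks instead of a single JSON body.
for chunk in client.run_sync({'prompt': 'hello'}, stream=True).stream():
    print(chunk, end='')
```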
@@ -211,20 +215,26 @@ def _make_request(self, method: str, path: str, **kwargs) -> requests.Response: response = self._session.request( method=method, url=self._build_url(path), - headers=self._build_request_headers( - kwargs.pop('headers', None)), + headers=self._build_request_headers(kwargs.pop('headers', None)), timeout=timeout, - **kwargs + **kwargs, ) response.raise_for_status() return response except requests.exceptions.Timeout: - raise InferenceClientError( - f"Request to {path} timed out after {timeout} seconds") + raise InferenceClientError(f'Request to {path} timed out after {timeout} seconds') except requests.exceptions.RequestException as e: - raise InferenceClientError(f"Request to {path} failed: {str(e)}") - - def run_sync(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = 60 * 5, headers: Optional[Dict[str, str]] = None, http_method: str = "POST", stream: bool = False): + raise InferenceClientError(f'Request to {path} failed: {str(e)}') + + def run_sync( + self, + data: Dict[str, Any], + path: str = '', + timeout_seconds: int = 60 * 5, + headers: Optional[Dict[str, str]] = None, + http_method: str = 'POST', + stream: bool = False, + ): """Make a synchronous request to the inference endpoint. Args: @@ -242,16 +252,30 @@ def run_sync(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = InferenceClientError: If the request fails """ response = self._make_request( - http_method, path, json=data, timeout_seconds=timeout_seconds, headers=headers, stream=stream) + http_method, + path, + json=data, + timeout_seconds=timeout_seconds, + headers=headers, + stream=stream, + ) return InferenceResponse( headers=response.headers, status_code=response.status_code, status_text=response.reason, - _original_response=response + _original_response=response, ) - def run(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = 60 * 5, headers: Optional[Dict[str, str]] = None, http_method: str = "POST", no_response: bool = False): + def run( + self, + data: Dict[str, Any], + path: str = '', + timeout_seconds: int = 60 * 5, + headers: Optional[Dict[str, str]] = None, + http_method: str = 'POST', + no_response: bool = False, + ): """Make an asynchronous request to the inference endpoint. 
Args: @@ -275,44 +299,143 @@ def run(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = 60 * # If no_response is True, use the "Prefer: respond-async-proxy" header to run async and don't wait for the response headers['Prefer'] = 'respond-async-proxy' self._make_request( - http_method, path, json=data, timeout_seconds=timeout_seconds, headers=headers) + http_method, + path, + json=data, + timeout_seconds=timeout_seconds, + headers=headers, + ) return # Add the "Prefer: respond-async" header to the request, to run async and wait for the response headers['Prefer'] = 'respond-async' response = self._make_request( - http_method, path, json=data, timeout_seconds=timeout_seconds, headers=headers) + http_method, + path, + json=data, + timeout_seconds=timeout_seconds, + headers=headers, + ) result = response.json() execution_id = result['Id'] return AsyncInferenceExecution(self, execution_id, AsyncStatus.Initialized) - def get(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response: - return self._make_request('GET', path, params=params, headers=headers, timeout_seconds=timeout_seconds) + def get( + self, + path: str, + params: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + timeout_seconds: Optional[int] = None, + ) -> requests.Response: + return self._make_request( + 'GET', path, params=params, headers=headers, timeout_seconds=timeout_seconds + ) - def post(self, path: str, json: Optional[Dict[str, Any]] = None, data: Optional[Union[str, Dict[str, Any]]] = None, - params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response: - return self._make_request('POST', path, json=json, data=data, params=params, headers=headers, timeout_seconds=timeout_seconds) + def post( + self, + path: str, + json: Optional[Dict[str, Any]] = None, + data: Optional[Union[str, Dict[str, Any]]] = None, + params: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + timeout_seconds: Optional[int] = None, + ) -> requests.Response: + return self._make_request( + 'POST', + path, + json=json, + data=data, + params=params, + headers=headers, + timeout_seconds=timeout_seconds, + ) - def put(self, path: str, json: Optional[Dict[str, Any]] = None, data: Optional[Union[str, Dict[str, Any]]] = None, - params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response: - return self._make_request('PUT', path, json=json, data=data, params=params, headers=headers, timeout_seconds=timeout_seconds) + def put( + self, + path: str, + json: Optional[Dict[str, Any]] = None, + data: Optional[Union[str, Dict[str, Any]]] = None, + params: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + timeout_seconds: Optional[int] = None, + ) -> requests.Response: + return self._make_request( + 'PUT', + path, + json=json, + data=data, + params=params, + headers=headers, + timeout_seconds=timeout_seconds, + ) - def delete(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response: - return self._make_request('DELETE', path, params=params, headers=headers, timeout_seconds=timeout_seconds) + def delete( + self, + path: str, + params: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = 
None, + timeout_seconds: Optional[int] = None, + ) -> requests.Response: + return self._make_request( + 'DELETE', + path, + params=params, + headers=headers, + timeout_seconds=timeout_seconds, + ) - def patch(self, path: str, json: Optional[Dict[str, Any]] = None, data: Optional[Union[str, Dict[str, Any]]] = None, - params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response: - return self._make_request('PATCH', path, json=json, data=data, params=params, headers=headers, timeout_seconds=timeout_seconds) + def patch( + self, + path: str, + json: Optional[Dict[str, Any]] = None, + data: Optional[Union[str, Dict[str, Any]]] = None, + params: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + timeout_seconds: Optional[int] = None, + ) -> requests.Response: + return self._make_request( + 'PATCH', + path, + json=json, + data=data, + params=params, + headers=headers, + timeout_seconds=timeout_seconds, + ) - def head(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response: - return self._make_request('HEAD', path, params=params, headers=headers, timeout_seconds=timeout_seconds) + def head( + self, + path: str, + params: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + timeout_seconds: Optional[int] = None, + ) -> requests.Response: + return self._make_request( + 'HEAD', + path, + params=params, + headers=headers, + timeout_seconds=timeout_seconds, + ) - def options(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response: - return self._make_request('OPTIONS', path, params=params, headers=headers, timeout_seconds=timeout_seconds) + def options( + self, + path: str, + params: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + timeout_seconds: Optional[int] = None, + ) -> requests.Response: + return self._make_request( + 'OPTIONS', + path, + params=params, + headers=headers, + timeout_seconds=timeout_seconds, + ) - def health(self, healthcheck_path: str = "/health") -> requests.Response: + def health(self, healthcheck_path: str = '/health') -> requests.Response: """ Check the health status of the API. 
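The asynchronous path reformatted above can be exercised like this — a sketch with a placeholder key, URL, and payload, and an arbitrary polling interval.

```python
import time

from datacrunch.InferenceClient.inference_client import AsyncStatus, InferenceClient

client = InferenceClient('example-key', 'https://containers.example.test/my-deployment')  # placeholders

# run() adds the "Prefer: respond-async" header and returns an AsyncInferenceExecution handle.
execution = client.run({'prompt': 'hello'})

# Poll until the endpoint reports completion; status_json() re-fetches the status each call.
while execution.status_json()['Status'] != AsyncStatus.Completed:
    time.sleep(1)

print(execution.result())

# Basic liveness probe against the deployment's healthcheck path.
print(client.health().status_code)
```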
@@ -325,7 +448,7 @@ def health(self, healthcheck_path: str = "/health") -> requests.Response: try: return self.get(healthcheck_path) except InferenceClientError as e: - raise InferenceClientError(f"Health check failed: {str(e)}") + raise InferenceClientError(f'Health check failed: {str(e)}') @dataclass_json(undefined=Undefined.EXCLUDE) @@ -351,9 +474,15 @@ def status_json(self) -> Dict[str, Any]: Returns: Dict[str, Any]: The status response containing the execution status and other metadata """ - url = f'{self._inference_client.base_domain}/status/{self._inference_client.deployment_name}' + url = ( + f'{self._inference_client.base_domain}/status/{self._inference_client.deployment_name}' + ) response = self._inference_client._session.get( - url, headers=self._inference_client._build_request_headers({self.INFERENCE_ID_HEADER: self.id})) + url, + headers=self._inference_client._build_request_headers( + {self.INFERENCE_ID_HEADER: self.id} + ), + ) response_json = response.json() self._status = AsyncStatus(response_json['Status']) @@ -366,9 +495,15 @@ def result(self) -> Dict[str, Any]: Returns: Dict[str, Any]: The results of the inference execution """ - url = f'{self._inference_client.base_domain}/result/{self._inference_client.deployment_name}' + url = ( + f'{self._inference_client.base_domain}/result/{self._inference_client.deployment_name}' + ) response = self._inference_client._session.get( - url, headers=self._inference_client._build_request_headers({self.INFERENCE_ID_HEADER: self.id})) + url, + headers=self._inference_client._build_request_headers( + {self.INFERENCE_ID_HEADER: self.id} + ), + ) if response.headers['Content-Type'] == 'application/json': return response.json() diff --git a/datacrunch/_version.py b/datacrunch/_version.py index 3c96109..b4b0c6b 100644 --- a/datacrunch/_version.py +++ b/datacrunch/_version.py @@ -1,5 +1,6 @@ try: from importlib.metadata import version + __version__ = version('datacrunch') except Exception: - __version__ = "0.0.0+dev" # fallback for development + __version__ = '0.0.0+dev' # fallback for development diff --git a/datacrunch/authentication/authentication.py b/datacrunch/authentication/authentication.py index 8b6a60e..6a5e3cb 100644 --- a/datacrunch/authentication/authentication.py +++ b/datacrunch/authentication/authentication.py @@ -34,13 +34,12 @@ def authenticate(self) -> dict: """ url = self._base_url + TOKEN_ENDPOINT payload = { - "grant_type": CLIENT_CREDENTIALS, - "client_id": self._client_id, - "client_secret": self._client_secret + 'grant_type': CLIENT_CREDENTIALS, + 'client_id': self._client_id, + 'client_secret': self._client_secret, } - response = requests.post( - url, json=payload, headers=self._generate_headers()) + response = requests.post(url, json=payload, headers=self._generate_headers()) handle_error(response) auth_data = response.json() @@ -71,13 +70,9 @@ def refresh(self) -> dict: """ url = self._base_url + TOKEN_ENDPOINT - payload = { - "grant_type": REFRESH_TOKEN, - "refresh_token": self._refresh_token - } + payload = {'grant_type': REFRESH_TOKEN, 'refresh_token': self._refresh_token} - response = requests.post( - url, json=payload, headers=self._generate_headers()) + response = requests.post(url, json=payload, headers=self._generate_headers()) # if refresh token is also expired, authenticate again: if response.status_code == 401 or response.status_code == 400: @@ -98,9 +93,7 @@ def refresh(self) -> dict: def _generate_headers(self): # get the first 10 chars of the client id client_id_truncated = self._client_id[:10] - 
headers = { - 'User-Agent': 'datacrunch-python-' + client_id_truncated - } + headers = {'User-Agent': 'datacrunch-python-' + client_id_truncated} return headers def is_expired(self) -> bool: diff --git a/datacrunch/balance/balance.py b/datacrunch/balance/balance.py index 6061bac..12aac25 100644 --- a/datacrunch/balance/balance.py +++ b/datacrunch/balance/balance.py @@ -1,4 +1,3 @@ - BALANCE_ENDPOINT = '/balance' @@ -48,4 +47,4 @@ def get(self) -> Balance: :rtype: Balance """ balance = self._http_client.get(BALANCE_ENDPOINT).json() - return Balance(balance["amount"], balance["currency"]) + return Balance(balance['amount'], balance['currency']) diff --git a/datacrunch/constants.py b/datacrunch/constants.py index 2de6667..fd9e791 100644 --- a/datacrunch/constants.py +++ b/datacrunch/constants.py @@ -36,12 +36,12 @@ def __init__(self): class VolumeStatus: - ORDERED = "ordered" - CREATING = "creating" - ATTACHED = "attached" - DETACHED = "detached" - DELETING = "deleting" - DELETED = "deleted" + ORDERED = 'ordered' + CREATING = 'creating' + ATTACHED = 'attached' + DETACHED = 'detached' + DELETING = 'deleting' + DELETED = 'deleted' CLONING = 'cloning' def __init__(self): @@ -49,29 +49,29 @@ def __init__(self): class VolumeTypes: - NVMe = "NVMe" - HDD = "HDD" + NVMe = 'NVMe' + HDD = 'HDD' def __init__(self): return class Locations: - FIN_01: str = "FIN-01" - ICE_01: str = "ICE-01" + FIN_01: str = 'FIN-01' + ICE_01: str = 'ICE-01' def __init__(self): return class ErrorCodes: - INVALID_REQUEST = "invalid_request" - UNAUTHORIZED_REQUEST = "unauthorized_request" - INSUFFICIENT_FUNDS = "insufficient_funds" - FORBIDDEN_ACTION = "forbidden_action" - NOT_FOUND = "not_found" - SERVER_ERROR = "server_error" - SERVICE_UNAVAILABLE = "service_unavailable" + INVALID_REQUEST = 'invalid_request' + UNAUTHORIZED_REQUEST = 'unauthorized_request' + INSUFFICIENT_FUNDS = 'insufficient_funds' + FORBIDDEN_ACTION = 'forbidden_action' + NOT_FOUND = 'not_found' + SERVER_ERROR = 'server_error' + SERVICE_UNAVAILABLE = 'service_unavailable' def __init__(self): return diff --git a/datacrunch/containers/containers.py b/datacrunch/containers/containers.py index d614041..edc1b22 100644 --- a/datacrunch/containers/containers.py +++ b/datacrunch/containers/containers.py @@ -26,47 +26,47 @@ class EnvVarType(str, Enum): """Types of environment variables that can be set in containers.""" - PLAIN = "plain" - SECRET = "secret" + PLAIN = 'plain' + SECRET = 'secret' class SecretType(str, Enum): """Types of secrets that can be set in containers.""" - GENERIC = "generic" # Regular secret, can be used in env vars - FILESET = "file-secret" # A file secret that can be mounted into the container + GENERIC = 'generic' # Regular secret, can be used in env vars + FILESET = 'file-secret' # A file secret that can be mounted into the container class VolumeMountType(str, Enum): """Types of volume mounts that can be configured for containers.""" - SCRATCH = "scratch" - SECRET = "secret" - MEMORY = "memory" - SHARED = "shared" + SCRATCH = 'scratch' + SECRET = 'secret' + MEMORY = 'memory' + SHARED = 'shared' class ContainerRegistryType(str, Enum): """Supported container registry types.""" - GCR = "gcr" - DOCKERHUB = "dockerhub" - GITHUB = "ghcr" - AWS_ECR = "aws-ecr" - CUSTOM = "custom" + GCR = 'gcr' + DOCKERHUB = 'dockerhub' + GITHUB = 'ghcr' + AWS_ECR = 'aws-ecr' + CUSTOM = 'custom' class ContainerDeploymentStatus(str, Enum): """Possible states of a container deployment.""" - INITIALIZING = "initializing" - HEALTHY = "healthy" - DEGRADED = "degraded" - 
UNHEALTHY = "unhealthy" - PAUSED = "paused" - QUOTA_REACHED = "quota_reached" - IMAGE_PULLING = "image_pulling" - VERSION_UPDATING = "version_updating" + INITIALIZING = 'initializing' + HEALTHY = 'healthy' + DEGRADED = 'degraded' + UNHEALTHY = 'unhealthy' + PAUSED = 'paused' + QUOTA_REACHED = 'quota_reached' + IMAGE_PULLING = 'image_pulling' + VERSION_UPDATING = 'version_updating' @dataclass_json @@ -137,8 +137,7 @@ class VolumeMount: @dataclass_json(undefined=Undefined.EXCLUDE) @dataclass class GeneralStorageMount(VolumeMount): - """General storage volume mount configuration. - """ + """General storage volume mount configuration.""" def __init__(self, mount_path: str): """Initialize a general scratch volume mount. @@ -369,7 +368,8 @@ class Deployment: containers: List[Container] compute: ComputeResource container_registry_settings: ContainerRegistrySettings = field( - default_factory=lambda: ContainerRegistrySettings(is_private=False)) + default_factory=lambda: ContainerRegistrySettings(is_private=False) + ) is_spot: bool = False endpoint_base_url: Optional[str] = None scaling: Optional[ScalingOptions] = None @@ -384,11 +384,10 @@ def __str__(self): str: A formatted string representation of the deployment. """ # Get all attributes except _inference_client - attrs = {k: v for k, v in self.__dict__.items() if k != - '_inference_client'} + attrs = {k: v for k, v in self.__dict__.items() if k != '_inference_client'} # Format each attribute - attr_strs = [f"{k}={repr(v)}" for k, v in attrs.items()] - return f"Deployment({', '.join(attr_strs)})" + attr_strs = [f'{k}={repr(v)}' for k, v in attrs.items()] + return f'Deployment({", ".join(attr_strs)})' def __repr__(self): """Returns a repr representation of the deployment, excluding sensitive information. @@ -399,7 +398,9 @@ def __repr__(self): return self.__str__() @classmethod - def from_dict_with_inference_key(cls, data: Dict[str, Any], inference_key: str = None) -> 'Deployment': + def from_dict_with_inference_key( + cls, data: Dict[str, Any], inference_key: str = None + ) -> 'Deployment': """Creates a Deployment instance from a dictionary with an inference key. Args: @@ -413,7 +414,7 @@ def from_dict_with_inference_key(cls, data: Dict[str, Any], inference_key: str = if inference_key and deployment.endpoint_base_url: deployment._inference_client = InferenceClient( inference_key=inference_key, - endpoint_base_url=deployment.endpoint_base_url + endpoint_base_url=deployment.endpoint_base_url, ) return deployment @@ -427,11 +428,9 @@ def set_inference_client(self, inference_key: str) -> None: ValueError: If endpoint_base_url is not set. """ if self.endpoint_base_url is None: - raise ValueError( - "Endpoint base URL must be set to use inference client") + raise ValueError('Endpoint base URL must be set to use inference client') self._inference_client = InferenceClient( - inference_key=inference_key, - endpoint_base_url=self.endpoint_base_url + inference_key=inference_key, endpoint_base_url=self.endpoint_base_url ) def _validate_inference_client(self) -> None: @@ -442,9 +441,18 @@ def _validate_inference_client(self) -> None: """ if self._inference_client is None: raise ValueError( - "Inference client not initialized. Use from_dict_with_inference_key or set_inference_client to initialize inference capabilities.") + 'Inference client not initialized. Use from_dict_with_inference_key or set_inference_client to initialize inference capabilities.' 
+ ) - def run_sync(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = 60 * 5, headers: Optional[Dict[str, str]] = None, http_method: str = "POST", stream: bool = False) -> InferenceResponse: + def run_sync( + self, + data: Dict[str, Any], + path: str = '', + timeout_seconds: int = 60 * 5, + headers: Optional[Dict[str, str]] = None, + http_method: str = 'POST', + stream: bool = False, + ) -> InferenceResponse: """Runs a synchronous inference request. Args: @@ -462,9 +470,19 @@ def run_sync(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = ValueError: If the inference client is not initialized. """ self._validate_inference_client() - return self._inference_client.run_sync(data, path, timeout_seconds, headers, http_method, stream) + return self._inference_client.run_sync( + data, path, timeout_seconds, headers, http_method, stream + ) - def run(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = 60 * 5, headers: Optional[Dict[str, str]] = None, http_method: str = "POST", stream: bool = False): + def run( + self, + data: Dict[str, Any], + path: str = '', + timeout_seconds: int = 60 * 5, + headers: Optional[Dict[str, str]] = None, + http_method: str = 'POST', + stream: bool = False, + ): """Runs an asynchronous inference request. Args: @@ -495,11 +513,16 @@ def health(self): """ self._validate_inference_client() # build healthcheck path - healthcheck_path = "/health" - if self.containers and self.containers[0].healthcheck and self.containers[0].healthcheck.path: + healthcheck_path = '/health' + if ( + self.containers + and self.containers[0].healthcheck + and self.containers[0].healthcheck.path + ): healthcheck_path = self.containers[0].healthcheck.path return self._inference_client.health(healthcheck_path) + # Function alias healthcheck = health @@ -655,7 +678,14 @@ class AWSECRCredentials(BaseRegistryCredentials): region: str ecr_repo: str - def __init__(self, name: str, access_key_id: str, secret_access_key: str, region: str, ecr_repo: str): + def __init__( + self, + name: str, + access_key_id: str, + secret_access_key: str, + region: str, + ecr_repo: str, + ): """Initializes AWS ECR credentials. Args: @@ -718,7 +748,10 @@ def get_deployments(self) -> List[Deployment]: List[Deployment]: List of all deployments. """ response = self.client.get(CONTAINER_DEPLOYMENTS_ENDPOINT) - return [Deployment.from_dict_with_inference_key(deployment, self._inference_key) for deployment in response.json()] + return [ + Deployment.from_dict_with_inference_key(deployment, self._inference_key) + for deployment in response.json() + ] def get_deployment_by_name(self, deployment_name: str) -> Deployment: """Retrieves a specific deployment by name. @@ -729,17 +762,13 @@ def get_deployment_by_name(self, deployment_name: str) -> Deployment: Returns: Deployment: The requested deployment. """ - response = self.client.get( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}") + response = self.client.get(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}') return Deployment.from_dict_with_inference_key(response.json(), self._inference_key) # Function alias get_deployment = get_deployment_by_name - def create_deployment( - self, - deployment: Deployment - ) -> Deployment: + def create_deployment(self, deployment: Deployment) -> Deployment: """Creates a new container deployment. Args: @@ -748,10 +777,7 @@ def create_deployment( Returns: Deployment: The created deployment. 
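A deployment-level sketch of the `run_sync` and `health` helpers plus the `ContainersService` lookup reformatted above, assuming the package's top-level `DataCrunchClient` export; credentials, the inference key, and the deployment name are placeholders.

```python
from datacrunch import DataCrunchClient

# Placeholders: credentials, inference key, and deployment name are illustrative only.
client = DataCrunchClient('client-id', 'client-secret', inference_key='example-inference-key')

deployment = client.containers.get_deployment_by_name('my-deployment')

response = deployment.run_sync({'prompt': 'hello'})
print(response.output())

# health() proxies to the first container's healthcheck path (default '/health').
print(deployment.health().status_code)
```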
""" - response = self.client.post( - CONTAINER_DEPLOYMENTS_ENDPOINT, - deployment.to_dict() - ) + response = self.client.post(CONTAINER_DEPLOYMENTS_ENDPOINT, deployment.to_dict()) return Deployment.from_dict_with_inference_key(response.json(), self._inference_key) def update_deployment(self, deployment_name: str, deployment: Deployment) -> Deployment: @@ -765,8 +791,7 @@ def update_deployment(self, deployment_name: str, deployment: Deployment) -> Dep Deployment: The updated deployment. """ response = self.client.patch( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}", - deployment.to_dict() + f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}', deployment.to_dict() ) return Deployment.from_dict_with_inference_key(response.json(), self._inference_key) @@ -776,8 +801,7 @@ def delete_deployment(self, deployment_name: str) -> None: Args: deployment_name: Name of the deployment to delete. """ - self.client.delete( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}") + self.client.delete(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}') def get_deployment_status(self, deployment_name: str) -> ContainerDeploymentStatus: """Retrieves the current status of a deployment. @@ -788,9 +812,8 @@ def get_deployment_status(self, deployment_name: str) -> ContainerDeploymentStat Returns: ContainerDeploymentStatus: Current status of the deployment. """ - response = self.client.get( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/status") - return ContainerDeploymentStatus(response.json()["status"]) + response = self.client.get(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/status') + return ContainerDeploymentStatus(response.json()['status']) def restart_deployment(self, deployment_name: str) -> None: """Restarts a deployment. @@ -798,8 +821,7 @@ def restart_deployment(self, deployment_name: str) -> None: Args: deployment_name: Name of the deployment to restart. """ - self.client.post( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/restart") + self.client.post(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/restart') def get_deployment_scaling_options(self, deployment_name: str) -> ScalingOptions: """Retrieves the scaling options for a deployment. @@ -810,11 +832,12 @@ def get_deployment_scaling_options(self, deployment_name: str) -> ScalingOptions Returns: ScalingOptions: Current scaling options for the deployment. """ - response = self.client.get( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/scaling") + response = self.client.get(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/scaling') return ScalingOptions.from_dict(response.json()) - def update_deployment_scaling_options(self, deployment_name: str, scaling_options: ScalingOptions) -> ScalingOptions: + def update_deployment_scaling_options( + self, deployment_name: str, scaling_options: ScalingOptions + ) -> ScalingOptions: """Updates the scaling options for a deployment. Args: @@ -825,8 +848,8 @@ def update_deployment_scaling_options(self, deployment_name: str, scaling_option ScalingOptions: Updated scaling options for the deployment. """ response = self.client.patch( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/scaling", - scaling_options.to_dict() + f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/scaling', + scaling_options.to_dict(), ) return ScalingOptions.from_dict(response.json()) @@ -839,9 +862,8 @@ def get_deployment_replicas(self, deployment_name: str) -> List[ReplicaInfo]: Returns: List[ReplicaInfo]: List of replica information. 
""" - response = self.client.get( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/replicas") - return [ReplicaInfo.from_dict(replica) for replica in response.json()["list"]] + response = self.client.get(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/replicas') + return [ReplicaInfo.from_dict(replica) for replica in response.json()['list']] def purge_deployment_queue(self, deployment_name: str) -> None: """Purges the deployment queue. @@ -849,8 +871,7 @@ def purge_deployment_queue(self, deployment_name: str) -> None: Args: deployment_name: Name of the deployment. """ - self.client.post( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/purge-queue") + self.client.post(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/purge-queue') def pause_deployment(self, deployment_name: str) -> None: """Pauses a deployment. @@ -858,8 +879,7 @@ def pause_deployment(self, deployment_name: str) -> None: Args: deployment_name: Name of the deployment to pause. """ - self.client.post( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/pause") + self.client.post(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/pause') def resume_deployment(self, deployment_name: str) -> None: """Resumes a paused deployment. @@ -867,8 +887,7 @@ def resume_deployment(self, deployment_name: str) -> None: Args: deployment_name: Name of the deployment to resume. """ - self.client.post( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/resume") + self.client.post(f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/resume') def get_deployment_environment_variables(self, deployment_name: str) -> Dict[str, List[EnvVar]]: """Retrieves environment variables for a deployment. @@ -880,16 +899,18 @@ def get_deployment_environment_variables(self, deployment_name: str) -> Dict[str Dict[str, List[EnvVar]]: Dictionary mapping container names to their environment variables. """ response = self.client.get( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables") + f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables' + ) result = {} for item in response.json(): - container_name = item["container_name"] - env_vars = item["env"] - result[container_name] = [EnvVar.from_dict( - env_var) for env_var in env_vars] + container_name = item['container_name'] + env_vars = item['env'] + result[container_name] = [EnvVar.from_dict(env_var) for env_var in env_vars] return result - def add_deployment_environment_variables(self, deployment_name: str, container_name: str, env_vars: List[EnvVar]) -> Dict[str, List[EnvVar]]: + def add_deployment_environment_variables( + self, deployment_name: str, container_name: str, env_vars: List[EnvVar] + ) -> Dict[str, List[EnvVar]]: """Adds environment variables to a container in a deployment. Args: @@ -901,19 +922,22 @@ def add_deployment_environment_variables(self, deployment_name: str, container_n Dict[str, List[EnvVar]]: Updated environment variables for all containers. 
""" response = self.client.post( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables", - {"container_name": container_name, "env": [ - env_var.to_dict() for env_var in env_vars]} + f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables', + { + 'container_name': container_name, + 'env': [env_var.to_dict() for env_var in env_vars], + }, ) result = {} for item in response.json(): - container_name = item["container_name"] - env_vars = item["env"] - result[container_name] = [EnvVar.from_dict( - env_var) for env_var in env_vars] + container_name = item['container_name'] + env_vars = item['env'] + result[container_name] = [EnvVar.from_dict(env_var) for env_var in env_vars] return result - def update_deployment_environment_variables(self, deployment_name: str, container_name: str, env_vars: List[EnvVar]) -> Dict[str, List[EnvVar]]: + def update_deployment_environment_variables( + self, deployment_name: str, container_name: str, env_vars: List[EnvVar] + ) -> Dict[str, List[EnvVar]]: """Updates environment variables for a container in a deployment. Args: @@ -925,19 +949,22 @@ def update_deployment_environment_variables(self, deployment_name: str, containe Dict[str, List[EnvVar]]: Updated environment variables for all containers. """ response = self.client.patch( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables", - {"container_name": container_name, "env": [ - env_var.to_dict() for env_var in env_vars]} + f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables', + { + 'container_name': container_name, + 'env': [env_var.to_dict() for env_var in env_vars], + }, ) result = {} item = response.json() - container_name = item["container_name"] - env_vars = item["env"] - result[container_name] = [EnvVar.from_dict( - env_var) for env_var in env_vars] + container_name = item['container_name'] + env_vars = item['env'] + result[container_name] = [EnvVar.from_dict(env_var) for env_var in env_vars] return result - def delete_deployment_environment_variables(self, deployment_name: str, container_name: str, env_var_names: List[str]) -> Dict[str, List[EnvVar]]: + def delete_deployment_environment_variables( + self, deployment_name: str, container_name: str, env_var_names: List[str] + ) -> Dict[str, List[EnvVar]]: """Deletes environment variables from a container in a deployment. Args: @@ -949,18 +976,19 @@ def delete_deployment_environment_variables(self, deployment_name: str, containe Dict[str, List[EnvVar]]: Updated environment variables for all containers. """ response = self.client.delete( - f"{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables", - {"container_name": container_name, "env": env_var_names} + f'{CONTAINER_DEPLOYMENTS_ENDPOINT}/{deployment_name}/environment-variables', + {'container_name': container_name, 'env': env_var_names}, ) result = {} for item in response.json(): - container_name = item["container_name"] - env_vars = item["env"] - result[container_name] = [EnvVar.from_dict( - env_var) for env_var in env_vars] + container_name = item['container_name'] + env_vars = item['env'] + result[container_name] = [EnvVar.from_dict(env_var) for env_var in env_vars] return result - def get_compute_resources(self, size: int = None, is_available: bool = None) -> List[ComputeResource]: + def get_compute_resources( + self, size: int = None, is_available: bool = None + ) -> List[ComputeResource]: """Retrieves compute resources, optionally filtered by size and availability. 
Args: @@ -979,8 +1007,7 @@ def get_compute_resources(self, size: int = None, is_available: bool = None) -> if size: resources = [r for r in resources if r.size == size] if is_available: - resources = [ - r for r in resources if r.is_available == is_available] + resources = [r for r in resources if r.is_available == is_available] return resources # Function alias @@ -1002,7 +1029,7 @@ def create_secret(self, name: str, value: str) -> None: name: Name of the secret. value: Value of the secret. """ - self.client.post(SECRETS_ENDPOINT, {"name": name, "value": value}) + self.client.post(SECRETS_ENDPOINT, {'name': name, 'value': value}) def delete_secret(self, secret_name: str, force: bool = False) -> None: """Deletes a secret. @@ -1012,7 +1039,8 @@ def delete_secret(self, secret_name: str, force: bool = False) -> None: force: Whether to force delete even if secret is in use. """ self.client.delete( - f"{SECRETS_ENDPOINT}/{secret_name}", params={"force": str(force).lower()}) + f'{SECRETS_ENDPOINT}/{secret_name}', params={'force': str(force).lower()} + ) def get_registry_credentials(self) -> List[RegistryCredential]: """Retrieves all registry credentials. @@ -1038,8 +1066,7 @@ def delete_registry_credentials(self, credentials_name: str) -> None: Args: credentials_name: Name of the credentials to delete. """ - self.client.delete( - f"{CONTAINER_REGISTRY_CREDENTIALS_ENDPOINT}/{credentials_name}") + self.client.delete(f'{CONTAINER_REGISTRY_CREDENTIALS_ENDPOINT}/{credentials_name}') def get_fileset_secrets(self) -> List[Secret]: """Retrieves all fileset secrets. @@ -1056,11 +1083,13 @@ def delete_fileset_secret(self, secret_name: str) -> None: Args: secret_name: Name of the secret to delete. """ - self.client.delete(f"{FILESET_SECRETS_ENDPOINT}/{secret_name}") + self.client.delete(f'{FILESET_SECRETS_ENDPOINT}/{secret_name}') - def create_fileset_secret_from_file_paths(self, secret_name: str, file_paths: List[str]) -> None: + def create_fileset_secret_from_file_paths( + self, secret_name: str, file_paths: List[str] + ) -> None: """Creates a new fileset secret. - A fileset secret is a secret that contains several files, + A fileset secret is a secret that contains several files, and can be used to mount a directory with the files in a container. 
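A sketch of the secret helpers reformatted above: a generic secret for environment variables and a fileset secret built from local files. All names, values, and paths are placeholders.

```python
from datacrunch import DataCrunchClient

client = DataCrunchClient('client-id', 'client-secret')  # placeholder credentials

# Generic secret, usable as a container environment variable reference.
client.containers.create_secret('hf-token', 'secret-value')  # placeholder name/value

# Fileset secret: bundles local files so they can be mounted as a directory in a container.
client.containers.create_fileset_secret_from_file_paths(
    'model-config',                          # placeholder secret name
    ['./config.json', './tokenizer.json'],   # placeholder local files (must exist to run)
)
```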
Args: @@ -1069,13 +1098,12 @@ def create_fileset_secret_from_file_paths(self, secret_name: str, file_paths: Li """ processed_files = [] for file_path in file_paths: - with open(file_path, "rb") as f: - base64_content = base64.b64encode(f.read()).decode("utf-8") - processed_files.append({ - "file_name": os.path.basename(file_path), - "base64_content": base64_content - }) - self.client.post(FILESET_SECRETS_ENDPOINT, { - "name": secret_name, - "files": processed_files - }) + with open(file_path, 'rb') as f: + base64_content = base64.b64encode(f.read()).decode('utf-8') + processed_files.append( + { + 'file_name': os.path.basename(file_path), + 'base64_content': base64_content, + } + ) + self.client.post(FILESET_SECRETS_ENDPOINT, {'name': secret_name, 'files': processed_files}) diff --git a/datacrunch/datacrunch.py b/datacrunch/datacrunch.py index 9bcd76a..6989272 100644 --- a/datacrunch/datacrunch.py +++ b/datacrunch/datacrunch.py @@ -17,7 +17,13 @@ class DataCrunchClient: """Client for interacting with DataCrunch's public API""" - def __init__(self, client_id: str, client_secret: str, base_url: str = "https://api.datacrunch.io/v1", inference_key: str = None) -> None: + def __init__( + self, + client_id: str, + client_secret: str, + base_url: str = 'https://api.datacrunch.io/v1', + inference_key: str = None, + ) -> None: """The DataCrunch client :param client_id: client id @@ -32,8 +38,7 @@ def __init__(self, client_id: str, client_secret: str, base_url: str = "https:// # Validate that client_id and client_secret are not empty if not client_id or not client_secret: - raise ValueError( - "client_id and client_secret must be provided") + raise ValueError('client_id and client_secret must be provided') # Constants self.constants: Constants = Constants(base_url, __version__) @@ -41,9 +46,9 @@ def __init__(self, client_id: str, client_secret: str, base_url: str = "https:// # Services self._authentication: AuthenticationService = AuthenticationService( - client_id, client_secret, self.constants.base_url) - self._http_client: HTTPClient = HTTPClient( - self._authentication, self.constants.base_url) + client_id, client_secret, self.constants.base_url + ) + self._http_client: HTTPClient = HTTPClient(self._authentication, self.constants.base_url) self.balance: BalanceService = BalanceService(self._http_client) """Balance service. Get client balance""" @@ -51,8 +56,7 @@ def __init__(self, client_id: str, client_secret: str, base_url: str = "https:// self.images: ImagesService = ImagesService(self._http_client) """Image service""" - self.instance_types: InstanceTypesService = InstanceTypesService( - self._http_client) + self.instance_types: InstanceTypesService = InstanceTypesService(self._http_client) """Instance type service""" self.instances: InstancesService = InstancesService(self._http_client) @@ -61,21 +65,17 @@ def __init__(self, client_id: str, client_secret: str, base_url: str = "https:// self.ssh_keys: SSHKeysService = SSHKeysService(self._http_client) """SSH keys service""" - self.startup_scripts: StartupScriptsService = StartupScriptsService( - self._http_client) + self.startup_scripts: StartupScriptsService = StartupScriptsService(self._http_client) """Startup Scripts service""" - self.volume_types: VolumeTypesService = VolumeTypesService( - self._http_client) + self.volume_types: VolumeTypesService = VolumeTypesService(self._http_client) """Volume type service""" self.volumes: VolumesService = VolumesService(self._http_client) """Volume service. 
Create, attach, detach, get, rename, delete volumes""" - self.locations: LocationsService = LocationsService( - self._http_client) + self.locations: LocationsService = LocationsService(self._http_client) """Locations service. Get locations""" - self.containers: ContainersService = ContainersService( - self._http_client, inference_key) + self.containers: ContainersService = ContainersService(self._http_client, inference_key) """Containers service. Deploy, manage, and monitor container deployments""" diff --git a/datacrunch/helpers.py b/datacrunch/helpers.py index 82d78c3..a0641e8 100644 --- a/datacrunch/helpers.py +++ b/datacrunch/helpers.py @@ -1,6 +1,7 @@ from typing import Type import json + def stringify_class_object_properties(class_object: Type) -> str: """Generates a json string representation of a class object's properties and values @@ -9,5 +10,9 @@ def stringify_class_object_properties(class_object: Type) -> str: :return: _description_ :rtype: json string representation of a class object's properties and values """ - class_properties = {property: getattr(class_object, property, '') for property in class_object.__dir__() if property[:1] != '_' and type(getattr(class_object, property, '')).__name__ != 'method'} - return json.dumps(class_properties, indent=2) \ No newline at end of file + class_properties = { + property: getattr(class_object, property, '') + for property in class_object.__dir__() + if property[:1] != '_' and type(getattr(class_object, property, '')).__name__ != 'method' + } + return json.dumps(class_properties, indent=2) diff --git a/datacrunch/http_client/http_client.py b/datacrunch/http_client/http_client.py index a77d395..71aba53 100644 --- a/datacrunch/http_client/http_client.py +++ b/datacrunch/http_client/http_client.py @@ -56,8 +56,7 @@ def post(self, url: str, json: dict = None, params: dict = None, **kwargs) -> re url = self._add_base_url(url) headers = self._generate_headers() - response = requests.post( - url, json=json, headers=headers, params=params, **kwargs) + response = requests.post(url, json=json, headers=headers, params=params, **kwargs) handle_error(response) return response @@ -86,8 +85,7 @@ def put(self, url: str, json: dict = None, params: dict = None, **kwargs) -> req url = self._add_base_url(url) headers = self._generate_headers() - response = requests.put( - url, json=json, headers=headers, params=params, **kwargs) + response = requests.put(url, json=json, headers=headers, params=params, **kwargs) handle_error(response) return response @@ -119,7 +117,9 @@ def get(self, url: str, params: dict = None, **kwargs) -> requests.Response: return response - def patch(self, url: str, json: dict = None, params: dict = None, **kwargs) -> requests.Response: + def patch( + self, url: str, json: dict = None, params: dict = None, **kwargs + ) -> requests.Response: """Sends a PATCH request. A wrapper for the requests.patch method. @@ -143,13 +143,14 @@ def patch(self, url: str, json: dict = None, params: dict = None, **kwargs) -> r url = self._add_base_url(url) headers = self._generate_headers() - response = requests.patch( - url, json=json, headers=headers, params=params, **kwargs) + response = requests.patch(url, json=json, headers=headers, params=params, **kwargs) handle_error(response) return response - def delete(self, url: str, json: dict = None, params: dict = None, **kwargs) -> requests.Response: + def delete( + self, url: str, json: dict = None, params: dict = None, **kwargs + ) -> requests.Response: """Sends a DELETE request. 
A wrapper for the requests.delete method. @@ -173,8 +174,7 @@ def delete(self, url: str, json: dict = None, params: dict = None, **kwargs) -> url = self._add_base_url(url) headers = self._generate_headers() - response = requests.delete( - url, headers=headers, json=json, params=params, **kwargs) + response = requests.delete(url, headers=headers, json=json, params=params, **kwargs) handle_error(response) return response @@ -186,7 +186,7 @@ def _refresh_token_if_expired(self) -> None: :raises APIException: an api exception with message and error type code """ - if (self._auth_service.is_expired()): + if self._auth_service.is_expired(): # try to refresh. if refresh token has expired, reauthenticate try: self._auth_service.refresh() @@ -202,7 +202,7 @@ def _generate_headers(self) -> dict: headers = { 'Authorization': self._generate_bearer_header(), 'User-Agent': self._generate_user_agent(), - 'Content-Type': 'application/json' + 'Content-Type': 'application/json', } return headers diff --git a/datacrunch/images/images.py b/datacrunch/images/images.py index d6382f3..6e85ee8 100644 --- a/datacrunch/images/images.py +++ b/datacrunch/images/images.py @@ -76,12 +76,18 @@ def __init__(self, http_client) -> None: self._http_client = http_client def get(self) -> List[Image]: - """Get the available instance images + """Get the available instance images :return: list of images objects :rtype: List[Image] """ images = self._http_client.get(IMAGES_ENDPOINT).json() - image_objects = list(map(lambda image: Image( - image['id'], image['name'], image['image_type'], image['details']), images)) + image_objects = list( + map( + lambda image: Image( + image['id'], image['name'], image['image_type'], image['details'] + ), + images, + ) + ) return image_objects diff --git a/datacrunch/instance_types/instance_types.py b/datacrunch/instance_types/instance_types.py index ed1fbdb..447adbd 100644 --- a/datacrunch/instance_types/instance_types.py +++ b/datacrunch/instance_types/instance_types.py @@ -4,18 +4,19 @@ class InstanceType: - - def __init__(self, - id: str, - instance_type: str, - price_per_hour: float, - spot_price_per_hour: float, - description: str, - cpu: dict, - gpu: dict, - memory: dict, - gpu_memory: dict, - storage: dict) -> None: + def __init__( + self, + id: str, + instance_type: str, + price_per_hour: float, + spot_price_per_hour: float, + description: str, + cpu: dict, + gpu: dict, + memory: dict, + gpu_memory: dict, + storage: dict, + ) -> None: """Initialize an instance type object :param id: instance type id @@ -146,17 +147,18 @@ def __str__(self) -> str: :return: instance type string representation :rtype: str """ - return (f'id: {self._id}\n' - f'instance type: {self._instance_type}\n' - f'price_per_hour: ${self._price_per_hour}\n' - f'spot_price_per_hour: ${self._spot_price_per_hour}\n' - f'description: {self._description}\n' - f'cpu: {self._cpu}\n' - f'gpu: {self._gpu}\n' - f'memory :{self._memory}\n' - f'gpu_memory :{self._gpu_memory}\n' - f'storage :{self._storage}\n' - ) + return ( + f'id: {self._id}\n' + f'instance type: {self._instance_type}\n' + f'price_per_hour: ${self._price_per_hour}\n' + f'spot_price_per_hour: ${self._spot_price_per_hour}\n' + f'description: {self._description}\n' + f'cpu: {self._cpu}\n' + f'gpu: {self._gpu}\n' + f'memory :{self._memory}\n' + f'gpu_memory :{self._gpu_memory}\n' + f'storage :{self._storage}\n' + ) class InstanceTypesService: @@ -172,17 +174,22 @@ def get(self) -> List[InstanceType]: :rtype: List[InstanceType] """ instance_types = 
self._http_client.get(INSTANCE_TYPES_ENDPOINT).json() - instance_type_objects = list(map(lambda instance_type: InstanceType( - id=instance_type['id'], - instance_type=instance_type['instance_type'], - price_per_hour=instance_type['price_per_hour'], - spot_price_per_hour=instance_type['spot_price'], - description=instance_type['description'], - cpu=instance_type['cpu'], - gpu=instance_type['gpu'], - memory=instance_type['memory'], - gpu_memory=instance_type['gpu_memory'], - storage=instance_type['storage'] - ), instance_types)) + instance_type_objects = list( + map( + lambda instance_type: InstanceType( + id=instance_type['id'], + instance_type=instance_type['instance_type'], + price_per_hour=instance_type['price_per_hour'], + spot_price_per_hour=instance_type['spot_price'], + description=instance_type['description'], + cpu=instance_type['cpu'], + gpu=instance_type['gpu'], + memory=instance_type['memory'], + gpu_memory=instance_type['gpu_memory'], + storage=instance_type['storage'], + ), + instance_types, + ) + ) return instance_type_objects diff --git a/datacrunch/instances/instances.py b/datacrunch/instances/instances.py index 1cb8c2c..6f561a6 100644 --- a/datacrunch/instances/instances.py +++ b/datacrunch/instances/instances.py @@ -90,9 +90,11 @@ def get(self, status: Optional[str] = None) -> List[Instance]: Returns: List of instance objects matching the criteria. """ - instances_dict = self._http_client.get( - INSTANCES_ENDPOINT, params={'status': status}).json() - return [Instance.from_dict(instance_dict, infer_missing=True) for instance_dict in instances_dict] + instances_dict = self._http_client.get(INSTANCES_ENDPOINT, params={'status': status}).json() + return [ + Instance.from_dict(instance_dict, infer_missing=True) + for instance_dict in instances_dict + ] def get_by_id(self, id: str) -> Instance: """Retrieves a specific instance by its ID. @@ -106,30 +108,31 @@ def get_by_id(self, id: str) -> Instance: Raises: HTTPError: If the instance is not found or other API error occurs. """ - instance_dict = self._http_client.get( - INSTANCES_ENDPOINT + f'/{id}').json() + instance_dict = self._http_client.get(INSTANCES_ENDPOINT + f'/{id}').json() return Instance.from_dict(instance_dict, infer_missing=True) - def create(self, - instance_type: str, - image: str, - hostname: str, - description: str, - ssh_key_ids: list = [], - location: str = Locations.FIN_01, - startup_script_id: Optional[str] = None, - volumes: Optional[List[Dict]] = None, - existing_volumes: Optional[List[str]] = None, - os_volume: Optional[Dict] = None, - is_spot: bool = False, - contract: Optional[Contract] = None, - pricing: Optional[Pricing] = None, - coupon: Optional[str] = None, - *, - max_wait_time: float = 180, - initial_interval: float = 0.5, - max_interval: float = 5, - backoff_coefficient: float = 2.0) -> Instance: + def create( + self, + instance_type: str, + image: str, + hostname: str, + description: str, + ssh_key_ids: list = [], + location: str = Locations.FIN_01, + startup_script_id: Optional[str] = None, + volumes: Optional[List[Dict]] = None, + existing_volumes: Optional[List[str]] = None, + os_volume: Optional[Dict] = None, + is_spot: bool = False, + contract: Optional[Contract] = None, + pricing: Optional[Pricing] = None, + coupon: Optional[str] = None, + *, + max_wait_time: float = 180, + initial_interval: float = 0.5, + max_interval: float = 5, + backoff_coefficient: float = 2.0, + ) -> Instance: """Creates and deploys a new cloud instance. 
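An instance-creation sketch against the `InstancesService.create` signature reformatted above; every value is a placeholder, and `create()` polls with the exponential backoff shown just below in this diff, raising `TimeoutError` if the instance does not start provisioning within `max_wait_time` seconds.

```python
from datacrunch import DataCrunchClient
from datacrunch.constants import Locations

client = DataCrunchClient('client-id', 'client-secret')  # placeholder credentials

instance = client.instances.create(
    instance_type='1V100.6V',           # placeholder instance type
    image='ubuntu-22.04',               # placeholder image name
    hostname='example-host',
    description='illustrative instance',
    ssh_key_ids=['<ssh-key-id>'],       # placeholder SSH key id
    location=Locations.FIN_01,
)
print(instance.id, instance.status)
```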
Args: @@ -159,18 +162,18 @@ def create(self, HTTPError: If instance creation fails or other API error occurs. """ payload = { - "instance_type": instance_type, - "image": image, - "ssh_key_ids": ssh_key_ids, - "startup_script_id": startup_script_id, - "hostname": hostname, - "description": description, - "location_code": location, - "os_volume": os_volume, - "volumes": volumes, - "existing_volumes": existing_volumes, - "is_spot": is_spot, - "coupon": coupon, + 'instance_type': instance_type, + 'image': image, + 'ssh_key_ids': ssh_key_ids, + 'startup_script_id': startup_script_id, + 'hostname': hostname, + 'description': description, + 'location_code': location, + 'os_volume': os_volume, + 'volumes': volumes, + 'existing_volumes': existing_volumes, + 'is_spot': is_spot, + 'coupon': coupon, } if contract: payload['contract'] = contract @@ -188,12 +191,18 @@ def create(self, now = time.monotonic() if now >= deadline: raise TimeoutError( - f"Instance {id} did not enter provisioning state within {max_wait_time:.1f} seconds") + f'Instance {id} did not enter provisioning state within {max_wait_time:.1f} seconds' + ) - interval = min(initial_interval * backoff_coefficient ** i, max_interval, deadline - now) + interval = min(initial_interval * backoff_coefficient**i, max_interval, deadline - now) time.sleep(interval) - def action(self, id_list: Union[List[str], str], action: str, volume_ids: Optional[List[str]] = None) -> None: + def action( + self, + id_list: Union[List[str], str], + action: str, + volume_ids: Optional[List[str]] = None, + ) -> None: """Performs an action on one or more instances. Args: @@ -207,16 +216,17 @@ def action(self, id_list: Union[List[str], str], action: str, volume_ids: Option if type(id_list) is str: id_list = [id_list] - payload = { - "id": id_list, - "action": action, - "volume_ids": volume_ids - } + payload = {'id': id_list, 'action': action, 'volume_ids': volume_ids} self._http_client.put(INSTANCES_ENDPOINT, json=payload) return - def is_available(self, instance_type: str, is_spot: bool = False, location_code: Optional[str] = None) -> bool: + def is_available( + self, + instance_type: str, + is_spot: bool = False, + location_code: Optional[str] = None, + ) -> bool: """Checks if a specific instance type is available for deployment. Args: @@ -232,7 +242,9 @@ def is_available(self, instance_type: str, is_spot: bool = False, location_code: url = f'/instance-availability/{instance_type}' return self._http_client.get(url, query_params).json() - def get_availabilities(self, is_spot: Optional[bool] = None, location_code: Optional[str] = None) -> List[Dict]: + def get_availabilities( + self, is_spot: Optional[bool] = None, location_code: Optional[str] = None + ) -> List[Dict]: """Retrieves a list of available instance types across locations. 
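The availability and action helpers formatted above combine like so; the instance type, location, and instance id are placeholders, and the `'shutdown'` action name is an assumption about the API's plain-string actions.

```python
from datacrunch import DataCrunchClient
from datacrunch.constants import Locations

client = DataCrunchClient('client-id', 'client-secret')  # placeholder credentials

# Check capacity for a placeholder instance type before deploying.
if client.instances.is_available('1V100.6V', is_spot=False, location_code=Locations.FIN_01):
    print('capacity available in FIN-01')

print(client.instances.get_availabilities(is_spot=False))

# Perform an action on one instance id (a list of ids is also accepted).
client.instances.action('<instance-id>', 'shutdown')  # placeholder id; 'shutdown' assumed as an action name
```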
Args: diff --git a/datacrunch/locations/locations.py b/datacrunch/locations/locations.py index fae883d..f980016 100644 --- a/datacrunch/locations/locations.py +++ b/datacrunch/locations/locations.py @@ -10,7 +10,6 @@ def __init__(self, http_client) -> None: self._http_client = http_client def get(self) -> List[dict]: - """Get all locations - """ + """Get all locations""" locations = self._http_client.get(LOCATIONS_ENDPOINT).json() return locations diff --git a/datacrunch/ssh_keys/ssh_keys.py b/datacrunch/ssh_keys/ssh_keys.py index 8c45b18..c009786 100644 --- a/datacrunch/ssh_keys/ssh_keys.py +++ b/datacrunch/ssh_keys/ssh_keys.py @@ -61,8 +61,7 @@ def get(self) -> List[SSHKey]: :rtype: List[SSHKey] """ keys = self._http_client.get(SSHKEYS_ENDPOINT).json() - keys_object_list = list(map(lambda key: SSHKey( - key['id'], key['name'], key['key']), keys)) + keys_object_list = list(map(lambda key: SSHKey(key['id'], key['name'], key['key']), keys)) return keys_object_list @@ -84,7 +83,7 @@ def delete(self, id_list: List[str]) -> None: :param id_list: list of SSH keys ids :type id_list: List[str] """ - payload = {"keys": id_list} + payload = {'keys': id_list} self._http_client.delete(SSHKEYS_ENDPOINT, json=payload) return @@ -107,6 +106,6 @@ def create(self, name: str, key: str) -> SSHKey: :return: new SSH key object :rtype: SSHKey """ - payload = {"name": name, "key": key} + payload = {'name': name, 'key': key} id = self._http_client.post(SSHKEYS_ENDPOINT, json=payload).text return SSHKey(id, name, key) diff --git a/datacrunch/startup_scripts/startup_scripts.py b/datacrunch/startup_scripts/startup_scripts.py index c1e8f33..7f9a30e 100644 --- a/datacrunch/startup_scripts/startup_scripts.py +++ b/datacrunch/startup_scripts/startup_scripts.py @@ -61,8 +61,12 @@ def get(self) -> List[StartupScript]: :rtype: List[StartupScript] """ scripts = self._http_client.get(STARTUP_SCRIPTS_ENDPOINT).json() - scripts_objects = list(map(lambda script: StartupScript( - script['id'], script['name'], script['script']), scripts)) + scripts_objects = list( + map( + lambda script: StartupScript(script['id'], script['name'], script['script']), + scripts, + ) + ) return scripts_objects def get_by_id(self, id) -> StartupScript: @@ -73,8 +77,7 @@ def get_by_id(self, id) -> StartupScript: :return: startup script object :rtype: StartupScript """ - script = self._http_client.get( - STARTUP_SCRIPTS_ENDPOINT + f'/{id}').json()[0] + script = self._http_client.get(STARTUP_SCRIPTS_ENDPOINT + f'/{id}').json()[0] return StartupScript(script['id'], script['name'], script['script']) @@ -84,7 +87,7 @@ def delete(self, id_list: List[str]) -> None: :param id_list: list of startup scripts ids :type id_list: List[str] """ - payload = {"scripts": id_list} + payload = {'scripts': id_list} self._http_client.delete(STARTUP_SCRIPTS_ENDPOINT, json=payload) return @@ -107,7 +110,6 @@ def create(self, name: str, script: str) -> StartupScript: :return: the new startup script's id :rtype: str """ - payload = {"name": name, "script": script} - id = self._http_client.post( - STARTUP_SCRIPTS_ENDPOINT, json=payload).text + payload = {'name': name, 'script': script} + id = self._http_client.post(STARTUP_SCRIPTS_ENDPOINT, json=payload).text return StartupScript(id, name, script) diff --git a/datacrunch/volume_types/volume_types.py b/datacrunch/volume_types/volume_types.py index 596f1e9..4f33aac 100644 --- a/datacrunch/volume_types/volume_types.py +++ b/datacrunch/volume_types/volume_types.py @@ -4,10 +4,7 @@ class VolumeType: - - def __init__(self, - type: 
str, - price_per_month_per_gb: float) -> None: + def __init__(self, type: str, price_per_month_per_gb: float) -> None: """Initialize a volume type object :param type: volume type name @@ -58,9 +55,14 @@ def get(self) -> List[VolumeType]: :rtype: List[VolumesType] """ volume_types = self._http_client.get(VOLUME_TYPES_ENDPOINT).json() - volume_type_objects = list(map(lambda volume_type: VolumeType( - type=volume_type['type'], - price_per_month_per_gb=volume_type['price']['price_per_month_per_gb'], - ), volume_types)) + volume_type_objects = list( + map( + lambda volume_type: VolumeType( + type=volume_type['type'], + price_per_month_per_gb=volume_type['price']['price_per_month_per_gb'], + ), + volume_types, + ) + ) return volume_type_objects diff --git a/datacrunch/volumes/volumes.py b/datacrunch/volumes/volumes.py index 60ce1ac..8e433c5 100644 --- a/datacrunch/volumes/volumes.py +++ b/datacrunch/volumes/volumes.py @@ -8,20 +8,21 @@ class Volume: """A volume model class""" - def __init__(self, - id: str, - status: str, - name: str, - size: int, - type: str, - is_os_volume: bool, - created_at: str, - target: str = None, - location: str = Locations.FIN_01, - instance_id: str = None, - ssh_key_ids: List[str] = [], - deleted_at: str = None, - ) -> None: + def __init__( + self, + id: str, + status: str, + name: str, + size: int, + type: str, + is_os_volume: bool, + created_at: str, + target: str = None, + location: str = Locations.FIN_01, + instance_id: str = None, + ssh_key_ids: List[str] = [], + deleted_at: str = None, + ) -> None: """Initialize the volume object :param id: volume id @@ -181,18 +182,18 @@ def create_from_dict(cls: 'Volume', volume_dict: dict) -> 'Volume': """ return cls( - id = volume_dict['id'], - status = volume_dict['status'], - name = volume_dict['name'], - size = volume_dict['size'], - type = volume_dict['type'], - is_os_volume = volume_dict['is_os_volume'], - created_at = volume_dict['created_at'], - target = volume_dict['target'], - location = volume_dict['location'], - instance_id = volume_dict['instance_id'], - ssh_key_ids = volume_dict['ssh_key_ids'], - deleted_at = volume_dict.get('deleted_at'), + id=volume_dict['id'], + status=volume_dict['status'], + name=volume_dict['name'], + size=volume_dict['size'], + type=volume_dict['type'], + is_os_volume=volume_dict['is_os_volume'], + created_at=volume_dict['created_at'], + target=volume_dict['target'], + location=volume_dict['location'], + instance_id=volume_dict['instance_id'], + ssh_key_ids=volume_dict['ssh_key_ids'], + deleted_at=volume_dict.get('deleted_at'), ) def __str__(self) -> str: @@ -218,8 +219,7 @@ def get(self, status: str = None) -> List[Volume]: :return: list of volume details objects :rtype: List[Volume] """ - volumes_dict = self._http_client.get( - VOLUMES_ENDPOINT, params={'status': status}).json() + volumes_dict = self._http_client.get(VOLUMES_ENDPOINT, params={'status': status}).json() return list(map(Volume.create_from_dict, volumes_dict)) def get_by_id(self, id: str) -> Volume: @@ -230,8 +230,7 @@ def get_by_id(self, id: str) -> Volume: :return: Volume details object :rtype: Volume """ - volume_dict = self._http_client.get( - VOLUMES_ENDPOINT + f'/{id}').json() + volume_dict = self._http_client.get(VOLUMES_ENDPOINT + f'/{id}').json() return Volume.create_from_dict(volume_dict) @@ -241,19 +240,18 @@ def get_in_trash(self) -> List[Volume]: :return: list of volume details objects :rtype: List[Volume] """ - volumes_dicts = self._http_client.get( - VOLUMES_ENDPOINT + '/trash' - ).json() + volumes_dicts = 
self._http_client.get(VOLUMES_ENDPOINT + '/trash').json() return list(map(Volume.create_from_dict, volumes_dicts)) - def create(self, - type: str, - name: str, - size: int, - instance_id: str = None, - location: str = Locations.FIN_01, - ) -> Volume: + def create( + self, + type: str, + name: str, + size: int, + instance_id: str = None, + location: str = Locations.FIN_01, + ) -> Volume: """Create new volume :param type: volume type @@ -270,11 +268,11 @@ def create(self, :rtype: Volume """ payload = { - "type": type, - "name": name, - "size": size, - "instance_id": instance_id, - "location_code": location + 'type': type, + 'name': name, + 'size': size, + 'instance_id': instance_id, + 'location_code': location, } id = self._http_client.post(VOLUMES_ENDPOINT, json=payload).text volume = self.get_by_id(id) @@ -290,9 +288,9 @@ def attach(self, id_list: Union[List[str], str], instance_id: str) -> None: :type instance_id: str """ payload = { - "id": id_list, - "action": VolumeActions.ATTACH, - "instance_id": instance_id + 'id': id_list, + 'action': VolumeActions.ATTACH, + 'instance_id': instance_id, } self._http_client.put(VOLUMES_ENDPOINT, json=payload) @@ -306,8 +304,8 @@ def detach(self, id_list: Union[List[str], str]) -> None: :type id_list: Union[List[str], str] """ payload = { - "id": id_list, - "action": VolumeActions.DETACH, + 'id': id_list, + 'action': VolumeActions.DETACH, } self._http_client.put(VOLUMES_ENDPOINT, json=payload) @@ -325,20 +323,13 @@ def clone(self, id: str, name: str = None, type: str = None) -> Volume: :return: the new volume object, or a list of volume objects if cloned mutliple volumes :rtype: Volume or List[Volume] """ - payload = { - "id": id, - "action": VolumeActions.CLONE, - "name": name, - "type": type - } + payload = {'id': id, 'action': VolumeActions.CLONE, 'name': name, 'type': type} # clone volume(s) - volume_ids_array = self._http_client.put( - VOLUMES_ENDPOINT, json=payload).json() + volume_ids_array = self._http_client.put(VOLUMES_ENDPOINT, json=payload).json() # map the IDs into Volume objects - volumes_array = list( - map(lambda volume_id: self.get_by_id(volume_id), volume_ids_array)) + volumes_array = list(map(lambda volume_id: self.get_by_id(volume_id), volume_ids_array)) # if the array has only one element, return that element if len(volumes_array) == 1: @@ -355,11 +346,7 @@ def rename(self, id_list: Union[List[str], str], name: str) -> None: :param name: new name :type name: str """ - payload = { - "id": id_list, - "action": VolumeActions.RENAME, - "name": name - } + payload = {'id': id_list, 'action': VolumeActions.RENAME, 'name': name} self._http_client.put(VOLUMES_ENDPOINT, json=payload) return @@ -373,9 +360,9 @@ def increase_size(self, id_list: Union[List[str], str], size: int) -> None: :type size: int """ payload = { - "id": id_list, - "action": VolumeActions.INCREASE_SIZE, - "size": size, + 'id': id_list, + 'action': VolumeActions.INCREASE_SIZE, + 'size': size, } self._http_client.put(VOLUMES_ENDPOINT, json=payload) @@ -389,9 +376,9 @@ def delete(self, id_list: Union[List[str], str], is_permanent: bool = False) -> :type id_list: Union[List[str], str] """ payload = { - "id": id_list, - "action": VolumeActions.DELETE, - "is_permanent": is_permanent + 'id': id_list, + 'action': VolumeActions.DELETE, + 'is_permanent': is_permanent, } self._http_client.put(VOLUMES_ENDPOINT, json=payload) diff --git a/docs/source/conf.py b/docs/source/conf.py index ebd4029..3338181 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -17,6 +17,7 @@ from 
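For orientation, here is a hedged usage sketch of the volume operations reformatted above (`create`, `attach`, `detach`, `clone`, `rename`, `increase_size`, `delete`). It is illustrative only: `client` stands for an authenticated `DataCrunchClient`, and the volume type name, size, and instance id are placeholders.

```python
# Illustrative only: `client` is an authenticated DataCrunchClient; the
# volume type name, size (GB), and instance id are placeholders.
volume = client.volumes.create(type='NVMe', name='data-volume', size=100)

# Attach to an instance, then detach again (both accept an id or a list of ids).
client.volumes.attach(volume.id, instance_id='<instance-id>')
client.volumes.detach(volume.id)

# clone() returns a single Volume object when only one volume is cloned.
copy = client.volumes.clone(volume.id, name='data-volume-copy')

# Grow and rename in place, then move both volumes to trash
# (is_permanent=False by default).
client.volumes.increase_size(volume.id, size=200)
client.volumes.rename(volume.id, name='data-volume-v2')
client.volumes.delete([volume.id, copy.id])
```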
datacrunch import __version__ import os import sys + sys.path.insert(0, os.path.abspath('../../')) # -- Project information ----------------------------------------------------- @@ -125,15 +126,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -143,8 +141,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'DataCrunch-Python-SDK.tex', 'DataCrunch-Python-SDK Documentation', - 'DataCrunch.io', 'manual'), + ( + master_doc, + 'DataCrunch-Python-SDK.tex', + 'DataCrunch-Python-SDK Documentation', + 'DataCrunch.io', + 'manual', + ), ] @@ -153,8 +156,13 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'datacrunch-python-sdk', 'DataCrunch-Python-SDK Documentation', - [author], 1) + ( + master_doc, + 'datacrunch-python-sdk', + 'DataCrunch-Python-SDK Documentation', + [author], + 1, + ) ] @@ -164,9 +172,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'DataCrunch-Python-SDK', 'DataCrunch-Python-SDK Documentation', - author, 'DataCrunch-Python-SDK', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + 'DataCrunch-Python-SDK', + 'DataCrunch-Python-SDK Documentation', + author, + 'DataCrunch-Python-SDK', + 'One line description of project.', + 'Miscellaneous', + ), ] diff --git a/examples/advanced_create_instance.py b/examples/advanced_create_instance.py index b7a3af0..9cf6a0c 100644 --- a/examples/advanced_create_instance.py +++ b/examples/advanced_create_instance.py @@ -26,11 +26,12 @@ try: # Create datcrunch client - datacrunch = DataCrunchClient( - DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) + datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Create new SSH key - public_key = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI0qq2Qjt5GPi7DKdcnBHOkvk8xNsG9dA607tnWagOkHC test_key' + public_key = ( + 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI0qq2Qjt5GPi7DKdcnBHOkvk8xNsG9dA607tnWagOkHC test_key' + ) ssh_key = datacrunch.ssh_keys.create('my test key', public_key) # Get all SSH keys @@ -51,21 +52,22 @@ if price_per_hour * DURATION < balance.amount: # Deploy a new 8V instance - instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_8V, - image='ubuntu-22.04-cuda-12.0-docker', - ssh_key_ids=ssh_keys_ids, - hostname='example', - description='large instance', - os_volume={ - "name": "Large OS volume", - "size": 95 - }) + instance = datacrunch.instances.create( + instance_type=INSTANCE_TYPE_8V, + image='ubuntu-22.04-cuda-12.0-docker', + ssh_key_ids=ssh_keys_ids, + hostname='example', + description='large instance', + os_volume={'name': 'Large OS volume', 'size': 95}, + ) else: # Deploy a new 4V instance - instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_4V, - image='ubuntu-22.04-cuda-12.0-docker', - ssh_key_ids=ssh_keys_ids, - hostname='example', - description='medium instance') + instance = datacrunch.instances.create( + instance_type=INSTANCE_TYPE_4V, + image='ubuntu-22.04-cuda-12.0-docker', + ssh_key_ids=ssh_keys_ids, + hostname='example', + description='medium instance', + ) except APIException as exception: print(exception) diff --git 
a/examples/containers/calling_the_endpoint_asynchronously.py b/examples/containers/calling_the_endpoint_asynchronously.py index 27018f6..fd98dbe 100644 --- a/examples/containers/calling_the_endpoint_asynchronously.py +++ b/examples/containers/calling_the_endpoint_asynchronously.py @@ -13,7 +13,10 @@ # DataCrunch client instance datacrunch = DataCrunchClient( - DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET, inference_key=DATACRUNCH_INFERENCE_KEY) + DATACRUNCH_CLIENT_ID, + DATACRUNCH_CLIENT_SECRET, + inference_key=DATACRUNCH_INFERENCE_KEY, +) # Get the deployment deployment = datacrunch.containers.get_deployment_by_name(DEPLOYMENT_NAME) @@ -21,16 +24,14 @@ # Make an asynchronous request to the endpoint. # This example demonstrates calling a SGLang deployment which serves LLMs using an OpenAI-compatible API format data = { - "model": "deepseek-ai/deepseek-llm-7b-chat", - "prompt": "Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?", - "max_tokens": 128, - "temperature": 0.7, - "top_p": 0.9 + 'model': 'deepseek-ai/deepseek-llm-7b-chat', + 'prompt': 'Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?', + 'max_tokens': 128, + 'temperature': 0.7, + 'top_p': 0.9, } -header = { - "Content-Type": "application/json" -} +header = {'Content-Type': 'application/json'} response = deployment.run( data=data, diff --git a/examples/containers/calling_the_endpoint_synchronously.py b/examples/containers/calling_the_endpoint_synchronously.py index 72ea5ff..407edd3 100644 --- a/examples/containers/calling_the_endpoint_synchronously.py +++ b/examples/containers/calling_the_endpoint_synchronously.py @@ -11,7 +11,10 @@ # DataCrunch client instance datacrunch = DataCrunchClient( - DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET, inference_key=DATACRUNCH_INFERENCE_KEY) + DATACRUNCH_CLIENT_ID, + DATACRUNCH_CLIENT_SECRET, + inference_key=DATACRUNCH_INFERENCE_KEY, +) # Get the deployment deployment = datacrunch.containers.get_deployment_by_name(DEPLOYMENT_NAME) @@ -19,16 +22,13 @@ # Make a synchronous request to the endpoint. 
# This example demonstrates calling a SGLang deployment which serves LLMs using an OpenAI-compatible API format data = { - "model": "deepseek-ai/deepseek-llm-7b-chat", - "prompt": "Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?", - "max_tokens": 128, - "temperature": 0.7, - "top_p": 0.9 + 'model': 'deepseek-ai/deepseek-llm-7b-chat', + 'prompt': 'Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?', + 'max_tokens': 128, + 'temperature': 0.7, + 'top_p': 0.9, } -response = deployment.run_sync( - data=data, - path='v1/completions' -) # wait for the response +response = deployment.run_sync(data=data, path='v1/completions') # wait for the response # Print the response print(response.output()) diff --git a/examples/containers/calling_the_endpoint_with_inference_key.py b/examples/containers/calling_the_endpoint_with_inference_key.py index da7e7e2..7d51032 100644 --- a/examples/containers/calling_the_endpoint_with_inference_key.py +++ b/examples/containers/calling_the_endpoint_with_inference_key.py @@ -8,17 +8,17 @@ # Create an inference client that uses only the inference key, without client credentials inference_client = InferenceClient( inference_key=DATACRUNCH_INFERENCE_KEY, - endpoint_base_url=DATACRUNCH_ENDPOINT_BASE_URL + endpoint_base_url=DATACRUNCH_ENDPOINT_BASE_URL, ) # Make a synchronous request to the endpoint. # This example demonstrates calling a SGLang deployment which serves LLMs using an OpenAI-compatible API format data = { - "model": "deepseek-ai/deepseek-llm-7b-chat", - "prompt": "Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?", - "max_tokens": 128, - "temperature": 0.7, - "top_p": 0.9 + 'model': 'deepseek-ai/deepseek-llm-7b-chat', + 'prompt': 'Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?', + 'max_tokens': 128, + 'temperature': 0.7, + 'top_p': 0.9, } response = inference_client.run_sync(data=data, path='v1/completions') diff --git a/examples/containers/calling_the_endpoint_with_inference_key_async.py b/examples/containers/calling_the_endpoint_with_inference_key_async.py index e14f865..85234b9 100644 --- a/examples/containers/calling_the_endpoint_with_inference_key_async.py +++ b/examples/containers/calling_the_endpoint_with_inference_key_async.py @@ -10,22 +10,21 @@ # Create an inference client that uses only the inference key, without client credentials inference_client = InferenceClient( inference_key=DATACRUNCH_INFERENCE_KEY, - endpoint_base_url=DATACRUNCH_ENDPOINT_BASE_URL + endpoint_base_url=DATACRUNCH_ENDPOINT_BASE_URL, ) # Make an asynchronous request to the endpoint # This example demonstrates calling a SGLang deployment which serves LLMs using an OpenAI-compatible API format data = { - "model": "deepseek-ai/deepseek-llm-7b-chat", - "prompt": "Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?", - "max_tokens": 128, - "temperature": 0.7, - "top_p": 0.9 + 'model': 'deepseek-ai/deepseek-llm-7b-chat', + 'prompt': 'Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?', + 'max_tokens': 128, + 
'temperature': 0.7, + 'top_p': 0.9, } # Run the request asynchronously using the inference client -async_inference_execution = inference_client.run( - data=data, path='v1/completions') +async_inference_execution = inference_client.run(data=data, path='v1/completions') # Poll for status until completion while async_inference_execution.status() != AsyncStatus.Completed: diff --git a/examples/containers/compute_resources_example.py b/examples/containers/compute_resources_example.py index e6f2758..825a01b 100644 --- a/examples/containers/compute_resources_example.py +++ b/examples/containers/compute_resources_example.py @@ -9,21 +9,19 @@ datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Example 1: List all compute resources -print("All compute resources:") +print('All compute resources:') all_resources = datacrunch.containers.get_compute_resources() for resource in all_resources: - print( - f"Name: {resource.name}, Size: {resource.size}, Available: {resource.is_available}") + print(f'Name: {resource.name}, Size: {resource.size}, Available: {resource.is_available}') # Example 2: List available compute resources -print("\nAvailable compute resources:") -available_resources = datacrunch.containers.get_compute_resources( - is_available=True) +print('\nAvailable compute resources:') +available_resources = datacrunch.containers.get_compute_resources(is_available=True) for resource in available_resources: - print(f"Name: {resource.name}, Size: {resource.size}") + print(f'Name: {resource.name}, Size: {resource.size}') # Example 3: List compute resources of size 8 -print("\nCompute resources with size 8:") +print('\nCompute resources with size 8:') size_8_resources = datacrunch.containers.get_compute_resources(size=8) for resource in size_8_resources: - print(f"Name: {resource.name}, Available: {resource.is_available}") + print(f'Name: {resource.name}, Available: {resource.is_available}') diff --git a/examples/containers/container_deployments_example.py b/examples/containers/container_deployments_example.py index e40c4f2..ad39f96 100644 --- a/examples/containers/container_deployments_example.py +++ b/examples/containers/container_deployments_example.py @@ -29,8 +29,8 @@ ) # Configuration constants -DEPLOYMENT_NAME = "my-deployment" -IMAGE_NAME = "your-image-name:version" +DEPLOYMENT_NAME = 'my-deployment' +IMAGE_NAME = 'your-image-name:version' # Get client secret and id from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') @@ -40,7 +40,12 @@ datacrunch = None -def wait_for_deployment_health(client: DataCrunchClient, deployment_name: str, max_attempts: int = 10, delay: int = 30) -> bool: +def wait_for_deployment_health( + client: DataCrunchClient, + deployment_name: str, + max_attempts: int = 10, + delay: int = 30, +) -> bool: """Wait for deployment to reach healthy status. 
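The two inference-key examples above reduce to the pattern sketched below: a synchronous `run_sync()` call whose result is read with `output()`, and an asynchronous `run()` call polled via `AsyncStatus`. The import path is assumed from the module location in this diff and may differ from how the SDK actually exports these names; the endpoint URL and payload are placeholders.

```python
import os
import time

# Import path assumed from datacrunch/InferenceClient/inference_client.py in
# this diff; the SDK may re-export these names elsewhere.
from datacrunch.InferenceClient.inference_client import AsyncStatus, InferenceClient

client = InferenceClient(
    inference_key=os.environ['DATACRUNCH_INFERENCE_KEY'],
    endpoint_base_url='https://<containers-endpoint>/<deployment-name>',  # placeholder
)

payload = {
    'model': 'deepseek-ai/deepseek-llm-7b-chat',
    'prompt': 'Hello!',
    'max_tokens': 32,
}

# Synchronous call: blocks until the endpoint responds.
response = client.run_sync(data=payload, path='v1/completions')
print(response.output())

# Asynchronous call: poll the execution status until it reports Completed.
execution = client.run(data=payload, path='v1/completions')
while execution.status() != AsyncStatus.Completed:
    time.sleep(1)
```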
Args: @@ -55,12 +60,12 @@ def wait_for_deployment_health(client: DataCrunchClient, deployment_name: str, m for attempt in range(max_attempts): try: status = client.containers.get_deployment_status(deployment_name) - print(f"Deployment status: {status}") + print(f'Deployment status: {status}') if status == ContainerDeploymentStatus.HEALTHY: return True time.sleep(delay) except APIException as e: - print(f"Error checking deployment status: {e}") + print(f'Error checking deployment status: {e}') return False return False @@ -74,9 +79,9 @@ def cleanup_resources(client: DataCrunchClient) -> None: try: # Delete deployment client.containers.delete_deployment(DEPLOYMENT_NAME) - print("Deployment deleted") + print('Deployment deleted') except APIException as e: - print(f"Error during cleanup: {e}") + print(f'Error during cleanup: {e}') def main() -> None: @@ -84,46 +89,38 @@ def main() -> None: try: # Initialize client global datacrunch - datacrunch = DataCrunchClient( - DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) + datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Create container configuration container = Container( image=IMAGE_NAME, exposed_port=80, - healthcheck=HealthcheckSettings( - enabled=True, - port=80, - path="/health" - ), + healthcheck=HealthcheckSettings(enabled=True, port=80, path='/health'), volume_mounts=[ - GeneralStorageMount( - mount_path="/data" - ), + GeneralStorageMount(mount_path='/data'), # Optional: Fileset secret SecretMount( - mount_path="/path/to/mount", - secret_name="my-fileset-secret" # This fileset secret must be created beforehand + mount_path='/path/to/mount', + secret_name='my-fileset-secret', # This fileset secret must be created beforehand ), # Optional: Mount an existing shared filesystem volume - SharedFileSystemMount( - mount_path="/sfs", volume_id=""), + SharedFileSystemMount(mount_path='/sfs', volume_id=''), ], env=[ # Secret environment variables needed to be added beforehand EnvVar( - name="HF_TOKEN", + name='HF_TOKEN', # This is a reference to a secret already created - value_or_reference_to_secret="hf-token", - type=EnvVarType.SECRET + value_or_reference_to_secret='hf-token', + type=EnvVarType.SECRET, ), # Plain environment variables can be added directly EnvVar( - name="VERSION", - value_or_reference_to_secret="1.5.2", - type=EnvVarType.PLAIN - ) - ] + name='VERSION', + value_or_reference_to_secret='1.5.2', + type=EnvVarType.PLAIN, + ), + ], ) # Create scaling configuration @@ -136,20 +133,14 @@ def main() -> None: concurrent_requests_per_replica=1, scaling_triggers=ScalingTriggers( queue_load=QueueLoadScalingTrigger(threshold=1), - cpu_utilization=UtilizationScalingTrigger( - enabled=True, - threshold=80 - ), - gpu_utilization=UtilizationScalingTrigger( - enabled=True, - threshold=80 - ) - ) + cpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=80), + gpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=80), + ), ) # Create registry and compute settings registry_settings = ContainerRegistrySettings(is_private=False) - compute = ComputeResource(name="General Compute", size=1) + compute = ComputeResource(name='General Compute', size=1) # Create deployment object deployment = Deployment( @@ -158,24 +149,22 @@ def main() -> None: containers=[container], compute=compute, scaling=scaling_options, - is_spot=False + is_spot=False, ) # Create the deployment - created_deployment = datacrunch.containers.create_deployment( - deployment) - print(f"Created deployment: {created_deployment.name}") + 
created_deployment = datacrunch.containers.create_deployment(deployment) + print(f'Created deployment: {created_deployment.name}') # Wait for deployment to be healthy if not wait_for_deployment_health(datacrunch, DEPLOYMENT_NAME): - print("Deployment health check failed") + print('Deployment health check failed') cleanup_resources(datacrunch) return # Update scaling configuration try: - deployment = datacrunch.containers.get_deployment_by_name( - DEPLOYMENT_NAME) + deployment = datacrunch.containers.get_deployment_by_name(DEPLOYMENT_NAME) # Create new scaling options with increased replica counts deployment.scaling = ScalingOptions( min_replica_count=2, @@ -186,55 +175,49 @@ def main() -> None: concurrent_requests_per_replica=1, scaling_triggers=ScalingTriggers( queue_load=QueueLoadScalingTrigger(threshold=1), - cpu_utilization=UtilizationScalingTrigger( - enabled=True, - threshold=80 - ), - gpu_utilization=UtilizationScalingTrigger( - enabled=True, - threshold=80 - ) - ) + cpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=80), + gpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=80), + ), ) updated_deployment = datacrunch.containers.update_deployment( - DEPLOYMENT_NAME, deployment) - print(f"Updated deployment scaling: {updated_deployment.name}") + DEPLOYMENT_NAME, deployment + ) + print(f'Updated deployment scaling: {updated_deployment.name}') except APIException as e: - print(f"Error updating scaling options: {e}") + print(f'Error updating scaling options: {e}') # Demonstrate deployment operations try: # Pause deployment datacrunch.containers.pause_deployment(DEPLOYMENT_NAME) - print("Deployment paused") + print('Deployment paused') time.sleep(60) # Resume deployment datacrunch.containers.resume_deployment(DEPLOYMENT_NAME) - print("Deployment resumed") + print('Deployment resumed') # Restart deployment datacrunch.containers.restart_deployment(DEPLOYMENT_NAME) - print("Deployment restarted") + print('Deployment restarted') # Purge queue - datacrunch.containers.purge_deployment_queue( - DEPLOYMENT_NAME) - print("Queue purged") + datacrunch.containers.purge_deployment_queue(DEPLOYMENT_NAME) + print('Queue purged') except APIException as e: - print(f"Error in deployment operations: {e}") + print(f'Error in deployment operations: {e}') # Clean up cleanup_resources(datacrunch) except Exception as e: - print(f"Unexpected error: {e}") + print(f'Unexpected error: {e}') # Attempt cleanup even if there was an error try: cleanup_resources(datacrunch) except Exception as cleanup_error: - print(f"Error during cleanup after failure: {cleanup_error}") + print(f'Error during cleanup after failure: {cleanup_error}') -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/examples/containers/delete_deployment_example.py b/examples/containers/delete_deployment_example.py index 4a1c98c..274bfca 100644 --- a/examples/containers/delete_deployment_example.py +++ b/examples/containers/delete_deployment_example.py @@ -1,10 +1,9 @@ -"""Example script demonstrating deleting a deployment using the DataCrunch API. 
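The lifecycle calls exercised in the example above (pause, resume, restart, purge queue, delete) can be summarized in a short sketch. This is illustrative only; `client` is an authenticated `DataCrunchClient` and the deployment name is a placeholder.

```python
# `client` is an authenticated DataCrunchClient; the deployment name is a
# placeholder.
name = 'my-deployment'

print(client.containers.get_deployment_status(name))

client.containers.pause_deployment(name)         # stop serving, keep the configuration
client.containers.resume_deployment(name)        # bring replicas back up
client.containers.restart_deployment(name)       # restart the running containers
client.containers.purge_deployment_queue(name)   # drop queued inference requests

# Remove the deployment entirely once it is no longer needed.
client.containers.delete_deployment(name)
```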
-""" +"""Example script demonstrating deleting a deployment using the DataCrunch API.""" import os from datacrunch import DataCrunchClient -DEPLOYMENT_NAME = "sglang-deployment-example-20250411-160652" +DEPLOYMENT_NAME = 'sglang-deployment-example-20250411-160652' # Get confidential values from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') @@ -15,4 +14,4 @@ # Register signal handlers for cleanup datacrunch.containers.delete_deployment(DEPLOYMENT_NAME) -print("Deployment deleted") \ No newline at end of file +print('Deployment deleted') diff --git a/examples/containers/environment_variables_example.py b/examples/containers/environment_variables_example.py index 8e33bf5..092ac58 100644 --- a/examples/containers/environment_variables_example.py +++ b/examples/containers/environment_variables_example.py @@ -20,81 +20,69 @@ datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Example deployment and container names -DEPLOYMENT_NAME = "my-deployment" -CONTAINER_NAME = "main" +DEPLOYMENT_NAME = 'my-deployment' +CONTAINER_NAME = 'main' def print_env_vars(env_vars: Dict[str, List[EnvVar]]) -> None: """Helper function to print environment variables""" - print("\nCurrent environment variables:") + print('\nCurrent environment variables:') for container_name, vars in env_vars.items(): - print(f"\nContainer: {container_name}") + print(f'\nContainer: {container_name}') for var in vars: - print(f" {var.name}: {var.value_or_reference_to_secret} ({var.type})") + print(f' {var.name}: {var.value_or_reference_to_secret} ({var.type})') def main(): # First, let's get the current environment variables - print("Getting current environment variables...") - env_vars = datacrunch.containers.get_deployment_environment_variables( - DEPLOYMENT_NAME) + print('Getting current environment variables...') + env_vars = datacrunch.containers.get_deployment_environment_variables(DEPLOYMENT_NAME) print_env_vars(env_vars) # Create a new secret - secret_name = "my-secret-key" - datacrunch.containers.create_secret( - secret_name, - "my-secret-value" - ) + secret_name = 'my-secret-key' + datacrunch.containers.create_secret(secret_name, 'my-secret-value') # Add new environment variables - print("\nAdding new environment variables...") + print('\nAdding new environment variables...') new_env_vars = [ EnvVar( - name="API_KEY", + name='API_KEY', value_or_reference_to_secret=secret_name, - type=EnvVarType.SECRET + type=EnvVarType.SECRET, ), - EnvVar( - name="DEBUG", - value_or_reference_to_secret="true", - type=EnvVarType.PLAIN - ) + EnvVar(name='DEBUG', value_or_reference_to_secret='true', type=EnvVarType.PLAIN), ] env_vars = datacrunch.containers.add_deployment_environment_variables( deployment_name=DEPLOYMENT_NAME, container_name=CONTAINER_NAME, - env_vars=new_env_vars + env_vars=new_env_vars, ) print_env_vars(env_vars) # Update existing environment variables - print("\nUpdating environment variables...") + print('\nUpdating environment variables...') updated_env_vars = [ - EnvVar( - name="DEBUG", - value_or_reference_to_secret="false", - type=EnvVarType.PLAIN - ), + EnvVar(name='DEBUG', value_or_reference_to_secret='false', type=EnvVarType.PLAIN), ] env_vars = datacrunch.containers.update_deployment_environment_variables( deployment_name=DEPLOYMENT_NAME, container_name=CONTAINER_NAME, - env_vars=updated_env_vars + env_vars=updated_env_vars, ) print_env_vars(env_vars) # Delete environment variables - print("\nDeleting environment variables...") + print('\nDeleting 
environment variables...') env_vars = datacrunch.containers.delete_deployment_environment_variables( deployment_name=DEPLOYMENT_NAME, container_name=CONTAINER_NAME, - env_var_names=["DEBUG"] + env_var_names=['DEBUG'], ) print_env_vars(env_vars) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/examples/containers/fileset_secret_example.py b/examples/containers/fileset_secret_example.py index c04ddc0..65ca539 100644 --- a/examples/containers/fileset_secret_example.py +++ b/examples/containers/fileset_secret_example.py @@ -12,13 +12,14 @@ datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Define the secret name and the file paths from your local filesystem where this script is running -SECRET_NAME = "my-fileset-secret" -RELATIVE_FILE_PATH = "./relative-path/file1.txt" -ABSOLUTE_FILE_PATH = "/home/username/absolute-path/file2.json" +SECRET_NAME = 'my-fileset-secret' +RELATIVE_FILE_PATH = './relative-path/file1.txt' +ABSOLUTE_FILE_PATH = '/home/username/absolute-path/file2.json' # Create the fileset secret that has 2 files fileset_secret = datacrunch.containers.create_fileset_secret_from_file_paths( - secret_name=SECRET_NAME, file_paths=[RELATIVE_FILE_PATH, ABSOLUTE_FILE_PATH]) + secret_name=SECRET_NAME, file_paths=[RELATIVE_FILE_PATH, ABSOLUTE_FILE_PATH] +) # Get the secret secrets = datacrunch.containers.get_fileset_secrets() diff --git a/examples/containers/registry_credentials_example.py b/examples/containers/registry_credentials_example.py index e13400f..d017850 100644 --- a/examples/containers/registry_credentials_example.py +++ b/examples/containers/registry_credentials_example.py @@ -5,7 +5,7 @@ GithubCredentials, GCRCredentials, AWSECRCredentials, - CustomRegistryCredentials + CustomRegistryCredentials, ) # Get client secret and id from environment variables @@ -17,21 +17,21 @@ # Example 1: DockerHub Credentials dockerhub_creds = DockerHubCredentials( - name="my-dockerhub-creds", - username="your-dockerhub-username", - access_token="your-dockerhub-access-token" + name='my-dockerhub-creds', + username='your-dockerhub-username', + access_token='your-dockerhub-access-token', ) datacrunch.containers.add_registry_credentials(dockerhub_creds) -print("Created DockerHub credentials") +print('Created DockerHub credentials') # Example 2: GitHub Container Registry Credentials github_creds = GithubCredentials( - name="my-github-creds", - username="your-github-username", - access_token="your-github-token" + name='my-github-creds', + username='your-github-username', + access_token='your-github-token', ) datacrunch.containers.add_registry_credentials(github_creds) -print("Created GitHub credentials") +print('Created GitHub credentials') # Example 3: Google Container Registry (GCR) Credentials # For GCR, you need to provide a service account key JSON string @@ -48,23 +48,20 @@ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-service-account%40your-project.iam.gserviceaccount.com" }""" -gcr_creds = GCRCredentials( - name="my-gcr-creds", - service_account_key=gcr_service_account_key -) +gcr_creds = GCRCredentials(name='my-gcr-creds', service_account_key=gcr_service_account_key) datacrunch.containers.add_registry_credentials(gcr_creds) -print("Created GCR credentials") +print('Created GCR credentials') # Example 4: AWS ECR Credentials aws_creds = AWSECRCredentials( - name="my-aws-ecr-creds", - access_key_id="AKIAEXAMPLE123456", - secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - region="eu-north-1", - 
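As a rough sketch of the environment-variable workflow shown in this example: create a secret, attach it to a container as a secret-typed variable alongside a plain one, and later remove variables by name. Imports for `EnvVar`/`EnvVarType` are omitted here (they are the same types imported in the example above); the deployment and container names are placeholders.

```python
# `client` is an authenticated DataCrunchClient; EnvVar/EnvVarType are the
# same types imported in the example above. Names and values are placeholders.
client.containers.create_secret('my-api-key', 'super-secret-value')

new_vars = [
    EnvVar(name='API_KEY', value_or_reference_to_secret='my-api-key', type=EnvVarType.SECRET),
    EnvVar(name='DEBUG', value_or_reference_to_secret='true', type=EnvVarType.PLAIN),
]

client.containers.add_deployment_environment_variables(
    deployment_name='my-deployment',
    container_name='main',
    env_vars=new_vars,
)

# Remove a variable again by name.
client.containers.delete_deployment_environment_variables(
    deployment_name='my-deployment',
    container_name='main',
    env_var_names=['DEBUG'],
)
```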
ecr_repo="887841266746.dkr.ecr.eu-north-1.amazonaws.com" + name='my-aws-ecr-creds', + access_key_id='AKIAEXAMPLE123456', + secret_access_key='wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY', + region='eu-north-1', + ecr_repo='887841266746.dkr.ecr.eu-north-1.amazonaws.com', ) datacrunch.containers.add_registry_credentials(aws_creds) -print("Created AWS ECR credentials") +print('Created AWS ECR credentials') # Example 5: Custom Registry Credentials custom_docker_config = """{ @@ -76,16 +73,14 @@ }""" custom_creds = CustomRegistryCredentials( - name="my-custom-registry-creds", - docker_config_json=custom_docker_config + name='my-custom-registry-creds', docker_config_json=custom_docker_config ) datacrunch.containers.add_registry_credentials(custom_creds) -print("Created Custom registry credentials") +print('Created Custom registry credentials') # Delete all registry credentials datacrunch.containers.delete_registry_credentials('my-dockerhub-creds') datacrunch.containers.delete_registry_credentials('my-github-creds') datacrunch.containers.delete_registry_credentials('my-gcr-creds') datacrunch.containers.delete_registry_credentials('my-aws-ecr-creds') -datacrunch.containers.delete_registry_credentials( - 'my-custom-registry-creds') +datacrunch.containers.delete_registry_credentials('my-custom-registry-creds') diff --git a/examples/containers/secrets_example.py b/examples/containers/secrets_example.py index 7c46826..d8ab2af 100644 --- a/examples/containers/secrets_example.py +++ b/examples/containers/secrets_example.py @@ -10,28 +10,22 @@ # List all secrets secrets = datacrunch.containers.get_secrets() -print("Available secrets:") +print('Available secrets:') for secret in secrets: - print(f"- {secret.name} (created at: {secret.created_at})") + print(f'- {secret.name} (created at: {secret.created_at})') # Create a new secret -secret_name = "my-api-key" -secret_value = "super-secret-value" -datacrunch.containers.create_secret( - name=secret_name, - value=secret_value -) -print(f"\nCreated new secret: {secret_name}") +secret_name = 'my-api-key' +secret_value = 'super-secret-value' +datacrunch.containers.create_secret(name=secret_name, value=secret_value) +print(f'\nCreated new secret: {secret_name}') # Delete a secret (with force=False by default) datacrunch.containers.delete_secret(secret_name) -print(f"\nDeleted secret: {secret_name}") +print(f'\nDeleted secret: {secret_name}') # Delete a secret with force=True (will delete even if secret is in use) -secret_name = "another-secret" -datacrunch.containers.create_secret( - name=secret_name, - value=secret_value -) +secret_name = 'another-secret' +datacrunch.containers.create_secret(name=secret_name, value=secret_value) datacrunch.containers.delete_secret(secret_name, force=True) -print(f"\nForce deleted secret: {secret_name}") +print(f'\nForce deleted secret: {secret_name}') diff --git a/examples/containers/sglang_deployment_example.py b/examples/containers/sglang_deployment_example.py index ca9c802..5756d5e 100644 --- a/examples/containers/sglang_deployment_example.py +++ b/examples/containers/sglang_deployment_example.py @@ -28,14 +28,13 @@ ContainerDeploymentStatus, ) -CURRENT_TIMESTAMP = datetime.now().strftime( - "%Y%m%d-%H%M%S").lower() # e.g. 20250403-120000 +CURRENT_TIMESTAMP = datetime.now().strftime('%Y%m%d-%H%M%S').lower() # e.g. 
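The registry-credential helpers used above follow a single pattern: build a credentials object, register it, and later delete it by name. Below is a minimal sketch with DockerHub credentials, assuming the same imports as the example above; every name, username, and token is a placeholder.

```python
# Assumes the same registry-credential imports as the example above; every
# name, username, and token here is a placeholder.
creds = DockerHubCredentials(
    name='my-dockerhub-creds',
    username='your-dockerhub-username',
    access_token='your-dockerhub-access-token',
)

client.containers.add_registry_credentials(creds)

# Credentials are later removed by the name they were registered under.
client.containers.delete_registry_credentials('my-dockerhub-creds')
```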
20250403-120000 # Configuration constants -DEPLOYMENT_NAME = f"sglang-deployment-example-{CURRENT_TIMESTAMP}" -SGLANG_IMAGE_URL = "docker.io/lmsysorg/sglang:v0.4.1.post6-cu124" -DEEPSEEK_MODEL_PATH = "deepseek-ai/deepseek-llm-7b-chat" -HF_SECRET_NAME = "huggingface-token" +DEPLOYMENT_NAME = f'sglang-deployment-example-{CURRENT_TIMESTAMP}' +SGLANG_IMAGE_URL = 'docker.io/lmsysorg/sglang:v0.4.1.post6-cu124' +DEEPSEEK_MODEL_PATH = 'deepseek-ai/deepseek-llm-7b-chat' +HF_SECRET_NAME = 'huggingface-token' # Get confidential values from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') @@ -43,7 +42,13 @@ DATACRUNCH_INFERENCE_KEY = os.environ.get('DATACRUNCH_INFERENCE_KEY') HF_TOKEN = os.environ.get('HF_TOKEN') -def wait_for_deployment_health(datacrunch_client: DataCrunchClient, deployment_name: str, max_attempts: int = 20, delay: int = 30) -> bool: + +def wait_for_deployment_health( + datacrunch_client: DataCrunchClient, + deployment_name: str, + max_attempts: int = 20, + delay: int = 30, +) -> bool: """Wait for deployment to reach healthy status. Args: @@ -55,18 +60,16 @@ def wait_for_deployment_health(datacrunch_client: DataCrunchClient, deployment_n Returns: bool: True if deployment is healthy, False otherwise """ - print("Waiting for deployment to be healthy (may take several minutes to download model)...") + print('Waiting for deployment to be healthy (may take several minutes to download model)...') for attempt in range(max_attempts): try: - status = datacrunch_client.containers.get_deployment_status( - deployment_name) - print( - f"Attempt {attempt+1}/{max_attempts} - Deployment status: {status}") + status = datacrunch_client.containers.get_deployment_status(deployment_name) + print(f'Attempt {attempt + 1}/{max_attempts} - Deployment status: {status}') if status == ContainerDeploymentStatus.HEALTHY: return True time.sleep(delay) except APIException as e: - print(f"Error checking deployment status: {e}") + print(f'Error checking deployment status: {e}') return False return False @@ -80,18 +83,18 @@ def cleanup_resources(datacrunch_client: DataCrunchClient) -> None: try: # Delete deployment datacrunch_client.containers.delete_deployment(DEPLOYMENT_NAME) - print("Deployment deleted") + print('Deployment deleted') except APIException as e: - print(f"Error during cleanup: {e}") + print(f'Error during cleanup: {e}') def graceful_shutdown(signum, frame) -> None: """Handle graceful shutdown on signals.""" - print(f"\nSignal {signum} received, cleaning up resources...") + print(f'\nSignal {signum} received, cleaning up resources...') try: cleanup_resources(datacrunch) except Exception as e: - print(f"Error during cleanup: {e}") + print(f'Error during cleanup: {e}') sys.exit(0) @@ -100,15 +103,16 @@ def graceful_shutdown(signum, frame) -> None: datacrunch_inference_key = DATACRUNCH_INFERENCE_KEY if not datacrunch_inference_key: datacrunch_inference_key = input( - "Enter your Inference API Key from the DataCrunch dashboard: ") + 'Enter your Inference API Key from the DataCrunch dashboard: ' + ) else: - print("Using Inference API Key from environment") + print('Using Inference API Key from environment') # Initialize client with inference key datacrunch = DataCrunchClient( client_id=DATACRUNCH_CLIENT_ID, client_secret=DATACRUNCH_CLIENT_SECRET, - inference_key=datacrunch_inference_key + inference_key=datacrunch_inference_key, ) # Register signal handlers for cleanup @@ -116,26 +120,22 @@ def graceful_shutdown(signum, frame) -> None: signal.signal(signal.SIGTERM, 
graceful_shutdown) # Create a secret for the Hugging Face token - print(f"Creating secret for Hugging Face token: {HF_SECRET_NAME}") + print(f'Creating secret for Hugging Face token: {HF_SECRET_NAME}') try: # Check if secret already exists existing_secrets = datacrunch.containers.get_secrets() - secret_exists = any( - secret.name == HF_SECRET_NAME for secret in existing_secrets) + secret_exists = any(secret.name == HF_SECRET_NAME for secret in existing_secrets) if not secret_exists: # check is HF_TOKEN is set, if not, prompt the user if not HF_TOKEN: - HF_TOKEN = input( - "Enter your Hugging Face token: ") - datacrunch.containers.create_secret( - HF_SECRET_NAME, HF_TOKEN) + HF_TOKEN = input('Enter your Hugging Face token: ') + datacrunch.containers.create_secret(HF_SECRET_NAME, HF_TOKEN) print(f"Secret '{HF_SECRET_NAME}' created successfully") else: - print( - f"Secret '{HF_SECRET_NAME}' already exists, using existing secret") + print(f"Secret '{HF_SECRET_NAME}' already exists, using existing secret") except APIException as e: - print(f"Error creating secret: {e}") + print(f'Error creating secret: {e}') sys.exit(1) # Create container configuration @@ -143,23 +143,28 @@ def graceful_shutdown(signum, frame) -> None: container = Container( image=SGLANG_IMAGE_URL, exposed_port=APP_PORT, - healthcheck=HealthcheckSettings( - enabled=True, - port=APP_PORT, - path="/health" - ), + healthcheck=HealthcheckSettings(enabled=True, port=APP_PORT, path='/health'), entrypoint_overrides=EntrypointOverridesSettings( enabled=True, - cmd=["python3", "-m", "sglang.launch_server", "--model-path", - DEEPSEEK_MODEL_PATH, "--host", "0.0.0.0", "--port", str(APP_PORT)] + cmd=[ + 'python3', + '-m', + 'sglang.launch_server', + '--model-path', + DEEPSEEK_MODEL_PATH, + '--host', + '0.0.0.0', + '--port', + str(APP_PORT), + ], ), env=[ EnvVar( - name="HF_TOKEN", + name='HF_TOKEN', value_or_reference_to_secret=HF_SECRET_NAME, - type=EnvVarType.SECRET + type=EnvVarType.SECRET, ) - ] + ], ) # Create scaling configuration @@ -167,27 +172,20 @@ def graceful_shutdown(signum, frame) -> None: min_replica_count=1, max_replica_count=5, scale_down_policy=ScalingPolicy(delay_seconds=60 * 5), - scale_up_policy=ScalingPolicy( - delay_seconds=0), # No delay for scale up + scale_up_policy=ScalingPolicy(delay_seconds=0), # No delay for scale up queue_message_ttl_seconds=500, # Modern LLM engines are optimized for batching requests, with minimal performance impact. Taking advantage of batching can significantly improve throughput. concurrent_requests_per_replica=32, scaling_triggers=ScalingTriggers( # lower value means more aggressive scaling queue_load=QueueLoadScalingTrigger(threshold=0.1), - cpu_utilization=UtilizationScalingTrigger( - enabled=True, - threshold=90 - ), - gpu_utilization=UtilizationScalingTrigger( - enabled=True, - threshold=90 - ) - ) + cpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=90), + gpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=90), + ), ) # Set compute settings. 
For a 7B model, General Compute (24GB VRAM) is sufficient - compute = ComputeResource(name="General Compute", size=1) + compute = ComputeResource(name='General Compute', size=1) # Create deployment object (no need to provide container_registry_settings because it's public) deployment = Deployment( @@ -195,57 +193,54 @@ def graceful_shutdown(signum, frame) -> None: containers=[container], compute=compute, scaling=scaling_options, - is_spot=False + is_spot=False, ) # Create the deployment - created_deployment = datacrunch.containers.create_deployment( - deployment) - print(f"Created deployment: {created_deployment.name}") - print("This could take several minutes while the model is downloaded and the server starts...") + created_deployment = datacrunch.containers.create_deployment(deployment) + print(f'Created deployment: {created_deployment.name}') + print('This could take several minutes while the model is downloaded and the server starts...') # Wait for deployment to be healthy if not wait_for_deployment_health(datacrunch, DEPLOYMENT_NAME): - print("Deployment health check failed") + print('Deployment health check failed') cleanup_resources(datacrunch) sys.exit(1) # Test the deployment with a simple request - print("\nTesting the deployment...") + print('\nTesting the deployment...') try: # Test model info endpoint print( - "Testing /get_model_info endpoint by making a sync GET request to the SGLang server...") - model_info_response = created_deployment._inference_client.get( - path="/get_model_info") - print("Model info endpoint is working!") - print(f"Response: {model_info_response}") + 'Testing /get_model_info endpoint by making a sync GET request to the SGLang server...' + ) + model_info_response = created_deployment._inference_client.get(path='/get_model_info') + print('Model info endpoint is working!') + print(f'Response: {model_info_response}') # Test completions endpoint - print("\nTesting completions API...") + print('\nTesting completions API...') completions_data = { - "model": DEEPSEEK_MODEL_PATH, - "prompt": "Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?", - "max_tokens": 128, - "temperature": 0.7, - "top_p": 0.9, + 'model': DEEPSEEK_MODEL_PATH, + 'prompt': 'Is consciousness fundamentally computational, or is there something more to subjective experience that cannot be reduced to information processing?', + 'max_tokens': 128, + 'temperature': 0.7, + 'top_p': 0.9, } # Make a sync inference request to the SGLang server completions_response = created_deployment.run_sync( completions_data, - path="/v1/completions", + path='/v1/completions', ) - print("Completions API is working!") - print(f"Response: {completions_response.output()}\n") + print('Completions API is working!') + print(f'Response: {completions_response.output()}\n') # Make a stream sync inference request to the SGLang server completions_response_stream = created_deployment.run_sync( - {**completions_data, "stream": True}, - path="/v1/completions", - stream=True + {**completions_data, 'stream': True}, path='/v1/completions', stream=True ) - print("Stream completions API is working!") + print('Stream completions API is working!') # Print the streamed response for line in completions_response_stream.stream(as_text=True): if line: @@ -265,24 +260,22 @@ def graceful_shutdown(signum, frame) -> None: continue except Exception as e: - print(f"Error testing deployment: {e}") + print(f'Error testing deployment: {e}') # Cleanup 
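Streaming responses, as tested at the end of this example, reduce to the following sketch: request the completion with `stream=True` and iterate over `stream(as_text=True)`. `datacrunch` is assumed to be a client constructed with an inference key, and the deployment name, model, and prompt are placeholders.

```python
# `datacrunch` is a DataCrunchClient created with an inference key; the
# deployment name, model, and prompt are placeholders.
deployment = datacrunch.containers.get_deployment_by_name('my-deployment')

stream_response = deployment.run_sync(
    {
        'model': 'deepseek-ai/deepseek-llm-7b-chat',
        'prompt': 'Write a haiku about GPUs.',
        'max_tokens': 64,
        'stream': True,
    },
    path='/v1/completions',
    stream=True,
)

# Chunks arrive as decoded text when as_text=True.
for line in stream_response.stream(as_text=True):
    if line:
        print(line)
```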
or keep running based on user input - keep_running = input( - "\nDo you want to keep the deployment running? (y/n): ") + keep_running = input('\nDo you want to keep the deployment running? (y/n): ') if keep_running.lower() != 'y': cleanup_resources(datacrunch) else: - print( - f"Deployment {DEPLOYMENT_NAME} is running. Don't forget to delete it when finished.") - print("You can delete it from the DataCrunch dashboard or by running:") + print(f"Deployment {DEPLOYMENT_NAME} is running. Don't forget to delete it when finished.") + print('You can delete it from the DataCrunch dashboard or by running:') print(f"datacrunch.containers.delete('{DEPLOYMENT_NAME}')") except Exception as e: - print(f"Unexpected error: {e}") + print(f'Unexpected error: {e}') # Attempt cleanup even if there was an error try: cleanup_resources(datacrunch) except Exception as cleanup_error: - print(f"Error during cleanup after failure: {cleanup_error}") + print(f'Error during cleanup after failure: {cleanup_error}') sys.exit(1) diff --git a/examples/containers/update_deployment_scaling_example.py b/examples/containers/update_deployment_scaling_example.py index 4c91666..74db5ae 100644 --- a/examples/containers/update_deployment_scaling_example.py +++ b/examples/containers/update_deployment_scaling_example.py @@ -12,7 +12,7 @@ ScalingPolicy, ScalingTriggers, QueueLoadScalingTrigger, - UtilizationScalingTrigger + UtilizationScalingTrigger, ) @@ -26,88 +26,82 @@ try: # Get current scaling options - scaling_options = datacrunch.containers.get_deployment_scaling_options( - DEPLOYMENT_NAME) + scaling_options = datacrunch.containers.get_deployment_scaling_options(DEPLOYMENT_NAME) - print("Current scaling configuration:\n") - print(f"Min replicas: {scaling_options.min_replica_count}") - print(f"Max replicas: {scaling_options.max_replica_count}") - print( - f"Scale-up delay: {scaling_options.scale_up_policy.delay_seconds} seconds") - print( - f"Scale-down delay: {scaling_options.scale_down_policy.delay_seconds} seconds") - print( - f"Queue message TTL: {scaling_options.queue_message_ttl_seconds} seconds") - print( - f"Concurrent requests per replica: {scaling_options.concurrent_requests_per_replica}") - print("Scaling Triggers:") - print( - f" Queue load threshold: {scaling_options.scaling_triggers.queue_load.threshold}") + print('Current scaling configuration:\n') + print(f'Min replicas: {scaling_options.min_replica_count}') + print(f'Max replicas: {scaling_options.max_replica_count}') + print(f'Scale-up delay: {scaling_options.scale_up_policy.delay_seconds} seconds') + print(f'Scale-down delay: {scaling_options.scale_down_policy.delay_seconds} seconds') + print(f'Queue message TTL: {scaling_options.queue_message_ttl_seconds} seconds') + print(f'Concurrent requests per replica: {scaling_options.concurrent_requests_per_replica}') + print('Scaling Triggers:') + print(f' Queue load threshold: {scaling_options.scaling_triggers.queue_load.threshold}') if scaling_options.scaling_triggers.cpu_utilization: print( - f" CPU utilization enabled: {scaling_options.scaling_triggers.cpu_utilization.enabled}") + f' CPU utilization enabled: {scaling_options.scaling_triggers.cpu_utilization.enabled}' + ) print( - f" CPU utilization threshold: {scaling_options.scaling_triggers.cpu_utilization.threshold}%") + f' CPU utilization threshold: {scaling_options.scaling_triggers.cpu_utilization.threshold}%' + ) if scaling_options.scaling_triggers.gpu_utilization: print( - f" GPU utilization enabled: 
{scaling_options.scaling_triggers.gpu_utilization.enabled}") + f' GPU utilization enabled: {scaling_options.scaling_triggers.gpu_utilization.enabled}' + ) if scaling_options.scaling_triggers.gpu_utilization.threshold: print( - f" GPU utilization threshold: {scaling_options.scaling_triggers.gpu_utilization.threshold}%") + f' GPU utilization threshold: {scaling_options.scaling_triggers.gpu_utilization.threshold}%' + ) # Create scaling options using ScalingOptions dataclass scaling_options = ScalingOptions( min_replica_count=1, max_replica_count=5, - scale_down_policy=ScalingPolicy( - delay_seconds=600), # Longer cooldown period + scale_down_policy=ScalingPolicy(delay_seconds=600), # Longer cooldown period scale_up_policy=ScalingPolicy(delay_seconds=0), # Quick scale-up queue_message_ttl_seconds=500, concurrent_requests_per_replica=50, # LLMs can handle concurrent requests scaling_triggers=ScalingTriggers( queue_load=QueueLoadScalingTrigger(threshold=1.0), - cpu_utilization=UtilizationScalingTrigger( - enabled=True, - threshold=75 - ), + cpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=75), gpu_utilization=UtilizationScalingTrigger( enabled=False # Disable GPU utilization trigger - ) - ) + ), + ), ) # Update scaling options updated_options = datacrunch.containers.update_deployment_scaling_options( - DEPLOYMENT_NAME, scaling_options) + DEPLOYMENT_NAME, scaling_options + ) - print("\nUpdated scaling configuration:\n") - print(f"Min replicas: {updated_options.min_replica_count}") - print(f"Max replicas: {updated_options.max_replica_count}") - print( - f"Scale-up delay: {updated_options.scale_up_policy.delay_seconds} seconds") - print( - f"Scale-down delay: {updated_options.scale_down_policy.delay_seconds} seconds") - print( - f"Queue message TTL: {updated_options.queue_message_ttl_seconds} seconds") - print( - f"Concurrent requests per replica: {updated_options.concurrent_requests_per_replica}") - print("Scaling Triggers:") - print( - f" Queue load threshold: {updated_options.scaling_triggers.queue_load.threshold}") + print('\nUpdated scaling configuration:\n') + print(f'Min replicas: {updated_options.min_replica_count}') + print(f'Max replicas: {updated_options.max_replica_count}') + print(f'Scale-up delay: {updated_options.scale_up_policy.delay_seconds} seconds') + print(f'Scale-down delay: {updated_options.scale_down_policy.delay_seconds} seconds') + print(f'Queue message TTL: {updated_options.queue_message_ttl_seconds} seconds') + print(f'Concurrent requests per replica: {updated_options.concurrent_requests_per_replica}') + print('Scaling Triggers:') + print(f' Queue load threshold: {updated_options.scaling_triggers.queue_load.threshold}') if updated_options.scaling_triggers.cpu_utilization: print( - f" CPU utilization enabled: {updated_options.scaling_triggers.cpu_utilization.enabled}") + f' CPU utilization enabled: {updated_options.scaling_triggers.cpu_utilization.enabled}' + ) print( - f" CPU utilization threshold: {updated_options.scaling_triggers.cpu_utilization.threshold}%") + f' CPU utilization threshold: {updated_options.scaling_triggers.cpu_utilization.threshold}%' + ) if updated_options.scaling_triggers.gpu_utilization: print( - f" GPU utilization enabled: {updated_options.scaling_triggers.gpu_utilization.enabled}") + f' GPU utilization enabled: {updated_options.scaling_triggers.gpu_utilization.enabled}' + ) if updated_options.scaling_triggers.gpu_utilization.threshold: print( - f" GPU utilization threshold: 
{updated_options.scaling_triggers.gpu_utilization.threshold}%") + f' GPU utilization threshold: {updated_options.scaling_triggers.gpu_utilization.threshold}%' + ) except APIException as e: - print(f"Error updating scaling options: {e}") + print(f'Error updating scaling options: {e}') except Exception as e: - print(f"Unexpected error: {e}") + print(f'Unexpected error: {e}') diff --git a/examples/instance_actions.py b/examples/instance_actions.py index 184cc99..09c9225 100644 --- a/examples/instance_actions.py +++ b/examples/instance_actions.py @@ -16,42 +16,41 @@ ssh_keys_ids = list(map(lambda ssh_key: ssh_key.id, ssh_keys)) # Create a new 1V100.6V instance -instance = datacrunch.instances.create(instance_type='1V100.6V', - image='ubuntu-22.04-cuda-12.0-docker', - ssh_key_ids=ssh_keys_ids, - hostname='example', - description='example instance') +instance = datacrunch.instances.create( + instance_type='1V100.6V', + image='ubuntu-22.04-cuda-12.0-docker', + ssh_key_ids=ssh_keys_ids, + hostname='example', + description='example instance', +) print(instance.id) # Try to shutdown instance right away, # encounter an error (because it's still provisioning) try: - datacrunch.instances.action( - instance.id, datacrunch.constants.instance_actions.SHUTDOWN) + datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.SHUTDOWN) except APIException as exception: print(exception) # we were too eager... # Wait until instance is running (check every 30sec), only then shut it down -while (instance.status != datacrunch.constants.instance_status.RUNNING): +while instance.status != datacrunch.constants.instance_status.RUNNING: time.sleep(30) instance = datacrunch.instances.get_by_id(instance.id) # Shutdown! try: - datacrunch.instances.action( - instance.id, datacrunch.constants.instance_actions.SHUTDOWN) + datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.SHUTDOWN) except APIException as exception: print(exception) # no exception this time # Wait until instance is offline (check every 30sec), only then hibernate -while (instance.status != datacrunch.constants.instance_status.OFFLINE): +while instance.status != datacrunch.constants.instance_status.OFFLINE: time.sleep(30) instance = datacrunch.instances.get_by_id(instance.id) # Hibernate the instance try: - datacrunch.instances.action( - instance.id, datacrunch.constants.instance_actions.HIBERNATE) + datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.HIBERNATE) except APIException as exception: print(exception) diff --git a/examples/instances_and_volumes.py b/examples/instances_and_volumes.py index 44b65d1..6d50887 100644 --- a/examples/instances_and_volumes.py +++ b/examples/instances_and_volumes.py @@ -20,46 +20,52 @@ ssh_keys = datacrunch.ssh_keys.get() # Create instance with extra attached volumes -instance_with_extra_volumes = datacrunch.instances.create(instance_type='1V100.6V', - image='ubuntu-22.04-cuda-12.0-docker', - ssh_key_ids=ssh_keys, - hostname='example', - description='example instance', - volumes=[ - {"type": HDD, "name": "volume-1", - "size": 95}, - {"type": NVMe, - "name": "volume-2", "size": 95} - ]) +instance_with_extra_volumes = datacrunch.instances.create( + instance_type='1V100.6V', + image='ubuntu-22.04-cuda-12.0-docker', + ssh_key_ids=ssh_keys, + hostname='example', + description='example instance', + volumes=[ + {'type': HDD, 'name': 'volume-1', 'size': 95}, + {'type': NVMe, 'name': 'volume-2', 'size': 95}, + ], +) # Create instance with custom OS volume size 
and name -instance_with_custom_os_volume = datacrunch.instances.create(instance_type='1V100.6V', - image='ubuntu-22.04-cuda-12.0-docker', - ssh_key_ids=ssh_keys, - hostname='example', - description='example instance', - os_volume={ - "name": "OS volume", - "size": 95 - }) +instance_with_custom_os_volume = datacrunch.instances.create( + instance_type='1V100.6V', + image='ubuntu-22.04-cuda-12.0-docker', + ssh_key_ids=ssh_keys, + hostname='example', + description='example instance', + os_volume={'name': 'OS volume', 'size': 95}, +) # Create instance with existing OS volume as an image -instance_with_existing_os_volume = datacrunch.instances.create(instance_type='1V100.6V', - image=EXISTING_OS_VOLUME_ID, - ssh_key_ids=ssh_keys, - hostname='example', - description='example instance') +instance_with_existing_os_volume = datacrunch.instances.create( + instance_type='1V100.6V', + image=EXISTING_OS_VOLUME_ID, + ssh_key_ids=ssh_keys, + hostname='example', + description='example instance', +) # Delete instance AND OS volume (the rest of the volumes would be detached) -datacrunch.instances.action(instance_id=EXAMPLE_INSTANCE_ID, - action=datacrunch.constants.instance_actions.DELETE) +datacrunch.instances.action( + instance_id=EXAMPLE_INSTANCE_ID, action=datacrunch.constants.instance_actions.DELETE +) # Delete instance WITHOUT deleting the OS volume (will detach all volumes of the instance) -datacrunch.instances.action(instance_id=EXAMPLE_INSTANCE_ID, - action=datacrunch.constants.instance_actions.DELETE, - volume_ids=[]) +datacrunch.instances.action( + instance_id=EXAMPLE_INSTANCE_ID, + action=datacrunch.constants.instance_actions.DELETE, + volume_ids=[], +) # Delete instance and one of it's volumes (will delete one volume, detach the rest) -datacrunch.instances.action(instance_id=EXAMPLE_INSTANCE_ID, - action=datacrunch.constants.instance_actions.DELETE, - volume_ids=[EXAMPLE_VOLUME_ID]) +datacrunch.instances.action( + instance_id=EXAMPLE_INSTANCE_ID, + action=datacrunch.constants.instance_actions.DELETE, + volume_ids=[EXAMPLE_VOLUME_ID], +) diff --git a/examples/simple_create_instance.py b/examples/simple_create_instance.py index a52b18d..6e4c4ab 100644 --- a/examples/simple_create_instance.py +++ b/examples/simple_create_instance.py @@ -15,12 +15,14 @@ ssh_keys_ids = list(map(lambda ssh_key: ssh_key.id, ssh_keys)) # Create a new instance -instance = datacrunch.instances.create(instance_type='1V100.6V', - image='ubuntu-22.04-cuda-12.0-docker', - location=Locations.FIN_01, - ssh_key_ids=ssh_keys_ids, - hostname='example', - description='example instance') +instance = datacrunch.instances.create( + instance_type='1V100.6V', + image='ubuntu-22.04-cuda-12.0-docker', + location=Locations.FIN_01, + ssh_key_ids=ssh_keys_ids, + hostname='example', + description='example instance', +) # Wait for instance to enter running state while instance.status != InstanceStatus.RUNNING: @@ -30,5 +32,4 @@ print(instance) # Delete instance -datacrunch.instances.action( - instance.id, datacrunch.constants.instance_actions.DELETE) +datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.DELETE) diff --git a/examples/ssh_keys.py b/examples/ssh_keys.py index dc91639..fefaa60 100644 --- a/examples/ssh_keys.py +++ b/examples/ssh_keys.py @@ -9,7 +9,9 @@ datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Create new SSH key -public_key = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI0qq2Qjt5GPi7DKdcnBHOkvk8xNsG9dA607tnWagOkHC test_key' +public_key = ( + 'ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAI0qq2Qjt5GPi7DKdcnBHOkvk8xNsG9dA607tnWagOkHC test_key' +) ssh_key = datacrunch.ssh_keys.create('my test key', public_key) # Print new key id, name, public key diff --git a/examples/startup_scripts.py b/examples/startup_scripts.py index baa8587..99f4352 100644 --- a/examples/startup_scripts.py +++ b/examples/startup_scripts.py @@ -17,7 +17,7 @@ # download a cat picture curl https://http.cat/200 --output cat.jpg """ -script = datacrunch.startup_scripts.create("catty businness", bash_script) +script = datacrunch.startup_scripts.create('catty businness', bash_script) # Print new startup script id, name, script code print(script.id) diff --git a/examples/storage_volumes.py b/examples/storage_volumes.py index 1dc51d0..9efe142 100644 --- a/examples/storage_volumes.py +++ b/examples/storage_volumes.py @@ -19,24 +19,19 @@ all_volumes = datacrunch.volumes.get() # Get all attached volumes -all_attached_volumes = datacrunch.volumes.get( - status=datacrunch.constants.volume_status.ATTACHED) +all_attached_volumes = datacrunch.volumes.get(status=datacrunch.constants.volume_status.ATTACHED) # Get volume by id -random_volume = datacrunch.volumes.get_by_id( - "0c41e387-3dd8-495f-a285-e861527f2f3d") +random_volume = datacrunch.volumes.get_by_id('0c41e387-3dd8-495f-a285-e861527f2f3d') # Create a 200 GB detached NVMe volume -nvme_volume = datacrunch.volumes.create(type=NVMe, - name="data-storage-1", - size=200) +nvme_volume = datacrunch.volumes.create(type=NVMe, name='data-storage-1', size=200) # Create a 500 GB HDD volume and attach it to an existing shutdown instance # Note: If the instance isn't shutdown an exception would be raised -hdd_volume = datacrunch.volumes.create(type=HDD, - name="data-storage-2", - size=500, - instance_id=INSTANCE_ID) +hdd_volume = datacrunch.volumes.create( + type=HDD, name='data-storage-2', size=500, instance_id=INSTANCE_ID +) nvme_volume_id = nvme_volume.id hdd_volume_id = hdd_volume.id @@ -48,7 +43,7 @@ datacrunch.volumes.detach([nvme_volume_id, hdd_volume_id]) # rename volume -datacrunch.volumes.rename(nvme_volume_id, "new-name") +datacrunch.volumes.rename(nvme_volume_id, 'new-name') # increase volume size datacrunch.volumes.increase_size(nvme_volume_id, 300) @@ -57,7 +52,7 @@ datacrunch.volumes.clone(nvme_volume_id) # clone volume and give it a new name and storage type (from NVMe to HDD) -datacrunch.volumes.clone(nvme_volume_id, name="my-cloned-volume", type=HDD) +datacrunch.volumes.clone(nvme_volume_id, name='my-cloned-volume', type=HDD) # clone multiple volumes at once datacrunch.volumes.clone([nvme_volume_id, hdd_volume_id]) diff --git a/pyproject.toml b/pyproject.toml index f6b3a9f..0321f6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,3 +49,8 @@ module-root = "" [tool.ruff] # TODO(shamrin) fix these errors and stop ignoring them lint.ignore = ["F401"] +line-length = 100 + +[tool.ruff.format] +quote-style = "single" +docstring-code-format = true diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 5ca0eaf..6a10724 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -7,7 +7,7 @@ Make sure to run the server and the account has enough balance before running the tests """ -BASE_URL = "http://localhost:3010/v1" +BASE_URL = 'http://localhost:3010/v1' # Load env variables, make sure there's an env file with valid client credentials load_dotenv() diff --git a/tests/integration_tests/test_instances.py b/tests/integration_tests/test_instances.py index 
75a9889..faa52c1 100644 --- a/tests/integration_tests/test_instances.py +++ b/tests/integration_tests/test_instances.py @@ -3,32 +3,32 @@ from datacrunch.datacrunch import DataCrunchClient from datacrunch.constants import Locations -IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true" +IN_GITHUB_ACTIONS = os.getenv('GITHUB_ACTIONS') == 'true' @pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Test doesn't work in Github Actions.") @pytest.mark.withoutresponses -class TestInstances(): - +class TestInstances: def test_create_instance(self, datacrunch_client: DataCrunchClient): # get ssh key ssh_key = datacrunch_client.ssh_keys.get()[0] # create instance instance = datacrunch_client.instances.create( - hostname="test-instance", + hostname='test-instance', location=Locations.FIN_01, instance_type='CPU.4V', - description="test instance", - image="ubuntu-18.04", - ssh_key_ids=[ssh_key.id]) + description='test instance', + image='ubuntu-18.04', + ssh_key_ids=[ssh_key.id], + ) # assert instance is created assert instance.id is not None assert instance.status == datacrunch_client.constants.instance_status.PROVISIONING # delete instance - datacrunch_client.instances.action(instance.id, "delete") + datacrunch_client.instances.action(instance.id, 'delete') # permanently delete all volumes in trash trash = datacrunch_client.volumes.get_in_trash() diff --git a/tests/integration_tests/test_locations.py b/tests/integration_tests/test_locations.py index f2c4613..8098a96 100644 --- a/tests/integration_tests/test_locations.py +++ b/tests/integration_tests/test_locations.py @@ -3,28 +3,30 @@ from datacrunch.datacrunch import DataCrunchClient from datacrunch.constants import Locations -IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true" +IN_GITHUB_ACTIONS = os.getenv('GITHUB_ACTIONS') == 'true' location_codes = [Locations.FIN_01, Locations.ICE_01] @pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Test doesn't work in Github Actions.") @pytest.mark.withoutresponses -class TestLocations(): - - def test_specific_instance_availability_in_specific_location(self, datacrunch_client: DataCrunchClient): +class TestLocations: + def test_specific_instance_availability_in_specific_location( + self, datacrunch_client: DataCrunchClient + ): # call the instance availability endpoint, for a specific location availability = datacrunch_client.instances.is_available( - 'CPU.4V', location_code=Locations.FIN_01) + 'CPU.4V', location_code=Locations.FIN_01 + ) assert availability is not None assert isinstance(availability, bool) def test_all_availabilies_in_specific_location(self, datacrunch_client: DataCrunchClient): - # call the instance availability endpoint, for a specific location availabilities = datacrunch_client.instances.get_availabilities( - location_code=Locations.FIN_01) + location_code=Locations.FIN_01 + ) assert availabilities is not None assert isinstance(availabilities, list) diff --git a/tests/integration_tests/test_volumes.py b/tests/integration_tests/test_volumes.py index c5e4d1d..b28ec28 100644 --- a/tests/integration_tests/test_volumes.py +++ b/tests/integration_tests/test_volumes.py @@ -4,7 +4,7 @@ from datacrunch.datacrunch import DataCrunchClient from datacrunch.constants import Locations, VolumeTypes, VolumeStatus -IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true" +IN_GITHUB_ACTIONS = os.getenv('GITHUB_ACTIONS') == 'true' NVMe = VolumeTypes.NVMe @@ -12,12 +12,10 @@ @pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Test doesn't work in Github Actions.") @pytest.mark.withoutresponses -class 
TestVolumes(): - +class TestVolumes: def test_get_volumes_from_trash(self, datacrunch_client: DataCrunchClient): # create new volume - volume = datacrunch_client.volumes.create( - type=NVMe, name="test_volume", size=100) + volume = datacrunch_client.volumes.create(type=NVMe, name='test_volume', size=100) # delete volume datacrunch_client.volumes.delete(volume.id) @@ -33,8 +31,7 @@ def test_get_volumes_from_trash(self, datacrunch_client: DataCrunchClient): def test_permanently_delete_detached_volumes(seld, datacrunch_client): # create new volume - volume = datacrunch_client.volumes.create( - type=NVMe, name="test_volume", size=100) + volume = datacrunch_client.volumes.create(type=NVMe, name='test_volume', size=100) # permanently delete the detached volume datacrunch_client.volumes.delete(volume.id, is_permanent=True) @@ -56,8 +53,7 @@ def test_permanently_delete_detached_volumes(seld, datacrunch_client): def test_permanently_delete_a_deleted_volume_from_trash(self, datacrunch_client): # create new volume - volume = datacrunch_client.volumes.create( - type=NVMe, name="test_volume", size=100) + volume = datacrunch_client.volumes.create(type=NVMe, name='test_volume', size=100) # delete volume datacrunch_client.volumes.delete(volume.id) @@ -83,7 +79,8 @@ def test_permanently_delete_a_deleted_volume_from_trash(self, datacrunch_client) def test_create_volume(self, datacrunch_client): # create new volume volume = datacrunch_client.volumes.create( - type=NVMe, name="test_volume", size=100, location=Locations.FIN_01) + type=NVMe, name='test_volume', size=100, location=Locations.FIN_01 + ) # assert volume is created assert volume.id is not None diff --git a/tests/unit_tests/authentication/test_authentication.py b/tests/unit_tests/authentication/test_authentication.py index 29e1648..523a017 100644 --- a/tests/unit_tests/authentication/test_authentication.py +++ b/tests/unit_tests/authentication/test_authentication.py @@ -9,8 +9,8 @@ INVALID_REQUEST = 'invalid_request' INVALID_REQUEST_MESSAGE = 'Your existence is invalid' -BASE_URL = "https://api-testing.datacrunch.io/v1" -CLIENT_ID = "0123456789xyz" +BASE_URL = 'https://api-testing.datacrunch.io/v1' +CLIENT_ID = '0123456789xyz' CLIENT_SECRET = 'zyx987654321' ACCESS_TOKEN = 'access' @@ -24,14 +24,13 @@ class TestAuthenticationService: - @pytest.fixture def authentication_service(self): return AuthenticationService(CLIENT_ID, CLIENT_SECRET, BASE_URL) @pytest.fixture def endpoint(self, http_client): - return http_client._base_url + "/oauth2/token" + return http_client._base_url + '/oauth2/token' def test_authenticate_successful(self, authentication_service, endpoint): # arrange - add response mock @@ -43,9 +42,9 @@ def test_authenticate_successful(self, authentication_service, endpoint): 'refresh_token': REFRESH_TOKEN, 'scope': SCOPE, 'token_type': TOKEN_TYPE, - 'expires_in': EXPIRES_IN + 'expires_in': EXPIRES_IN, }, - status=200 + status=200, ) # act @@ -65,8 +64,8 @@ def test_authenticate_failed(self, authentication_service, endpoint): responses.add( responses.POST, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -77,7 +76,9 @@ def test_authenticate_failed(self, authentication_service, endpoint): assert excinfo.value.code == INVALID_REQUEST assert excinfo.value.message == INVALID_REQUEST_MESSAGE assert responses.assert_call_count(endpoint, 1) is True - assert responses.calls[0].request.body == f'{{"grant_type": 
"client_credentials", "client_id": "{CLIENT_ID}", "client_secret": "{CLIENT_SECRET}"}}'.encode( + assert ( + responses.calls[0].request.body + == f'{{"grant_type": "client_credentials", "client_id": "{CLIENT_ID}", "client_secret": "{CLIENT_SECRET}"}}'.encode() ) def test_refresh_successful(self, authentication_service, endpoint): @@ -90,11 +91,18 @@ def test_refresh_successful(self, authentication_service, endpoint): 'refresh_token': REFRESH_TOKEN, 'scope': SCOPE, 'token_type': TOKEN_TYPE, - 'expires_in': EXPIRES_IN + 'expires_in': EXPIRES_IN, }, - match=[matchers.json_params_matcher( - {"grant_type": "client_credentials", "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET})], - status=200 + match=[ + matchers.json_params_matcher( + { + 'grant_type': 'client_credentials', + 'client_id': CLIENT_ID, + 'client_secret': CLIENT_SECRET, + } + ) + ], + status=200, ) # add another response for the refresh token grant @@ -106,11 +114,14 @@ def test_refresh_successful(self, authentication_service, endpoint): 'refresh_token': REFRESH_TOKEN2, 'scope': SCOPE, 'token_type': TOKEN_TYPE, - 'expires_in': EXPIRES_IN + 'expires_in': EXPIRES_IN, }, - match=[matchers.json_params_matcher( - {"grant_type": "refresh_token", "refresh_token": REFRESH_TOKEN})], - status=200 + match=[ + matchers.json_params_matcher( + {'grant_type': 'refresh_token', 'refresh_token': REFRESH_TOKEN} + ) + ], + status=200, ) # act @@ -123,7 +134,9 @@ def test_refresh_successful(self, authentication_service, endpoint): assert authentication_service._scope == SCOPE assert authentication_service._token_type == TOKEN_TYPE assert authentication_service._expires_at is not None - assert responses.calls[0].request.body == f'{{"grant_type": "client_credentials", "client_id": "{CLIENT_ID}", "client_secret": "{CLIENT_SECRET}"}}'.encode( + assert ( + responses.calls[0].request.body + == f'{{"grant_type": "client_credentials", "client_id": "{CLIENT_ID}", "client_secret": "{CLIENT_SECRET}"}}'.encode() ) auth_data2 = authentication_service.refresh() # refresh @@ -134,7 +147,9 @@ def test_refresh_successful(self, authentication_service, endpoint): assert authentication_service._scope == SCOPE assert authentication_service._token_type == TOKEN_TYPE assert authentication_service._expires_at is not None - assert responses.calls[1].request.body == f'{{"grant_type": "refresh_token", "refresh_token": "{REFRESH_TOKEN}"}}'.encode( + assert ( + responses.calls[1].request.body + == f'{{"grant_type": "refresh_token", "refresh_token": "{REFRESH_TOKEN}"}}'.encode() ) assert responses.assert_call_count(endpoint, 2) is True @@ -149,21 +164,31 @@ def test_refresh_failed(self, authentication_service, endpoint): 'refresh_token': REFRESH_TOKEN, 'scope': SCOPE, 'token_type': TOKEN_TYPE, - 'expires_in': EXPIRES_IN + 'expires_in': EXPIRES_IN, }, - match=[matchers.json_params_matcher( - {"grant_type": "client_credentials", "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET})], - status=200 + match=[ + matchers.json_params_matcher( + { + 'grant_type': 'client_credentials', + 'client_id': CLIENT_ID, + 'client_secret': CLIENT_SECRET, + } + ) + ], + status=200, ) # second response for the refresh - failed responses.add( responses.POST, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - match=[matchers.json_params_matcher( - {"grant_type": "refresh_token", "refresh_token": REFRESH_TOKEN})], - status=500 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + match=[ + matchers.json_params_matcher( + {'grant_type': 
'refresh_token', 'refresh_token': REFRESH_TOKEN} + ) + ], + status=500, ) # act @@ -176,9 +201,13 @@ def test_refresh_failed(self, authentication_service, endpoint): assert excinfo.value.code == INVALID_REQUEST assert excinfo.value.message == INVALID_REQUEST_MESSAGE assert responses.assert_call_count(endpoint, 2) is True - assert responses.calls[0].request.body == f'{{"grant_type": "client_credentials", "client_id": "{CLIENT_ID}", "client_secret": "{CLIENT_SECRET}"}}'.encode( + assert ( + responses.calls[0].request.body + == f'{{"grant_type": "client_credentials", "client_id": "{CLIENT_ID}", "client_secret": "{CLIENT_SECRET}"}}'.encode() ) - assert responses.calls[1].request.body == f'{{"grant_type": "refresh_token", "refresh_token": "{REFRESH_TOKEN}"}}'.encode( + assert ( + responses.calls[1].request.body + == f'{{"grant_type": "refresh_token", "refresh_token": "{REFRESH_TOKEN}"}}'.encode() ) def test_is_expired(self, authentication_service, endpoint): diff --git a/tests/unit_tests/balance/test_balance.py b/tests/unit_tests/balance/test_balance.py index a844998..93e508c 100644 --- a/tests/unit_tests/balance/test_balance.py +++ b/tests/unit_tests/balance/test_balance.py @@ -1,4 +1,4 @@ -import responses # https://github.com/getsentry/responses +import responses # https://github.com/getsentry/responses from datacrunch.balance.balance import BalanceService, Balance @@ -7,9 +7,9 @@ def test_balance(http_client): # arrange - add response mock responses.add( responses.GET, - http_client._base_url + "/balance", - json={"amount": 50.5, "currency": "usd"}, - status=200 + http_client._base_url + '/balance', + json={'amount': 50.5, 'currency': 'usd'}, + status=200, ) balance_service = BalanceService(http_client) @@ -22,4 +22,4 @@ def test_balance(http_client): assert isinstance(balance.amount, float) assert isinstance(balance.currency, str) assert balance.amount == 50.5 - assert balance.currency == "usd" + assert balance.currency == 'usd' diff --git a/tests/unit_tests/conftest.py b/tests/unit_tests/conftest.py index 413787d..aa7bf6b 100644 --- a/tests/unit_tests/conftest.py +++ b/tests/unit_tests/conftest.py @@ -3,10 +3,10 @@ from datacrunch.http_client.http_client import HTTPClient -BASE_URL = "https://api-testing.datacrunch.io/v1" -ACCESS_TOKEN = "test-token" -CLIENT_ID = "0123456789xyz" -CLIENT_SECRET = "0123456789xyz" +BASE_URL = 'https://api-testing.datacrunch.io/v1' +ACCESS_TOKEN = 'test-token' +CLIENT_ID = '0123456789xyz' +CLIENT_SECRET = '0123456789xyz' @pytest.fixture diff --git a/tests/unit_tests/containers/__init__.py b/tests/unit_tests/containers/__init__.py index 0519ecb..e69de29 100644 --- a/tests/unit_tests/containers/__init__.py +++ b/tests/unit_tests/containers/__init__.py @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/tests/unit_tests/containers/test_containers.py b/tests/unit_tests/containers/test_containers.py index fb105a3..3e5c3cb 100644 --- a/tests/unit_tests/containers/test_containers.py +++ b/tests/unit_tests/containers/test_containers.py @@ -35,146 +35,111 @@ ) from datacrunch.exceptions import APIException -DEPLOYMENT_NAME = "test-deployment" -CONTAINER_NAME = "test-container" -COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE = "General Compute" -COMPUTE_RESOURCE_NAME_H100 = "H100" -SECRET_NAME = "test-secret" -SECRET_VALUE = "test-secret-value" -REGISTRY_CREDENTIAL_NAME = "test-credential" -ENV_VAR_NAME = "TEST_VAR" -ENV_VAR_VALUE = "test-value" - -INVALID_REQUEST = "INVALID_REQUEST" -INVALID_REQUEST_MESSAGE = "Invalid request" +DEPLOYMENT_NAME = 'test-deployment' 
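(Aside: the unit tests in this patch lean heavily on the `responses` library together with `matchers.json_params_matcher` to pin down exact request bodies. A minimal, self-contained sketch of that pattern, using a hypothetical endpoint rather than anything from this diff:)

```python
import requests
import responses
from responses import matchers


@responses.activate
def test_json_body_is_matched():
    # Hypothetical endpoint, used only to illustrate the mocking pattern.
    url = 'https://api.example.com/v1/oauth2/token'

    # The mock only answers requests whose JSON body equals the matcher dict;
    # a mismatching body surfaces as a ConnectionError, failing the test.
    responses.add(
        responses.POST,
        url,
        json={'access_token': 'token', 'expires_in': 3600},
        status=200,
        match=[matchers.json_params_matcher({'grant_type': 'client_credentials'})],
    )

    resp = requests.post(url, json={'grant_type': 'client_credentials'})

    assert resp.status_code == 200
    assert resp.json()['access_token'] == 'token'
    assert responses.assert_call_count(url, 1) is True
```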
+CONTAINER_NAME = 'test-container' +COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE = 'General Compute' +COMPUTE_RESOURCE_NAME_H100 = 'H100' +SECRET_NAME = 'test-secret' +SECRET_VALUE = 'test-secret-value' +REGISTRY_CREDENTIAL_NAME = 'test-credential' +ENV_VAR_NAME = 'TEST_VAR' +ENV_VAR_VALUE = 'test-value' + +INVALID_REQUEST = 'INVALID_REQUEST' +INVALID_REQUEST_MESSAGE = 'Invalid request' # Sample deployment data for testing DEPLOYMENT_DATA = { - "name": DEPLOYMENT_NAME, - "container_registry_settings": { - "is_private": False - }, - "containers": [ + 'name': DEPLOYMENT_NAME, + 'container_registry_settings': {'is_private': False}, + 'containers': [ { - "name": CONTAINER_NAME, - "image": "nginx:latest", - "exposed_port": 80, - "healthcheck": { - "enabled": True, - "port": 80, - "path": "/health" - }, - "entrypoint_overrides": { - "enabled": False - }, - "env": [ + 'name': CONTAINER_NAME, + 'image': 'nginx:latest', + 'exposed_port': 80, + 'healthcheck': {'enabled': True, 'port': 80, 'path': '/health'}, + 'entrypoint_overrides': {'enabled': False}, + 'env': [ { - "name": "ENV_VAR1", - "value_or_reference_to_secret": "value1", - "type": "plain" + 'name': 'ENV_VAR1', + 'value_or_reference_to_secret': 'value1', + 'type': 'plain', } ], - "volume_mounts": [ - { - "type": "scratch", - "mount_path": "/data" - } - ] + 'volume_mounts': [{'type': 'scratch', 'mount_path': '/data'}], } ], - "compute": { - "name": COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, - "size": 1, - "is_available": True + 'compute': { + 'name': COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, + 'size': 1, + 'is_available': True, }, - "is_spot": False, - "endpoint_base_url": "https://test-deployment.datacrunch.io", - "scaling": { - "min_replica_count": 1, - "max_replica_count": 3, - "scale_down_policy": { - "delay_seconds": 300 - }, - "scale_up_policy": { - "delay_seconds": 60 + 'is_spot': False, + 'endpoint_base_url': 'https://test-deployment.datacrunch.io', + 'scaling': { + 'min_replica_count': 1, + 'max_replica_count': 3, + 'scale_down_policy': {'delay_seconds': 300}, + 'scale_up_policy': {'delay_seconds': 60}, + 'queue_message_ttl_seconds': 3600, + 'concurrent_requests_per_replica': 10, + 'scaling_triggers': { + 'queue_load': {'threshold': 0.75}, + 'cpu_utilization': {'enabled': True, 'threshold': 0.8}, + 'gpu_utilization': {'enabled': False}, }, - "queue_message_ttl_seconds": 3600, - "concurrent_requests_per_replica": 10, - "scaling_triggers": { - "queue_load": { - "threshold": 0.75 - }, - "cpu_utilization": { - "enabled": True, - "threshold": 0.8 - }, - "gpu_utilization": { - "enabled": False - } - } }, - "created_at": "2023-01-01T00:00:00+00:00" + 'created_at': '2023-01-01T00:00:00+00:00', } # Sample compute resources data COMPUTE_RESOURCES_DATA = [ - { - "name": COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, - "size": 1, - "is_available": True - }, - { - "name": COMPUTE_RESOURCE_NAME_H100, - "size": 4, - "is_available": True - } + {'name': COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, 'size': 1, 'is_available': True}, + {'name': COMPUTE_RESOURCE_NAME_H100, 'size': 4, 'is_available': True}, ] # Sample secrets data SECRETS_DATA = [ { - "name": SECRET_NAME, - "created_at": "2023-01-01T00:00:00+00:00", - "secret_type": "generic" + 'name': SECRET_NAME, + 'created_at': '2023-01-01T00:00:00+00:00', + 'secret_type': 'generic', } ] # Sample registry credentials data REGISTRY_CREDENTIALS_DATA = [ - { - "name": REGISTRY_CREDENTIAL_NAME, - "created_at": "2023-01-01T00:00:00+00:00" - } + {'name': REGISTRY_CREDENTIAL_NAME, 'created_at': '2023-01-01T00:00:00+00:00'} ] # Sample 
deployment status data -DEPLOYMENT_STATUS_DATA = { - "status": "healthy" -} +DEPLOYMENT_STATUS_DATA = {'status': 'healthy'} # Sample replicas data REPLICAS_DATA = { - "list": [ + 'list': [ { - "id": "replica-1", - "status": "running", - "started_at": "2023-01-01T00:00:00+00:00" + 'id': 'replica-1', + 'status': 'running', + 'started_at': '2023-01-01T00:00:00+00:00', } ] } # Sample environment variables data -ENV_VARS_DATA = [{ - "container_name": CONTAINER_NAME, - "env": [ - { - "name": ENV_VAR_NAME, - "value_or_reference_to_secret": ENV_VAR_VALUE, - "type": "plain" - } - ] -}] +ENV_VARS_DATA = [ + { + 'container_name': CONTAINER_NAME, + 'env': [ + { + 'name': ENV_VAR_NAME, + 'value_or_reference_to_secret': ENV_VAR_VALUE, + 'type': 'plain', + } + ], + } +] class TestContainersService: @@ -201,12 +166,7 @@ def registry_credentials_endpoint(self, http_client): @responses.activate def test_get_deployments(self, containers_service, deployments_endpoint): # arrange - add response mock - responses.add( - responses.GET, - deployments_endpoint, - json=[DEPLOYMENT_DATA], - status=200 - ) + responses.add(responses.GET, deployments_endpoint, json=[DEPLOYMENT_DATA], status=200) # act deployments = containers_service.get_deployments() @@ -226,13 +186,8 @@ def test_get_deployments(self, containers_service, deployments_endpoint): @responses.activate def test_get_deployment_by_name(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}" - responses.add( - responses.GET, - url, - json=DEPLOYMENT_DATA, - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}' + responses.add(responses.GET, url, json=DEPLOYMENT_DATA, status=200) # act deployment = containers_service.get_deployment_by_name(DEPLOYMENT_NAME) @@ -248,17 +203,17 @@ def test_get_deployment_by_name(self, containers_service, deployments_endpoint): @responses.activate def test_get_deployment_by_name_error(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/nonexistent" + url = f'{deployments_endpoint}/nonexistent' responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act with pytest.raises(APIException) as excinfo: - containers_service.get_deployment_by_name("nonexistent") + containers_service.get_deployment_by_name('nonexistent') # assert assert excinfo.value.code == INVALID_REQUEST @@ -268,30 +223,26 @@ def test_get_deployment_by_name_error(self, containers_service, deployments_endp @responses.activate def test_create_deployment(self, containers_service, deployments_endpoint): # arrange - add response mock - responses.add( - responses.POST, - deployments_endpoint, - json=DEPLOYMENT_DATA, - status=200 - ) + responses.add(responses.POST, deployments_endpoint, json=DEPLOYMENT_DATA, status=200) container = Container( - image="nginx:latest", + image='nginx:latest', exposed_port=80, - healthcheck=HealthcheckSettings( - enabled=True, port=80, path="/health"), + healthcheck=HealthcheckSettings(enabled=True, port=80, path='/health'), entrypoint_overrides=EntrypointOverridesSettings(enabled=False), - env=[EnvVar( - name="ENV_VAR1", value_or_reference_to_secret="value1", type=EnvVarType.PLAIN)], - volume_mounts=[VolumeMount( - type=VolumeMountType.SCRATCH, mount_path="/data")] + env=[ + EnvVar( + name='ENV_VAR1', + value_or_reference_to_secret='value1', + 
type=EnvVarType.PLAIN, + ) + ], + volume_mounts=[VolumeMount(type=VolumeMountType.SCRATCH, mount_path='/data')], ) - compute = ComputeResource( - name=COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, size=1) + compute = ComputeResource(name=COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, size=1) - container_registry_settings = ContainerRegistrySettings( - is_private=False) + container_registry_settings = ContainerRegistrySettings(is_private=False) # create deployment object deployment = Deployment( @@ -299,7 +250,7 @@ def test_create_deployment(self, containers_service, deployments_endpoint): container_registry_settings=container_registry_settings, containers=[container], compute=compute, - is_spot=False + is_spot=False, ) # act @@ -316,37 +267,25 @@ def test_create_deployment(self, containers_service, deployments_endpoint): @responses.activate def test_update_deployment(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}" - responses.add( - responses.PATCH, - url, - json=DEPLOYMENT_DATA, - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}' + responses.add(responses.PATCH, url, json=DEPLOYMENT_DATA, status=200) # create deployment object - container = Container( - name=CONTAINER_NAME, - image="nginx:latest", - exposed_port=80 - ) + container = Container(name=CONTAINER_NAME, image='nginx:latest', exposed_port=80) - container_registry_settings = ContainerRegistrySettings( - is_private=False) + container_registry_settings = ContainerRegistrySettings(is_private=False) - compute = ComputeResource( - name=COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, size=1) + compute = ComputeResource(name=COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE, size=1) deployment = Deployment( name=DEPLOYMENT_NAME, container_registry_settings=container_registry_settings, containers=[container], - compute=compute + compute=compute, ) # act - updated_deployment = containers_service.update_deployment( - DEPLOYMENT_NAME, deployment) + updated_deployment = containers_service.update_deployment(DEPLOYMENT_NAME, deployment) # assert assert isinstance(updated_deployment, Deployment) @@ -359,12 +298,8 @@ def test_update_deployment(self, containers_service, deployments_endpoint): @responses.activate def test_delete_deployment(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}" - responses.add( - responses.DELETE, - url, - status=204 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}' + responses.add(responses.DELETE, url, status=204) # act containers_service.delete_deployment(DEPLOYMENT_NAME) @@ -375,13 +310,8 @@ def test_delete_deployment(self, containers_service, deployments_endpoint): @responses.activate def test_get_deployment_status(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/status" - responses.add( - responses.GET, - url, - json=DEPLOYMENT_STATUS_DATA, - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/status' + responses.add(responses.GET, url, json=DEPLOYMENT_STATUS_DATA, status=200) # act status = containers_service.get_deployment_status(DEPLOYMENT_NAME) @@ -393,12 +323,8 @@ def test_get_deployment_status(self, containers_service, deployments_endpoint): @responses.activate def test_restart_deployment(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/restart" - responses.add( - responses.POST, - url, - 
status=204 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/restart' + responses.add(responses.POST, url, status=204) # act containers_service.restart_deployment(DEPLOYMENT_NAME) @@ -409,17 +335,11 @@ def test_restart_deployment(self, containers_service, deployments_endpoint): @responses.activate def test_get_deployment_scaling_options(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/scaling" - responses.add( - responses.GET, - url, - json=DEPLOYMENT_DATA["scaling"], - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/scaling' + responses.add(responses.GET, url, json=DEPLOYMENT_DATA['scaling'], status=200) # act - scaling_options = containers_service.get_deployment_scaling_options( - DEPLOYMENT_NAME) + scaling_options = containers_service.get_deployment_scaling_options(DEPLOYMENT_NAME) # assert assert isinstance(scaling_options, ScalingOptions) @@ -430,13 +350,8 @@ def test_get_deployment_scaling_options(self, containers_service, deployments_en @responses.activate def test_update_deployment_scaling_options(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/scaling" - responses.add( - responses.PATCH, - url, - json=DEPLOYMENT_DATA["scaling"], - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/scaling' + responses.add(responses.PATCH, url, json=DEPLOYMENT_DATA['scaling'], status=200) # create scaling options object scaling_options = ScalingOptions( @@ -448,15 +363,15 @@ def test_update_deployment_scaling_options(self, containers_service, deployments concurrent_requests_per_replica=10, scaling_triggers=ScalingTriggers( queue_load=QueueLoadScalingTrigger(threshold=0.75), - cpu_utilization=UtilizationScalingTrigger( - enabled=True, threshold=0.8), - gpu_utilization=UtilizationScalingTrigger(enabled=False) - ) + cpu_utilization=UtilizationScalingTrigger(enabled=True, threshold=0.8), + gpu_utilization=UtilizationScalingTrigger(enabled=False), + ), ) # act updated_scaling = containers_service.update_deployment_scaling_options( - DEPLOYMENT_NAME, scaling_options) + DEPLOYMENT_NAME, scaling_options + ) # assert assert isinstance(updated_scaling, ScalingOptions) @@ -467,32 +382,22 @@ def test_update_deployment_scaling_options(self, containers_service, deployments @responses.activate def test_get_deployment_replicas(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/replicas" - responses.add( - responses.GET, - url, - json=REPLICAS_DATA, - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/replicas' + responses.add(responses.GET, url, json=REPLICAS_DATA, status=200) # act replicas = containers_service.get_deployment_replicas(DEPLOYMENT_NAME) # assert assert len(replicas) == 1 - assert replicas[0] == ReplicaInfo( - "replica-1", "running", "2023-01-01T00:00:00+00:00") + assert replicas[0] == ReplicaInfo('replica-1', 'running', '2023-01-01T00:00:00+00:00') assert responses.assert_call_count(url, 1) is True @responses.activate def test_purge_deployment_queue(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/purge-queue" - responses.add( - responses.POST, - url, - status=204 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/purge-queue' + responses.add(responses.POST, url, status=204) # act 
containers_service.purge_deployment_queue(DEPLOYMENT_NAME) @@ -503,12 +408,8 @@ def test_purge_deployment_queue(self, containers_service, deployments_endpoint): @responses.activate def test_pause_deployment(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/pause" - responses.add( - responses.POST, - url, - status=204 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/pause' + responses.add(responses.POST, url, status=204) # act containers_service.pause_deployment(DEPLOYMENT_NAME) @@ -519,12 +420,8 @@ def test_pause_deployment(self, containers_service, deployments_endpoint): @responses.activate def test_resume_deployment(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/resume" - responses.add( - responses.POST, - url, - status=204 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/resume' + responses.add(responses.POST, url, status=204) # act containers_service.resume_deployment(DEPLOYMENT_NAME) @@ -535,103 +432,103 @@ def test_resume_deployment(self, containers_service, deployments_endpoint): @responses.activate def test_get_deployment_environment_variables(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables" - responses.add( - responses.GET, - url, - json=ENV_VARS_DATA, - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables' + responses.add(responses.GET, url, json=ENV_VARS_DATA, status=200) # act - env_vars = containers_service.get_deployment_environment_variables( - DEPLOYMENT_NAME) + env_vars = containers_service.get_deployment_environment_variables(DEPLOYMENT_NAME) # assert - assert env_vars[CONTAINER_NAME] == [EnvVar( - name=ENV_VAR_NAME, - value_or_reference_to_secret=ENV_VAR_VALUE, - type=EnvVarType.PLAIN - )] + assert env_vars[CONTAINER_NAME] == [ + EnvVar( + name=ENV_VAR_NAME, + value_or_reference_to_secret=ENV_VAR_VALUE, + type=EnvVarType.PLAIN, + ) + ] assert responses.assert_call_count(url, 1) is True @responses.activate def test_add_deployment_environment_variables(self, containers_service, deployments_endpoint): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables" - responses.add( - responses.POST, - url, - json=ENV_VARS_DATA, - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables' + responses.add(responses.POST, url, json=ENV_VARS_DATA, status=200) # act - env_vars = [EnvVar( - name=ENV_VAR_NAME, - value_or_reference_to_secret=ENV_VAR_VALUE, - type=EnvVarType.PLAIN - )] + env_vars = [ + EnvVar( + name=ENV_VAR_NAME, + value_or_reference_to_secret=ENV_VAR_VALUE, + type=EnvVarType.PLAIN, + ) + ] result = containers_service.add_deployment_environment_variables( - DEPLOYMENT_NAME, CONTAINER_NAME, env_vars) + DEPLOYMENT_NAME, CONTAINER_NAME, env_vars + ) # assert - assert result[CONTAINER_NAME] == [EnvVar( - name=ENV_VAR_NAME, - value_or_reference_to_secret=ENV_VAR_VALUE, - type=EnvVarType.PLAIN - )] + assert result[CONTAINER_NAME] == [ + EnvVar( + name=ENV_VAR_NAME, + value_or_reference_to_secret=ENV_VAR_VALUE, + type=EnvVarType.PLAIN, + ) + ] assert responses.assert_call_count(url, 1) is True @responses.activate - def test_update_deployment_environment_variables(self, containers_service, deployments_endpoint): + def test_update_deployment_environment_variables( + self, containers_service, 
deployments_endpoint + ): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables" - responses.add( - responses.PATCH, - url, - json=ENV_VARS_DATA[0], - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables' + responses.add(responses.PATCH, url, json=ENV_VARS_DATA[0], status=200) # act - env_vars = [EnvVar( - name=ENV_VAR_NAME, - value_or_reference_to_secret=ENV_VAR_VALUE, - type=EnvVarType.PLAIN - )] + env_vars = [ + EnvVar( + name=ENV_VAR_NAME, + value_or_reference_to_secret=ENV_VAR_VALUE, + type=EnvVarType.PLAIN, + ) + ] result = containers_service.update_deployment_environment_variables( - DEPLOYMENT_NAME, CONTAINER_NAME, env_vars) + DEPLOYMENT_NAME, CONTAINER_NAME, env_vars + ) # assert - assert result[CONTAINER_NAME] == [EnvVar( - name=ENV_VAR_NAME, - value_or_reference_to_secret=ENV_VAR_VALUE, - type=EnvVarType.PLAIN - )] + assert result[CONTAINER_NAME] == [ + EnvVar( + name=ENV_VAR_NAME, + value_or_reference_to_secret=ENV_VAR_VALUE, + type=EnvVarType.PLAIN, + ) + ] assert responses.assert_call_count(url, 1) is True @responses.activate - def test_delete_deployment_environment_variables(self, containers_service, deployments_endpoint): + def test_delete_deployment_environment_variables( + self, containers_service, deployments_endpoint + ): # arrange - add response mock - url = f"{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables" - responses.add( - responses.DELETE, - url, - json=ENV_VARS_DATA, - status=200 - ) + url = f'{deployments_endpoint}/{DEPLOYMENT_NAME}/environment-variables' + responses.add(responses.DELETE, url, json=ENV_VARS_DATA, status=200) # act result = containers_service.delete_deployment_environment_variables( - DEPLOYMENT_NAME, CONTAINER_NAME, ["random-env-var-name"]) + DEPLOYMENT_NAME, CONTAINER_NAME, ['random-env-var-name'] + ) # assert - assert result == {CONTAINER_NAME: [EnvVar( - name=ENV_VAR_NAME, - value_or_reference_to_secret=ENV_VAR_VALUE, - type=EnvVarType.PLAIN - )]} + assert result == { + CONTAINER_NAME: [ + EnvVar( + name=ENV_VAR_NAME, + value_or_reference_to_secret=ENV_VAR_VALUE, + type=EnvVarType.PLAIN, + ) + ] + } assert responses.assert_call_count(url, 1) is True @responses.activate @@ -642,7 +539,7 @@ def test_get_compute_resources(self, containers_service, compute_resources_endpo compute_resources_endpoint, # Wrap in list to simulate resource groups json=[COMPUTE_RESOURCES_DATA], - status=200 + status=200, ) # act @@ -655,17 +552,18 @@ def test_get_compute_resources(self, containers_service, compute_resources_endpo assert resources[0].name == COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE assert resources[0].size == 1 assert resources[0].is_available - assert responses.assert_call_count( - compute_resources_endpoint, 1) is True + assert responses.assert_call_count(compute_resources_endpoint, 1) is True @responses.activate - def test_get_compute_resources_filter_by_size(self, containers_service, compute_resources_endpoint): + def test_get_compute_resources_filter_by_size( + self, containers_service, compute_resources_endpoint + ): # arrange - add response mock responses.add( responses.GET, compute_resources_endpoint, json=[COMPUTE_RESOURCES_DATA], - status=200 + status=200, ) # act @@ -678,17 +576,18 @@ def test_get_compute_resources_filter_by_size(self, containers_service, compute_ assert resources[0].name == COMPUTE_RESOURCE_NAME_H100 assert resources[0].size == 4 assert resources[0].is_available - assert responses.assert_call_count( - compute_resources_endpoint, 
1) is True + assert responses.assert_call_count(compute_resources_endpoint, 1) is True @responses.activate - def test_get_compute_resources_filter_by_availability(self, containers_service, compute_resources_endpoint): + def test_get_compute_resources_filter_by_availability( + self, containers_service, compute_resources_endpoint + ): # arrange - add response mock responses.add( responses.GET, compute_resources_endpoint, json=[COMPUTE_RESOURCES_DATA], - status=200 + status=200, ) # act @@ -698,22 +597,22 @@ def test_get_compute_resources_filter_by_availability(self, containers_service, assert isinstance(resources, list) assert len(resources) == 2 assert all(r.is_available for r in resources) - assert responses.assert_call_count( - compute_resources_endpoint, 1) is True + assert responses.assert_call_count(compute_resources_endpoint, 1) is True @responses.activate - def test_get_compute_resources_filter_by_size_and_availability(self, containers_service, compute_resources_endpoint): + def test_get_compute_resources_filter_by_size_and_availability( + self, containers_service, compute_resources_endpoint + ): # arrange - add response mock responses.add( responses.GET, compute_resources_endpoint, json=[COMPUTE_RESOURCES_DATA], - status=200 + status=200, ) # act - resources = containers_service.get_compute_resources( - size=1, is_available=True) + resources = containers_service.get_compute_resources(size=1, is_available=True) # assert assert isinstance(resources, list) @@ -721,18 +620,12 @@ def test_get_compute_resources_filter_by_size_and_availability(self, containers_ assert resources[0].name == COMPUTE_RESOURCE_NAME_GENERAL_COMPUTE assert resources[0].size == 1 assert resources[0].is_available - assert responses.assert_call_count( - compute_resources_endpoint, 1) is True + assert responses.assert_call_count(compute_resources_endpoint, 1) is True @responses.activate def test_get_secrets(self, containers_service, secrets_endpoint): # arrange - add response mock - responses.add( - responses.GET, - secrets_endpoint, - json=SECRETS_DATA, - status=200 - ) + responses.add(responses.GET, secrets_endpoint, json=SECRETS_DATA, status=200) # act secrets = containers_service.get_secrets() @@ -754,9 +647,9 @@ def test_create_secret(self, containers_service, secrets_endpoint): match=[ matchers.json_params_matcher( # The test will now fail if the request body doesn't match the expected JSON structure - {"name": SECRET_NAME, "value": SECRET_VALUE} + {'name': SECRET_NAME, 'value': SECRET_VALUE} ) - ] + ], ) # act @@ -768,12 +661,8 @@ def test_create_secret(self, containers_service, secrets_endpoint): @responses.activate def test_delete_secret(self, containers_service, secrets_endpoint): # arrange - add response mock - url = f"{secrets_endpoint}/{SECRET_NAME}?force=false" - responses.add( - responses.DELETE, - url, - status=200 - ) + url = f'{secrets_endpoint}/{SECRET_NAME}?force=false' + responses.add(responses.DELETE, url, status=200) # act containers_service.delete_secret(SECRET_NAME) @@ -781,17 +670,13 @@ def test_delete_secret(self, containers_service, secrets_endpoint): # assert assert responses.assert_call_count(url, 1) is True request = responses.calls[0].request - assert "force=false" in request.url + assert 'force=false' in request.url @responses.activate def test_delete_secret_with_force(self, containers_service, secrets_endpoint): # arrange - url = f"{secrets_endpoint}/{SECRET_NAME}?force=true" - responses.add( - responses.DELETE, - url, - status=200 - ) + url = 
f'{secrets_endpoint}/{SECRET_NAME}?force=true' + responses.add(responses.DELETE, url, status=200) # act containers_service.delete_secret(SECRET_NAME, force=True) @@ -799,7 +684,7 @@ def test_delete_secret_with_force(self, containers_service, secrets_endpoint): # assert assert responses.assert_call_count(url, 1) is True request = responses.calls[0].request - assert "force=true" in request.url + assert 'force=true' in request.url @responses.activate def test_get_registry_credentials(self, containers_service, registry_credentials_endpoint): @@ -808,7 +693,7 @@ def test_get_registry_credentials(self, containers_service, registry_credentials responses.GET, registry_credentials_endpoint, json=REGISTRY_CREDENTIALS_DATA, - status=200 + status=200, ) # act @@ -819,141 +704,122 @@ def test_get_registry_credentials(self, containers_service, registry_credentials assert len(credentials) == 1 assert isinstance(credentials[0], RegistryCredential) assert credentials[0].name == REGISTRY_CREDENTIAL_NAME - assert responses.assert_call_count( - registry_credentials_endpoint, 1) is True + assert responses.assert_call_count(registry_credentials_endpoint, 1) is True @responses.activate def test_add_registry_credentials(self, containers_service, registry_credentials_endpoint): - USERNAME = "username" - ACCESS_TOKEN = "token" + USERNAME = 'username' + ACCESS_TOKEN = 'token' # arrange - add response mock - responses.add( - responses.POST, - registry_credentials_endpoint, - status=201 - ) + responses.add(responses.POST, registry_credentials_endpoint, status=201) # act creds = DockerHubCredentials( - name=REGISTRY_CREDENTIAL_NAME, - username=USERNAME, - access_token=ACCESS_TOKEN + name=REGISTRY_CREDENTIAL_NAME, username=USERNAME, access_token=ACCESS_TOKEN ) containers_service.add_registry_credentials(creds) # assert - assert responses.assert_call_count( - registry_credentials_endpoint, 1) is True - assert responses.calls[0].request.body.decode( - 'utf-8') == '{"name": "test-credential", "type": "dockerhub", "username": "username", "access_token": "token"}' + assert responses.assert_call_count(registry_credentials_endpoint, 1) is True + assert ( + responses.calls[0].request.body.decode('utf-8') + == '{"name": "test-credential", "type": "dockerhub", "username": "username", "access_token": "token"}' + ) @responses.activate - def test_add_registry_credentials_github(self, containers_service, registry_credentials_endpoint): + def test_add_registry_credentials_github( + self, containers_service, registry_credentials_endpoint + ): # arrange - responses.add( - responses.POST, - registry_credentials_endpoint, - status=201 - ) + responses.add(responses.POST, registry_credentials_endpoint, status=201) # act creds = GithubCredentials( name=REGISTRY_CREDENTIAL_NAME, - username="test-username", - access_token="test-token" + username='test-username', + access_token='test-token', ) containers_service.add_registry_credentials(creds) # assert - assert responses.assert_call_count( - registry_credentials_endpoint, 1) is True - assert responses.calls[0].request.body.decode( - 'utf-8') == '{"name": "test-credential", "type": "ghcr", "username": "test-username", "access_token": "test-token"}' + assert responses.assert_call_count(registry_credentials_endpoint, 1) is True + assert ( + responses.calls[0].request.body.decode('utf-8') + == '{"name": "test-credential", "type": "ghcr", "username": "test-username", "access_token": "test-token"}' + ) @responses.activate def test_add_registry_credentials_gcr(self, containers_service, 
registry_credentials_endpoint): # arrange - responses.add( - responses.POST, - registry_credentials_endpoint, - status=201 - ) + responses.add(responses.POST, registry_credentials_endpoint, status=201) # act service_account_key = '{"key": "value"}' creds = GCRCredentials( - name=REGISTRY_CREDENTIAL_NAME, - service_account_key=service_account_key + name=REGISTRY_CREDENTIAL_NAME, service_account_key=service_account_key ) containers_service.add_registry_credentials(creds) # assert - assert responses.assert_call_count( - registry_credentials_endpoint, 1) is True - assert responses.calls[0].request.body.decode( - 'utf-8') == '{"name": "test-credential", "type": "gcr", "service_account_key": "{\\"key\\": \\"value\\"}"}' + assert responses.assert_call_count(registry_credentials_endpoint, 1) is True + assert ( + responses.calls[0].request.body.decode('utf-8') + == '{"name": "test-credential", "type": "gcr", "service_account_key": "{\\"key\\": \\"value\\"}"}' + ) @responses.activate - def test_add_registry_credentials_aws_ecr(self, containers_service, registry_credentials_endpoint): + def test_add_registry_credentials_aws_ecr( + self, containers_service, registry_credentials_endpoint + ): # arrange - responses.add( - responses.POST, - registry_credentials_endpoint, - status=201 - ) + responses.add(responses.POST, registry_credentials_endpoint, status=201) # act creds = AWSECRCredentials( name=REGISTRY_CREDENTIAL_NAME, - access_key_id="test-key", - secret_access_key="test-secret", - region="us-west-2", - ecr_repo="test.ecr.aws.com" + access_key_id='test-key', + secret_access_key='test-secret', + region='us-west-2', + ecr_repo='test.ecr.aws.com', ) containers_service.add_registry_credentials(creds) # assert - assert responses.assert_call_count( - registry_credentials_endpoint, 1) is True - assert responses.calls[0].request.body.decode( - 'utf-8') == '{"name": "test-credential", "type": "aws-ecr", "access_key_id": "test-key", "secret_access_key": "test-secret", "region": "us-west-2", "ecr_repo": "test.ecr.aws.com"}' + assert responses.assert_call_count(registry_credentials_endpoint, 1) is True + assert ( + responses.calls[0].request.body.decode('utf-8') + == '{"name": "test-credential", "type": "aws-ecr", "access_key_id": "test-key", "secret_access_key": "test-secret", "region": "us-west-2", "ecr_repo": "test.ecr.aws.com"}' + ) @responses.activate - def test_add_registry_credentials_custom(self, containers_service, registry_credentials_endpoint): + def test_add_registry_credentials_custom( + self, containers_service, registry_credentials_endpoint + ): # arrange - responses.add( - responses.POST, - registry_credentials_endpoint, - status=201 - ) + responses.add(responses.POST, registry_credentials_endpoint, status=201) # act docker_config = '{"auths": {"registry.example.com": {"auth": "base64-encoded"}}}' creds = CustomRegistryCredentials( - name=REGISTRY_CREDENTIAL_NAME, - docker_config_json=docker_config + name=REGISTRY_CREDENTIAL_NAME, docker_config_json=docker_config ) containers_service.add_registry_credentials(creds) # assert - assert responses.assert_call_count( - registry_credentials_endpoint, 1) is True - assert responses.calls[0].request.body.decode( - 'utf-8') == '{"name": "test-credential", "type": "custom", "docker_config_json": "{\\"auths\\": {\\"registry.example.com\\": {\\"auth\\": \\"base64-encoded\\"}}}"}' + assert responses.assert_call_count(registry_credentials_endpoint, 1) is True + assert ( + responses.calls[0].request.body.decode('utf-8') + == '{"name": "test-credential", "type": 
"custom", "docker_config_json": "{\\"auths\\": {\\"registry.example.com\\": {\\"auth\\": \\"base64-encoded\\"}}}"}' + ) @responses.activate def test_delete_registry_credentials(self, containers_service, registry_credentials_endpoint): # arrange - add response mock - url = f"{registry_credentials_endpoint}/{REGISTRY_CREDENTIAL_NAME}" - responses.add( - responses.DELETE, - url, - status=200 - ) + url = f'{registry_credentials_endpoint}/{REGISTRY_CREDENTIAL_NAME}' + responses.add(responses.DELETE, url, status=200) # act - containers_service.delete_registry_credentials( - REGISTRY_CREDENTIAL_NAME) + containers_service.delete_registry_credentials(REGISTRY_CREDENTIAL_NAME) # assert assert responses.assert_call_count(url, 1) is True diff --git a/tests/unit_tests/http_client/test_http_client.py b/tests/unit_tests/http_client/test_http_client.py index a8c9605..034efba 100644 --- a/tests/unit_tests/http_client/test_http_client.py +++ b/tests/unit_tests/http_client/test_http_client.py @@ -13,7 +13,7 @@ class TestHttpClient: def test_add_base_url(self, http_client): # arrange - path = "/test" + path = '/test' base = http_client._base_url # act @@ -83,8 +83,9 @@ def test_get_successful(self, http_client): responses.add( method=responses.GET, url=(http_client._base_url + '/test'), - status=200, body='{}', - content_type='application/json' + status=200, + body='{}', + content_type='application/json', ) # act @@ -103,8 +104,9 @@ def test_post_successful(self, http_client): responses.add( method=responses.POST, url=(http_client._base_url + '/test'), - status=200, body='{}', - content_type='application/json' + status=200, + body='{}', + content_type='application/json', ) # act @@ -124,7 +126,7 @@ def test_delete_successful(self, http_client): method=responses.DELETE, url=(http_client._base_url + '/test'), status=200, - content_type='application/json' + content_type='application/json', ) # act @@ -142,8 +144,11 @@ def test_get_failed(self, http_client): method=responses.GET, url=(http_client._base_url + '/test'), status=401, - json={'code': UNAUTHORIZED_REQUEST, 'message': UNAUTHORIZED_REQUEST_MESSAGE}, - content_type='application/json' + json={ + 'code': UNAUTHORIZED_REQUEST, + 'message': UNAUTHORIZED_REQUEST_MESSAGE, + }, + content_type='application/json', ) error_str = f'error code: {UNAUTHORIZED_REQUEST}\nmessage: {UNAUTHORIZED_REQUEST_MESSAGE}' @@ -163,7 +168,7 @@ def test_post_failed(self, http_client): url=(http_client._base_url + '/test'), status=400, json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, - content_type='application/json' + content_type='application/json', ) # act @@ -181,7 +186,7 @@ def test_delete_failed(self, http_client): url=(http_client._base_url + '/test'), status=400, json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, - content_type='application/json' + content_type='application/json', ) # act diff --git a/tests/unit_tests/images/test_images.py b/tests/unit_tests/images/test_images.py index 61ff98a..9dd455c 100644 --- a/tests/unit_tests/images/test_images.py +++ b/tests/unit_tests/images/test_images.py @@ -1,4 +1,4 @@ -import responses # https://github.com/getsentry/responses +import responses # https://github.com/getsentry/responses from datacrunch.images.images import ImagesService, Image @@ -7,19 +7,16 @@ def test_images(http_client): # arrange - add response mock responses.add( responses.GET, - http_client._base_url + "/images", + http_client._base_url + '/images', json=[ { - "id": "0888da25-bb0d-41cc-a191-dccae45d96fd", - "name": "Ubuntu 
20.04 + CUDA 11.0", - "details": [ - "Ubuntu 20.04", - "CUDA 11.0" - ], - "image_type": "ubuntu-20.04-cuda-11.0" + 'id': '0888da25-bb0d-41cc-a191-dccae45d96fd', + 'name': 'Ubuntu 20.04 + CUDA 11.0', + 'details': ['Ubuntu 20.04', 'CUDA 11.0'], + 'image_type': 'ubuntu-20.04-cuda-11.0', } ], - status=200 + status=200, ) image_service = ImagesService(http_client) @@ -35,7 +32,6 @@ def test_images(http_client): assert images[0].name == 'Ubuntu 20.04 + CUDA 11.0' assert images[0].image_type == 'ubuntu-20.04-cuda-11.0' assert isinstance(images[0].details, list) - assert images[0].details[0] == "Ubuntu 20.04" - assert images[0].details[1] == "CUDA 11.0" + assert images[0].details[0] == 'Ubuntu 20.04' + assert images[0].details[1] == 'CUDA 11.0' assert isinstance(images[0].__str__(), str) - \ No newline at end of file diff --git a/tests/unit_tests/instance_types/test_instance_types.py b/tests/unit_tests/instance_types/test_instance_types.py index 97fa884..210414e 100644 --- a/tests/unit_tests/instance_types/test_instance_types.py +++ b/tests/unit_tests/instance_types/test_instance_types.py @@ -2,58 +2,58 @@ from datacrunch.instance_types.instance_types import InstanceTypesService, InstanceType -TYPE_ID = "01cf5dc1-a5d2-4972-ae4e-d429115d055b" -CPU_DESCRIPTION = "48 CPU 3.5GHz" +TYPE_ID = '01cf5dc1-a5d2-4972-ae4e-d429115d055b' +CPU_DESCRIPTION = '48 CPU 3.5GHz' NUMBER_OF_CORES = 48 -GPU_DESCRIPTION = "8x NVidia Tesla V100" +GPU_DESCRIPTION = '8x NVidia Tesla V100' NUMBER_OF_GPUS = 8 -MEMORY_DESCRIPTION = "192GB RAM" +MEMORY_DESCRIPTION = '192GB RAM' MEMORY_SIZE = 192 -GPU_MEMORY_DESCRIPTION = "128GB VRAM" +GPU_MEMORY_DESCRIPTION = '128GB VRAM' GPU_MEMORY_SIZE = 128 -STORAGE_DESCRIPTION = "1800GB NVME" +STORAGE_DESCRIPTION = '1800GB NVME' STORAGE_SIZE = 1800 -INSTANCE_TYPE_DESCRIPTION = "Dedicated Bare metal Server" +INSTANCE_TYPE_DESCRIPTION = 'Dedicated Bare metal Server' PRICE_PER_HOUR = 5.0 SPOT_PRICE_PER_HOUR = 2.5 -INSTANCE_TYPE = "8V100.48M" +INSTANCE_TYPE = '8V100.48M' def test_instance_types(http_client): # arrange - add response mock responses.add( responses.GET, - http_client._base_url + "/instance-types", + http_client._base_url + '/instance-types', json=[ { - "id": TYPE_ID, - "cpu": { - "description": CPU_DESCRIPTION, - "number_of_cores": NUMBER_OF_CORES + 'id': TYPE_ID, + 'cpu': { + 'description': CPU_DESCRIPTION, + 'number_of_cores': NUMBER_OF_CORES, }, - "gpu": { - "description": GPU_DESCRIPTION, - "number_of_gpus": NUMBER_OF_GPUS + 'gpu': { + 'description': GPU_DESCRIPTION, + 'number_of_gpus': NUMBER_OF_GPUS, }, - "memory": { - "description": MEMORY_DESCRIPTION, - "size_in_gigabytes": MEMORY_SIZE + 'memory': { + 'description': MEMORY_DESCRIPTION, + 'size_in_gigabytes': MEMORY_SIZE, }, - "gpu_memory": { - "description": GPU_MEMORY_DESCRIPTION, - "size_in_gigabytes": GPU_MEMORY_SIZE + 'gpu_memory': { + 'description': GPU_MEMORY_DESCRIPTION, + 'size_in_gigabytes': GPU_MEMORY_SIZE, }, - "storage": { - "description": STORAGE_DESCRIPTION, - "size_in_gigabytes": STORAGE_SIZE + 'storage': { + 'description': STORAGE_DESCRIPTION, + 'size_in_gigabytes': STORAGE_SIZE, }, - "description": INSTANCE_TYPE_DESCRIPTION, - "price_per_hour": "5.00", - "spot_price": "2.50", - "instance_type": INSTANCE_TYPE + 'description': INSTANCE_TYPE_DESCRIPTION, + 'price_per_hour': '5.00', + 'spot_price': '2.50', + 'instance_type': INSTANCE_TYPE, } ], - status=200 + status=200, ) instance_types_service = InstanceTypesService(http_client) diff --git a/tests/unit_tests/instances/test_instances.py 
b/tests/unit_tests/instances/test_instances.py index a61ae4b..2410250 100644 --- a/tests/unit_tests/instances/test_instances.py +++ b/tests/unit_tests/instances/test_instances.py @@ -12,56 +12,41 @@ SSH_KEY_ID = '12345dc1-a5d2-4972-ae4e-d429115d055b' OS_VOLUME_ID = '46fc0247-8f65-4d8a-ad73-852a8b3dc1d3' -INSTANCE_TYPE = "1V100.6V" -INSTANCE_IMAGE = "ubuntu-24.04-cuda-12.8-open-docker" +INSTANCE_TYPE = '1V100.6V' +INSTANCE_IMAGE = 'ubuntu-24.04-cuda-12.8-open-docker' INSTANCE_HOSTNAME = "I'll be your host for today" -INSTANCE_DESCRIPTION = "hope you enjoy your GPU" +INSTANCE_DESCRIPTION = 'hope you enjoy your GPU' INSTANCE_STATUS = 'running' INSTANCE_PRICE_PER_HOUR = 0.60 INSTANCE_LOCATION = Locations.FIN_01 INSTANCE_IP = '1.2.3.4' -INSTANCE_CREATED_AT = "whatchalookingatboy?" -INSTANCE_OS_VOLUME = {"name": "os volume", "size": 50} +INSTANCE_CREATED_AT = 'whatchalookingatboy?' +INSTANCE_OS_VOLUME = {'name': 'os volume', 'size': 50} PAYLOAD = [ { - "created_at": INSTANCE_CREATED_AT, - "status": INSTANCE_STATUS, - "ip": INSTANCE_IP, - "cpu": { - "description": "super-duper-cpu", - "number_of_cores": 6 - }, - "gpu": { - "description": "super-duper-gpu", - "number_of_gpus": 1 - }, - "memory": { - "description": "super-duper-memory", - "size_in_gigabytes": 32 - }, - "gpu_memory": { - "description": "super-duper-memory", - "size_in_gigabytes": 20 - }, - "storage": { - "description": "super-duper-storage", - "size_in_gigabytes": 320 - }, - "hostname": INSTANCE_HOSTNAME, - "description": INSTANCE_DESCRIPTION, - "location": INSTANCE_LOCATION, - "price_per_hour": INSTANCE_PRICE_PER_HOUR, - "instance_type": INSTANCE_TYPE, - "image": INSTANCE_IMAGE, - "id": INSTANCE_ID, - "ssh_key_ids": [SSH_KEY_ID], - "os_volume_id": OS_VOLUME_ID + 'created_at': INSTANCE_CREATED_AT, + 'status': INSTANCE_STATUS, + 'ip': INSTANCE_IP, + 'cpu': {'description': 'super-duper-cpu', 'number_of_cores': 6}, + 'gpu': {'description': 'super-duper-gpu', 'number_of_gpus': 1}, + 'memory': {'description': 'super-duper-memory', 'size_in_gigabytes': 32}, + 'gpu_memory': {'description': 'super-duper-memory', 'size_in_gigabytes': 20}, + 'storage': {'description': 'super-duper-storage', 'size_in_gigabytes': 320}, + 'hostname': INSTANCE_HOSTNAME, + 'description': INSTANCE_DESCRIPTION, + 'location': INSTANCE_LOCATION, + 'price_per_hour': INSTANCE_PRICE_PER_HOUR, + 'instance_type': INSTANCE_TYPE, + 'image': INSTANCE_IMAGE, + 'id': INSTANCE_ID, + 'ssh_key_ids': [SSH_KEY_ID], + 'os_volume_id': OS_VOLUME_ID, } ] PAYLOAD_SPOT = PAYLOAD -PAYLOAD_SPOT[0]["is_spot"] = True +PAYLOAD_SPOT[0]['is_spot'] = True class TestInstancesService: @@ -71,16 +56,11 @@ def instances_service(self, http_client): @pytest.fixture def endpoint(self, http_client): - return http_client._base_url + "/instances" + return http_client._base_url + '/instances' def test_get_instances(self, instances_service, endpoint): # arrange - add response mock - responses.add( - responses.GET, - endpoint, - json=PAYLOAD, - status=200 - ) + responses.add(responses.GET, endpoint, json=PAYLOAD, status=200) # act instances = instances_service.get() @@ -110,13 +90,8 @@ def test_get_instances(self, instances_service, endpoint): def test_get_instances_by_status_successful(self, instances_service, endpoint): # arrange - add response mock - url = endpoint + "?status=running" - responses.add( - responses.GET, - url, - json=PAYLOAD, - status=200 - ) + url = endpoint + '?status=running' + responses.add(responses.GET, url, json=PAYLOAD, status=200) # act instances = 
instances_service.get(status='running') @@ -146,12 +121,12 @@ def test_get_instances_by_status_successful(self, instances_service, endpoint): def test_get_instances_by_status_failed(self, instances_service, endpoint): # arrange - add response mock - url = endpoint + "?status=blabbering" + url = endpoint + '?status=blabbering' responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -166,12 +141,7 @@ def test_get_instances_by_status_failed(self, instances_service, endpoint): def test_get_instance_by_id_successful(self, instances_service, endpoint): # arrange - add response mock url = endpoint + '/' + INSTANCE_ID - responses.add( - responses.GET, - url, - json=PAYLOAD[0], - status=200 - ) + responses.add(responses.GET, url, json=PAYLOAD[0], status=200) # act instance = instances_service.get_by_id(INSTANCE_ID) @@ -201,8 +171,8 @@ def test_get_instance_by_id_failed(self, instances_service, endpoint): responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -217,20 +187,10 @@ def test_get_instance_by_id_failed(self, instances_service, endpoint): def test_create_instance_successful(self, instances_service, endpoint): # arrange - add response mock # create instance - responses.add( - responses.POST, - endpoint, - body=INSTANCE_ID, - status=200 - ) + responses.add(responses.POST, endpoint, body=INSTANCE_ID, status=200) # get instance by id url = endpoint + '/' + INSTANCE_ID - responses.add( - responses.GET, - url, - json=PAYLOAD[0], - status=200 - ) + responses.add(responses.GET, url, json=PAYLOAD[0], status=200) # act instance = instances_service.create( @@ -239,7 +199,7 @@ def test_create_instance_successful(self, instances_service, endpoint): ssh_key_ids=[SSH_KEY_ID], hostname=INSTANCE_HOSTNAME, description=INSTANCE_DESCRIPTION, - os_volume=INSTANCE_OS_VOLUME + os_volume=INSTANCE_OS_VOLUME, ) # assert @@ -268,20 +228,10 @@ def test_create_instance_successful(self, instances_service, endpoint): def test_create_spot_instance_successful(self, instances_service, endpoint): # arrange - add response mock # add response mock for the create instance endpoint - responses.add( - responses.POST, - endpoint, - body=INSTANCE_ID, - status=200 - ) + responses.add(responses.POST, endpoint, body=INSTANCE_ID, status=200) # add response mock for the get instance by id endpoint url = endpoint + '/' + INSTANCE_ID - responses.add( - responses.GET, - url, - json=PAYLOAD_SPOT[0], - status=200 - ) + responses.add(responses.GET, url, json=PAYLOAD_SPOT[0], status=200) # act instance = instances_service.create( @@ -290,7 +240,7 @@ def test_create_spot_instance_successful(self, instances_service, endpoint): ssh_key_ids=[SSH_KEY_ID], hostname=INSTANCE_HOSTNAME, description=INSTANCE_DESCRIPTION, - os_volume=INSTANCE_OS_VOLUME + os_volume=INSTANCE_OS_VOLUME, ) # assert @@ -319,20 +269,10 @@ def test_create_spot_instance_successful(self, instances_service, endpoint): def test_create_instance_attached_os_volume_successful(self, instances_service, endpoint): # arrange - add response mock # create instance - responses.add( - responses.POST, - endpoint, - body=INSTANCE_ID, - status=200 - ) + responses.add(responses.POST, endpoint, body=INSTANCE_ID, status=200) # get instance by id url = endpoint + '/' + INSTANCE_ID - 
responses.add( - responses.GET, - url, - json=PAYLOAD[0], - status=200 - ) + responses.add(responses.GET, url, json=PAYLOAD[0], status=200) # act instance = instances_service.create( @@ -369,8 +309,8 @@ def test_create_instance_failed(self, instances_service, endpoint): responses.add( responses.POST, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -391,15 +331,10 @@ def test_create_instance_failed(self, instances_service, endpoint): def test_action_successful(self, instances_service, endpoint): # arrange - add response mock url = endpoint - responses.add( - responses.PUT, - url, - status=202 - ) + responses.add(responses.PUT, url, status=202) # act - result = instances_service.action( - id_list=[INSTANCE_ID], action=Actions.SHUTDOWN) + result = instances_service.action(id_list=[INSTANCE_ID], action=Actions.SHUTDOWN) # assert assert result is None @@ -411,14 +346,13 @@ def test_action_failed(self, instances_service, endpoint): responses.add( responses.PUT, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act with pytest.raises(APIException) as excinfo: - instances_service.action( - id_list=[INSTANCE_ID], action="fluxturcate") + instances_service.action(id_list=[INSTANCE_ID], action='fluxturcate') # assert assert excinfo.value.code == INVALID_REQUEST @@ -427,14 +361,13 @@ def test_action_failed(self, instances_service, endpoint): def test_is_available_successful(self, instances_service): # arrange - add response mock - url = instances_service._http_client._base_url + \ - '/instance-availability/' + INSTANCE_TYPE + "?isSpot=false" - responses.add( - responses.GET, - url, - json=True, - status=200 + url = ( + instances_service._http_client._base_url + + '/instance-availability/' + + INSTANCE_TYPE + + '?isSpot=false' ) + responses.add(responses.GET, url, json=True, status=200) # act is_available = instances_service.is_available(INSTANCE_TYPE) @@ -445,18 +378,16 @@ def test_is_available_successful(self, instances_service): def test_is_spot_available_successful(self, instances_service): # arrange - add response mock - url = instances_service._http_client._base_url + \ - '/instance-availability/' + INSTANCE_TYPE + '?isSpot=true' - responses.add( - responses.GET, - url, - json=True, - status=200 + url = ( + instances_service._http_client._base_url + + '/instance-availability/' + + INSTANCE_TYPE + + '?isSpot=true' ) + responses.add(responses.GET, url, json=True, status=200) # act - is_available = instances_service.is_available( - INSTANCE_TYPE, is_spot=True) + is_available = instances_service.is_available(INSTANCE_TYPE, is_spot=True) # assert assert is_available is True @@ -464,13 +395,14 @@ def test_is_spot_available_successful(self, instances_service): def test_is_available_failed(self, instances_service): # arrange - add response mock - url = instances_service._http_client._base_url + \ - '/instance-availability/x' + "?isSpot=false" + url = ( + instances_service._http_client._base_url + '/instance-availability/x' + '?isSpot=false' + ) responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act diff --git a/tests/unit_tests/ssh_keys/test_ssh_keys.py 
b/tests/unit_tests/ssh_keys/test_ssh_keys.py index 2dc80b5..f6f5893 100644 --- a/tests/unit_tests/ssh_keys/test_ssh_keys.py +++ b/tests/unit_tests/ssh_keys/test_ssh_keys.py @@ -1,5 +1,5 @@ import pytest -import responses # https://github.com/getsentry/responses +import responses # https://github.com/getsentry/responses from datacrunch.exceptions import APIException from datacrunch.ssh_keys.ssh_keys import SSHKeysService, SSHKey @@ -13,37 +13,25 @@ KEY_ID_2 = '12345dc1-a5d2-4972-ae4e-d429115d055b' -PAYLOAD = [ - { - 'id': KEY_ID, - 'name': KEY_NAME, - 'key': KEY_VALUE - } -] +PAYLOAD = [{'id': KEY_ID, 'name': KEY_NAME, 'key': KEY_VALUE}] class TestSSHKeys: - @pytest.fixture def ssh_key_service(self, http_client): return SSHKeysService(http_client) @pytest.fixture def endpoint(self, http_client): - return http_client._base_url + "/sshkeys" + return http_client._base_url + '/sshkeys' def test_get_keys(self, ssh_key_service, endpoint): # arrange - add response mock - responses.add( - responses.GET, - endpoint, - json=PAYLOAD, - status=200 - ) + responses.add(responses.GET, endpoint, json=PAYLOAD, status=200) # act keys = ssh_key_service.get() - + # assert assert isinstance(keys, list) assert len(keys) == 1 @@ -56,16 +44,11 @@ def test_get_keys(self, ssh_key_service, endpoint): def test_get_key_by_id_successful(self, ssh_key_service, endpoint): # arrange - add response mock url = endpoint + '/' + KEY_ID - responses.add( - responses.GET, - url, - json=PAYLOAD, - status=200 - ) + responses.add(responses.GET, url, json=PAYLOAD, status=200) # act key = ssh_key_service.get_by_id(KEY_ID) - + # assert assert isinstance(key, SSHKey) assert key.id == KEY_ID @@ -79,8 +62,8 @@ def test_get_key_by_id_failed(self, ssh_key_service, endpoint): responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -94,20 +77,15 @@ def test_get_key_by_id_failed(self, ssh_key_service, endpoint): def test_create_key_successful(self, ssh_key_service, endpoint): # arrange - add response mock - responses.add( - responses.POST, - endpoint, - body=KEY_ID, - status=201 - ) + responses.add(responses.POST, endpoint, body=KEY_ID, status=201) # act key = ssh_key_service.create(KEY_NAME, KEY_VALUE) - + # assert assert isinstance(key, SSHKey) assert isinstance(key.id, str) - assert key.id == KEY_ID + assert key.id == KEY_ID assert responses.assert_call_count(endpoint, 1) is True def test_create_key_failed(self, ssh_key_service, endpoint): @@ -115,8 +93,8 @@ def test_create_key_failed(self, ssh_key_service, endpoint): responses.add( responses.POST, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -130,11 +108,7 @@ def test_create_key_failed(self, ssh_key_service, endpoint): def test_delete_keys_successful(self, ssh_key_service, endpoint): # arrange - add response mock - responses.add( - responses.DELETE, - endpoint, - status=200 - ) + responses.add(responses.DELETE, endpoint, status=200) # act result = ssh_key_service.delete([KEY_ID, KEY_ID_2]) @@ -142,15 +116,14 @@ def test_delete_keys_successful(self, ssh_key_service, endpoint): # assert assert result is None assert responses.assert_call_count(endpoint, 1) is True - def test_delete_keys_failed(self, ssh_key_service, endpoint): # arrange - add response mock responses.add( responses.DELETE, endpoint, 
- json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -165,11 +138,7 @@ def test_delete_keys_failed(self, ssh_key_service, endpoint): def test_delete_key_by_id_successful(self, ssh_key_service, endpoint): # arrange - add response mock url = endpoint + '/' + KEY_ID - responses.add( - responses.DELETE, - url, - status=200 - ) + responses.add(responses.DELETE, url, status=200) # act result = ssh_key_service.delete_by_id(KEY_ID) @@ -184,8 +153,8 @@ def test_delete_key_by_id_failed(self, ssh_key_service, endpoint): responses.add( responses.DELETE, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -195,4 +164,4 @@ def test_delete_key_by_id_failed(self, ssh_key_service, endpoint): # assert assert excinfo.value.code == INVALID_REQUEST assert excinfo.value.message == INVALID_REQUEST_MESSAGE - assert responses.assert_call_count(url, 1) is True \ No newline at end of file + assert responses.assert_call_count(url, 1) is True diff --git a/tests/unit_tests/startup_scripts/test_startup_scripts.py b/tests/unit_tests/startup_scripts/test_startup_scripts.py index 32f3888..7b2b675 100644 --- a/tests/unit_tests/startup_scripts/test_startup_scripts.py +++ b/tests/unit_tests/startup_scripts/test_startup_scripts.py @@ -2,7 +2,10 @@ import responses # https://github.com/getsentry/responses from datacrunch.exceptions import APIException -from datacrunch.startup_scripts.startup_scripts import StartupScriptsService, StartupScript +from datacrunch.startup_scripts.startup_scripts import ( + StartupScriptsService, + StartupScript, +) INVALID_REQUEST = 'invalid_request' INVALID_REQUEST_MESSAGE = 'Your existence is invalid' @@ -13,33 +16,21 @@ script_ID_2 = 'beefbeef-a5d2-4972-ae4e-d429115d055b' -PAYLOAD = [ - { - 'id': SCRIPT_ID, - 'name': SCRIPT_NAME, - 'script': SCRIPT_VALUE - } -] +PAYLOAD = [{'id': SCRIPT_ID, 'name': SCRIPT_NAME, 'script': SCRIPT_VALUE}] class TestStartupScripts: - @pytest.fixture def startup_script_service(self, http_client): return StartupScriptsService(http_client) @pytest.fixture def endpoint(self, http_client): - return http_client._base_url + "/scripts" + return http_client._base_url + '/scripts' def test_get_scripts(self, startup_script_service, endpoint): # arrange - add response mock - responses.add( - responses.GET, - endpoint, - json=PAYLOAD, - status=200 - ) + responses.add(responses.GET, endpoint, json=PAYLOAD, status=200) # act scripts = startup_script_service.get() @@ -56,12 +47,7 @@ def test_get_scripts(self, startup_script_service, endpoint): def test_get_script_by_id_successful(self, startup_script_service, endpoint): # arrange - add response mock url = endpoint + '/' + SCRIPT_ID - responses.add( - responses.GET, - url, - json=PAYLOAD, - status=200 - ) + responses.add(responses.GET, url, json=PAYLOAD, status=200) # act script = startup_script_service.get_by_id(SCRIPT_ID) @@ -79,8 +65,8 @@ def test_get_script_by_id_failed(self, startup_script_service, endpoint): responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -94,12 +80,7 @@ def test_get_script_by_id_failed(self, startup_script_service, endpoint): def test_create_script_successful(self, startup_script_service, 
endpoint): # arrange - add response mock - responses.add( - responses.POST, - endpoint, - body=SCRIPT_ID, - status=201 - ) + responses.add(responses.POST, endpoint, body=SCRIPT_ID, status=201) # act script = startup_script_service.create(SCRIPT_NAME, SCRIPT_VALUE) @@ -114,8 +95,8 @@ def test_create_script_failed(self, startup_script_service, endpoint): responses.add( responses.POST, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -129,11 +110,7 @@ def test_create_script_failed(self, startup_script_service, endpoint): def test_delete_scripts_successful(self, startup_script_service, endpoint): # arrange - add response mock - responses.add( - responses.DELETE, - endpoint, - status=200 - ) + responses.add(responses.DELETE, endpoint, status=200) # act result = startup_script_service.delete([SCRIPT_ID, script_ID_2]) @@ -147,8 +124,8 @@ def test_delete_scripts_failed(self, startup_script_service, endpoint): responses.add( responses.DELETE, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -163,11 +140,7 @@ def test_delete_scripts_failed(self, startup_script_service, endpoint): def test_delete_script_by_id_successful(self, startup_script_service, endpoint): # arrange - add response mock url = endpoint + '/' + SCRIPT_ID - responses.add( - responses.DELETE, - url, - status=200 - ) + responses.add(responses.DELETE, url, status=200) # act result = startup_script_service.delete_by_id(SCRIPT_ID) @@ -182,8 +155,8 @@ def test_delete_script_by_id_failed(self, startup_script_service, endpoint): responses.add( responses.DELETE, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act diff --git a/tests/unit_tests/test_datacrunch.py b/tests/unit_tests/test_datacrunch.py index e192aaa..b2454fd 100644 --- a/tests/unit_tests/test_datacrunch.py +++ b/tests/unit_tests/test_datacrunch.py @@ -1,47 +1,42 @@ import pytest -import responses # https://github.com/getsentry/responses +import responses # https://github.com/getsentry/responses from datacrunch.datacrunch import DataCrunchClient from datacrunch.exceptions import APIException -BASE_URL = "https://api-testing.datacrunch.io/v1" +BASE_URL = 'https://api-testing.datacrunch.io/v1' response_json = { - "access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJoZXkiOiJ5b3UgYWN1YWxseSBjaGVja2VkIHRoaXM_In0.0RjcdKQ1NJP9gbRyXITE6LFFLwKGzeeshuubnkkfkb8", - "token_type": "Bearer", - "expires_in": 3600, - "refresh_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ3b3ciOiJhbmQgdGhpcyB0b28_In0.AC5gk-o-MOptUgrouEErlhr8WT3Hg_RR6px6A0I7ZEk", - "scope": "fullAccess" + 'access_token': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJoZXkiOiJ5b3UgYWN1YWxseSBjaGVja2VkIHRoaXM_In0.0RjcdKQ1NJP9gbRyXITE6LFFLwKGzeeshuubnkkfkb8', + 'token_type': 'Bearer', + 'expires_in': 3600, + 'refresh_token': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ3b3ciOiJhbmQgdGhpcyB0b28_In0.AC5gk-o-MOptUgrouEErlhr8WT3Hg_RR6px6A0I7ZEk', + 'scope': 'fullAccess', } -class TestDataCrunchClient: +class TestDataCrunchClient: def test_client(self): # arrange - add response mock - responses.add( - responses.POST, - BASE_URL + "/oauth2/token", - json=response_json, - status=200 - ) + responses.add(responses.POST, BASE_URL + '/oauth2/token', 
json=response_json, status=200) # act - client = DataCrunchClient("XXXXXXXXXXXXXX", "XXXXXXXXXXXXXX", BASE_URL) + client = DataCrunchClient('XXXXXXXXXXXXXX', 'XXXXXXXXXXXXXX', BASE_URL) # assert assert client.constants.base_url == BASE_URL def test_client_with_default_base_url(self): # arrange - add response mock - DEFAULT_BASE_URL = "https://api.datacrunch.io/v1" + DEFAULT_BASE_URL = 'https://api.datacrunch.io/v1' responses.add( responses.POST, - DEFAULT_BASE_URL + "/oauth2/token", + DEFAULT_BASE_URL + '/oauth2/token', json=response_json, - status=200 + status=200, ) # act - client = DataCrunchClient("XXXXXXXXXXXXXX", "XXXXXXXXXXXXXX") + client = DataCrunchClient('XXXXXXXXXXXXXX', 'XXXXXXXXXXXXXX') # assert assert client.constants.base_url == DEFAULT_BASE_URL @@ -50,16 +45,18 @@ def test_invalid_client_credentials(self): # arrange - add response mock responses.add( responses.POST, - BASE_URL + "/oauth2/token", - json={"code": "unauthorized_request", "message": "Invalid client id or client secret"}, - status=401 + BASE_URL + '/oauth2/token', + json={ + 'code': 'unauthorized_request', + 'message': 'Invalid client id or client secret', + }, + status=401, ) # act with pytest.raises(APIException) as excinfo: - DataCrunchClient("x", "y", BASE_URL) + DataCrunchClient('x', 'y', BASE_URL) # assert assert excinfo.value.code == 'unauthorized_request' assert excinfo.value.message == 'Invalid client id or client secret' - \ No newline at end of file diff --git a/tests/unit_tests/test_exceptions.py b/tests/unit_tests/test_exceptions.py index e247970..4cab370 100644 --- a/tests/unit_tests/test_exceptions.py +++ b/tests/unit_tests/test_exceptions.py @@ -2,7 +2,7 @@ from datacrunch.exceptions import APIException ERROR_CODE = 'test_code' -ERROR_MESSAGE = "test message" +ERROR_MESSAGE = 'test message' def test_api_exception_with_code(): @@ -13,7 +13,7 @@ def test_api_exception_with_code(): with pytest.raises(APIException) as excinfo: raise APIException(ERROR_CODE, ERROR_MESSAGE) - # assert + # assert assert excinfo.value.code == ERROR_CODE assert excinfo.value.message == ERROR_MESSAGE assert excinfo.value.__str__() == error_str @@ -27,7 +27,7 @@ def test_api_exception_without_code(): with pytest.raises(APIException) as excinfo: raise APIException(None, ERROR_MESSAGE) - # assert + # assert assert excinfo.value.code is None assert excinfo.value.message == ERROR_MESSAGE assert excinfo.value.__str__() == error_str diff --git a/tests/unit_tests/volume_types/test_volume_types.py b/tests/unit_tests/volume_types/test_volume_types.py index 2da6211..0682aa3 100644 --- a/tests/unit_tests/volume_types/test_volume_types.py +++ b/tests/unit_tests/volume_types/test_volume_types.py @@ -4,7 +4,7 @@ from datacrunch.constants import VolumeTypes -USD = "usd" +USD = 'usd' NVMe_PRICE = 0.2 HDD_PRICE = 0.05 @@ -12,24 +12,18 @@ def test_volume_types(http_client): responses.add( responses.GET, - http_client._base_url + "/volume-types", + http_client._base_url + '/volume-types', json=[ { - "type": VolumeTypes.NVMe, - "price": { - "currency": USD, - "price_per_month_per_gb": NVMe_PRICE - } + 'type': VolumeTypes.NVMe, + 'price': {'currency': USD, 'price_per_month_per_gb': NVMe_PRICE}, }, { - "type": VolumeTypes.HDD, - "price": { - "currency": USD, - "price_per_month_per_gb": HDD_PRICE - } - } + 'type': VolumeTypes.HDD, + 'price': {'currency': USD, 'price_per_month_per_gb': HDD_PRICE}, + }, ], - status=200 + status=200, ) volume_types_service = VolumeTypesService(http_client) diff --git a/tests/unit_tests/volumes/test_volumes.py 
b/tests/unit_tests/volumes/test_volumes.py index c617c4b..b40a218 100644 --- a/tests/unit_tests/volumes/test_volumes.py +++ b/tests/unit_tests/volumes/test_volumes.py @@ -4,60 +4,66 @@ from datacrunch.exceptions import APIException from datacrunch.volumes.volumes import VolumesService, Volume -from datacrunch.constants import VolumeStatus, VolumeTypes, VolumeActions, ErrorCodes, Locations +from datacrunch.constants import ( + VolumeStatus, + VolumeTypes, + VolumeActions, + ErrorCodes, + Locations, +) INVALID_REQUEST = ErrorCodes.INVALID_REQUEST INVALID_REQUEST_MESSAGE = 'Your existence is invalid' -INSTANCE_ID = "4fee633c-b119-4447-af9c-70ba17675fc5" +INSTANCE_ID = '4fee633c-b119-4447-af9c-70ba17675fc5' -NVME = "NVMe" -HDD = "HDD" -TARGET_VDA = "vda" +NVME = 'NVMe' +HDD = 'HDD' +TARGET_VDA = 'vda' SSH_KEY_ID = '12345dc1-a5d2-4972-ae4e-d429115d055b' -NVME_VOL_ID = "cf995e26-ce69-4149-84a3-cdd1e100670f" +NVME_VOL_ID = 'cf995e26-ce69-4149-84a3-cdd1e100670f' NVME_VOL_STATUS = VolumeStatus.ATTACHED -NVME_VOL_NAME = "Volume-nxC2tf9F" +NVME_VOL_NAME = 'Volume-nxC2tf9F' NVME_VOL_SIZE = 50 -NVME_VOL_CREATED_AT = "2021-06-02T12:56:49.582Z" +NVME_VOL_CREATED_AT = '2021-06-02T12:56:49.582Z' -HDD_VOL_ID = "ea4edc62-9838-4b7c-bd5b-862f2efec675" +HDD_VOL_ID = 'ea4edc62-9838-4b7c-bd5b-862f2efec675' HDD_VOL_STATUS = VolumeStatus.DETACHED -HDD_VOL_NAME = "Volume-iHdL4ysR" +HDD_VOL_NAME = 'Volume-iHdL4ysR' HDD_VOL_SIZE = 100 -HDD_VOL_CREATED_AT = "2021-06-02T12:56:49.582Z" +HDD_VOL_CREATED_AT = '2021-06-02T12:56:49.582Z' RANDOM_VOL_ID = '07d864ee-ba86-451e-85b3-34ef551bd4a2' RANDOM_VOL2_ID = '72c5c082-7fe7-4d13-bd9e-f529c97d63b3' NVME_VOLUME = { - "id": NVME_VOL_ID, - "status": NVME_VOL_STATUS, - "instance_id": INSTANCE_ID, - "name": NVME_VOL_NAME, - "size": NVME_VOL_SIZE, - "type": NVME, - "location": Locations.FIN_01, - "is_os_volume": True, - "created_at": NVME_VOL_CREATED_AT, - "target": TARGET_VDA, - "ssh_key_ids": SSH_KEY_ID + 'id': NVME_VOL_ID, + 'status': NVME_VOL_STATUS, + 'instance_id': INSTANCE_ID, + 'name': NVME_VOL_NAME, + 'size': NVME_VOL_SIZE, + 'type': NVME, + 'location': Locations.FIN_01, + 'is_os_volume': True, + 'created_at': NVME_VOL_CREATED_AT, + 'target': TARGET_VDA, + 'ssh_key_ids': SSH_KEY_ID, } HDD_VOLUME = { - "id": HDD_VOL_ID, - "status": HDD_VOL_STATUS, - "instance_id": None, - "name": HDD_VOL_NAME, - "size": HDD_VOL_SIZE, - "type": HDD, - "location": Locations.FIN_01, - "is_os_volume": False, - "created_at": HDD_VOL_CREATED_AT, - "target": None, - "ssh_key_ids": [] + 'id': HDD_VOL_ID, + 'status': HDD_VOL_STATUS, + 'instance_id': None, + 'name': HDD_VOL_NAME, + 'size': HDD_VOL_SIZE, + 'type': HDD, + 'location': Locations.FIN_01, + 'is_os_volume': False, + 'created_at': HDD_VOL_CREATED_AT, + 'target': None, + 'ssh_key_ids': [], } PAYLOAD = [NVME_VOLUME, HDD_VOLUME] @@ -70,11 +76,18 @@ def volumes_service(self, http_client): @pytest.fixture def endpoint(self, http_client): - return http_client._base_url + "/volumes" + return http_client._base_url + '/volumes' def test_initialize_a_volume(self): - volume = Volume(RANDOM_VOL_ID, VolumeStatus.DETACHED, HDD_VOL_NAME, HDD_VOL_SIZE, - HDD, False, HDD_VOL_CREATED_AT) + volume = Volume( + RANDOM_VOL_ID, + VolumeStatus.DETACHED, + HDD_VOL_NAME, + HDD_VOL_SIZE, + HDD, + False, + HDD_VOL_CREATED_AT, + ) assert volume.id == RANDOM_VOL_ID assert volume.status == VolumeStatus.DETACHED @@ -90,12 +103,7 @@ def test_initialize_a_volume(self): def test_get_volumes(self, volumes_service, endpoint): # arrange - add response mock - responses.add( - 
responses.GET, - endpoint, - json=PAYLOAD, - status=200 - ) + responses.add(responses.GET, endpoint, json=PAYLOAD, status=200) # act volumes = volumes_service.get() @@ -135,9 +143,9 @@ def test_get_volumes_by_status_successful(self, volumes_service, endpoint): # arrange - add response mock responses.add( responses.GET, - endpoint + "?status=" + VolumeStatus.ATTACHED, + endpoint + '?status=' + VolumeStatus.ATTACHED, json=[NVME_VOLUME], - status=200 + status=200, ) # act @@ -160,12 +168,12 @@ def test_get_volumes_by_status_successful(self, volumes_service, endpoint): assert volume_nvme.target == TARGET_VDA def test_get_volumes_by_status_failed(self, volumes_service, endpoint): - url = endpoint + "?status=flummoxed" + url = endpoint + '?status=flummoxed' responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -179,13 +187,8 @@ def test_get_volumes_by_status_failed(self, volumes_service, endpoint): def test_get_volume_by_id_successful(self, volumes_service, endpoint): # arrange - add response mock - url = endpoint + "/" + NVME_VOL_ID - responses.add( - responses.GET, - url, - json=NVME_VOLUME, - status=200 - ) + url = endpoint + '/' + NVME_VOL_ID + responses.add(responses.GET, url, json=NVME_VOLUME, status=200) # act volume_nvme = volumes_service.get_by_id(NVME_VOL_ID) @@ -205,12 +208,12 @@ def test_get_volume_by_id_successful(self, volumes_service, endpoint): def test_get_volume_by_id_failed(self, volumes_service, endpoint): # arrange - add response mock - url = endpoint + "/x" + url = endpoint + '/x' responses.add( responses.GET, url, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act @@ -224,22 +227,11 @@ def test_get_volume_by_id_failed(self, volumes_service, endpoint): def test_create_volume_successful(self, volumes_service, endpoint): # arrange - add response mock - responses.add( - responses.POST, - endpoint, - body=NVME_VOL_ID, - status=202 - ) - responses.add( - responses.GET, - endpoint + "/" + NVME_VOL_ID, - json=NVME_VOLUME, - status=200 - ) + responses.add(responses.POST, endpoint, body=NVME_VOL_ID, status=202) + responses.add(responses.GET, endpoint + '/' + NVME_VOL_ID, json=NVME_VOLUME, status=200) # act - volume = volumes_service.create( - VolumeTypes.NVMe, NVME_VOL_NAME, NVME_VOL_SIZE) + volume = volumes_service.create(VolumeTypes.NVMe, NVME_VOL_NAME, NVME_VOL_SIZE) # assert assert volume.id == NVME_VOL_ID @@ -250,14 +242,13 @@ def test_create_volume_failed(self, volumes_service, endpoint): responses.add( responses.POST, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, - status=400 + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, ) # act with pytest.raises(APIException) as excinfo: - volumes_service.create( - VolumeTypes.NVMe, NVME_VOL_NAME, 100000000000000000000000) + volumes_service.create(VolumeTypes.NVMe, NVME_VOL_NAME, 100000000000000000000000) # assert assert excinfo.value.code == INVALID_REQUEST @@ -271,12 +262,14 @@ def test_attach_volume_successful(self, volumes_service, endpoint): endpoint, status=202, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.ATTACH, - "instance_id": INSTANCE_ID - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.ATTACH, + 
'instance_id': INSTANCE_ID, + } + ) + ], ) # act @@ -291,15 +284,17 @@ def test_attach_volume_failed(self, volumes_service, endpoint): responses.add( responses.PUT, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, status=400, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.ATTACH, - "instance_id": INSTANCE_ID - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.ATTACH, + 'instance_id': INSTANCE_ID, + } + ) + ], ) # act @@ -318,11 +313,8 @@ def test_detach_volume_successful(self, volumes_service, endpoint): endpoint, status=202, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.DETACH - }) - ] + matchers.json_params_matcher({'id': NVME_VOL_ID, 'action': VolumeActions.DETACH}) + ], ) # act @@ -337,14 +329,11 @@ def test_detach_volume_failed(self, volumes_service, endpoint): responses.add( responses.PUT, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, status=400, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.DETACH - }) - ] + matchers.json_params_matcher({'id': NVME_VOL_ID, 'action': VolumeActions.DETACH}) + ], ) # act @@ -357,7 +346,7 @@ def test_detach_volume_failed(self, volumes_service, endpoint): assert responses.assert_call_count(endpoint, 1) is True def test_rename_volume_successful(self, volumes_service, endpoint): - new_name = "bob" + new_name = 'bob' # arrange - add response mock responses.add( @@ -365,12 +354,14 @@ def test_rename_volume_successful(self, volumes_service, endpoint): endpoint, status=202, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.RENAME, - "name": new_name, - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.RENAME, + 'name': new_name, + } + ) + ], ) # act @@ -381,21 +372,23 @@ def test_rename_volume_successful(self, volumes_service, endpoint): assert responses.assert_call_count(endpoint, 1) is True def test_rename_volume_failed(self, volumes_service, endpoint): - new_name = "bob" + new_name = 'bob' # arrange - add response mock responses.add( responses.PUT, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, status=400, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.RENAME, - "name": new_name - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.RENAME, + 'name': new_name, + } + ) + ], ) # act @@ -416,12 +409,14 @@ def test_increase_volume_size_successful(self, volumes_service, endpoint): endpoint, status=202, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.INCREASE_SIZE, - "size": new_size, - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.INCREASE_SIZE, + 'size': new_size, + } + ) + ], ) # act @@ -438,15 +433,17 @@ def test_increase_volume_size_failed(self, volumes_service, endpoint): responses.add( responses.PUT, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, status=400, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": 
VolumeActions.INCREASE_SIZE, - "size": new_size - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.INCREASE_SIZE, + 'size': new_size, + } + ) + ], ) # act @@ -465,12 +462,14 @@ def test_delete_volume_successful(self, volumes_service, endpoint): endpoint, status=202, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.DELETE, - "is_permanent": False - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.DELETE, + 'is_permanent': False, + } + ) + ], ) # act @@ -485,15 +484,17 @@ def test_delete_volume_failed(self, volumes_service, endpoint): responses.add( responses.PUT, endpoint, - json={"code": INVALID_REQUEST, "message": INVALID_REQUEST_MESSAGE}, + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, status=400, match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.DELETE, - "is_permanent": False - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.DELETE, + 'is_permanent': False, + } + ) + ], ) # act @@ -507,7 +508,7 @@ def test_delete_volume_failed(self, volumes_service, endpoint): def test_clone_volume_with_input_name_successful(self, volumes_service, endpoint): # arrange - CLONED_VOLUME_NAME = "cloned-volume" + CLONED_VOLUME_NAME = 'cloned-volume' # mock response for cloning the volume responses.add( @@ -516,13 +517,15 @@ def test_clone_volume_with_input_name_successful(self, volumes_service, endpoint status=202, json=[RANDOM_VOL_ID], match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.CLONE, - "name": CLONED_VOLUME_NAME, - "type": None - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.CLONE, + 'name': CLONED_VOLUME_NAME, + 'type': None, + } + ) + ], ) # mock object for the cloned volume @@ -534,7 +537,7 @@ def test_clone_volume_with_input_name_successful(self, volumes_service, endpoint # mock response for getting the cloned volume responses.add( responses.GET, - endpoint + "/" + RANDOM_VOL_ID, + endpoint + '/' + RANDOM_VOL_ID, status=200, json=CLONED_VOL_GET_MOCK, ) @@ -546,9 +549,11 @@ def test_clone_volume_with_input_name_successful(self, volumes_service, endpoint assert responses.assert_call_count(endpoint, 1) is True assert cloned_volume.name == CLONED_VOLUME_NAME - def test_clone_volume_without_input_name_successful(self, volumes_service: VolumesService, endpoint): + def test_clone_volume_without_input_name_successful( + self, volumes_service: VolumesService, endpoint + ): # arrange - CLONED_VOLUME_NAME = "CLONE-" + NVME_VOL_NAME + CLONED_VOLUME_NAME = 'CLONE-' + NVME_VOL_NAME # mock response for cloning the volume responses.add( @@ -557,13 +562,15 @@ def test_clone_volume_without_input_name_successful(self, volumes_service: Volum status=202, json=[RANDOM_VOL_ID], match=[ - matchers.json_params_matcher({ - "id": NVME_VOL_ID, - "action": VolumeActions.CLONE, - "name": None, - "type": None - }) - ] + matchers.json_params_matcher( + { + 'id': NVME_VOL_ID, + 'action': VolumeActions.CLONE, + 'name': None, + 'type': None, + } + ) + ], ) # mock object for the cloned volume @@ -575,7 +582,7 @@ def test_clone_volume_without_input_name_successful(self, volumes_service: Volum # mock response for getting the cloned volume responses.add( responses.GET, - endpoint + "/" + RANDOM_VOL_ID, + endpoint + '/' + RANDOM_VOL_ID, status=200, json=CLONED_VOL_GET_MOCK, ) @@ -589,8 +596,8 @@ def 
test_clone_volume_without_input_name_successful(self, volumes_service: Volum def test_clone_two_volumes_successful(self, volumes_service: VolumesService, endpoint): # arrange - CLONED_VOL1_NAME = "CLONE-" + NVME_VOL_NAME - CLONED_VOL2_NAME = "CLONE-" + HDD_VOL_NAME + CLONED_VOL1_NAME = 'CLONE-' + NVME_VOL_NAME + CLONED_VOL2_NAME = 'CLONE-' + HDD_VOL_NAME # mock response for cloning the volumes responses.add( @@ -599,13 +606,15 @@ def test_clone_two_volumes_successful(self, volumes_service: VolumesService, end status=202, json=[RANDOM_VOL_ID, RANDOM_VOL2_ID], match=[ - matchers.json_params_matcher({ - "id": [NVME_VOL_ID, HDD_VOL_ID], - "action": VolumeActions.CLONE, - "name": None, - "type": None - }) - ] + matchers.json_params_matcher( + { + 'id': [NVME_VOL_ID, HDD_VOL_ID], + 'action': VolumeActions.CLONE, + 'name': None, + 'type': None, + } + ) + ], ) # mock object for the cloned volumes @@ -622,13 +631,13 @@ def test_clone_two_volumes_successful(self, volumes_service: VolumesService, end # mock response for getting the cloned volumes responses.add( responses.GET, - endpoint + "/" + RANDOM_VOL_ID, + endpoint + '/' + RANDOM_VOL_ID, status=200, json=CLONED_VOL1_GET_MOCK, ) responses.add( responses.GET, - endpoint + "/" + RANDOM_VOL2_ID, + endpoint + '/' + RANDOM_VOL2_ID, status=200, json=CLONED_VOL2_GET_MOCK, )