From d7c2e6438aaa836d7dd971d363c50ee9ba136d6b Mon Sep 17 00:00:00 2001 From: Tamir Date: Thu, 3 Apr 2025 09:41:42 +0300 Subject: [PATCH 1/5] replaced fastai image --- README.md | 2 +- .../examples/advanced_create_instance.rst | 4 ++-- docs/source/examples/instance_actions.rst | 2 +- .../source/examples/instances_and_volumes.rst | 4 ++-- .../examples/simple_create_instance.rst | 2 +- docs/source/index.rst | 2 +- examples/advanced_create_instance.py | 10 ++++---- examples/instance_actions.py | 15 +++++++----- examples/instances_and_volumes.py | 24 ++++++++++--------- examples/simple_create_instance.py | 5 ++-- tests/unit_tests/instances/test_instances.py | 2 +- 11 files changed, 39 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index b1fef0f..a490856 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ DataCrunch's Public API documentation [is available here](https://api.datacrunch # Create a new instance instance = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys, hostname='example', description='example instance') diff --git a/docs/source/examples/advanced_create_instance.rst b/docs/source/examples/advanced_create_instance.rst index 880effd..fc04494 100644 --- a/docs/source/examples/advanced_create_instance.rst +++ b/docs/source/examples/advanced_create_instance.rst @@ -56,7 +56,7 @@ Advanced Create Instance if price_per_hour * DURATION < balance.amount: # Deploy a new 8V instance instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_8V, - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='large instance' @@ -67,7 +67,7 @@ Advanced Create Instance else: # Deploy a new 4V instance instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_4V, - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', 
description='medium instance') diff --git a/docs/source/examples/instance_actions.rst b/docs/source/examples/instance_actions.rst index 77b115b..c59690b 100644 --- a/docs/source/examples/instance_actions.rst +++ b/docs/source/examples/instance_actions.rst @@ -22,7 +22,7 @@ Instance Actions # Create a new 1V100.6V instance instance = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='example instance') diff --git a/docs/source/examples/instances_and_volumes.rst b/docs/source/examples/instances_and_volumes.rst index 6701c4c..d3852e1 100644 --- a/docs/source/examples/instances_and_volumes.rst +++ b/docs/source/examples/instances_and_volumes.rst @@ -27,7 +27,7 @@ Instances and Volumes # Create instance with extra attached volumes instance_with_extra_volumes = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys, hostname='example', description='example instance', @@ -38,7 +38,7 @@ Instances and Volumes # Create instance with custom OS volume size and name instance_with_custom_os_volume = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys, hostname='example', description='example instance', diff --git a/docs/source/examples/simple_create_instance.rst b/docs/source/examples/simple_create_instance.rst index 72f3051..c845321 100644 --- a/docs/source/examples/simple_create_instance.rst +++ b/docs/source/examples/simple_create_instance.rst @@ -19,7 +19,7 @@ Simple Create Instance # Create a new instance instance = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='example instance') diff --git a/docs/source/index.rst b/docs/source/index.rst index 
a96c7cb..33ddc67 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -37,7 +37,7 @@ Deploy a new instance: # Create a new instance instance = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='example instance') diff --git a/examples/advanced_create_instance.py b/examples/advanced_create_instance.py index 564b032..b46f9bb 100644 --- a/examples/advanced_create_instance.py +++ b/examples/advanced_create_instance.py @@ -51,18 +51,18 @@ if price_per_hour * DURATION < balance.amount: # Deploy a new 8V instance instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_8V, - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='large instance', os_volume={ - "name": "Large OS volume", - "size": 95 - }) + "name": "Large OS volume", + "size": 95 + }) else: # Deploy a new 4V instance instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_4V, - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='medium instance') diff --git a/examples/instance_actions.py b/examples/instance_actions.py index ceb7006..0a0909b 100644 --- a/examples/instance_actions.py +++ b/examples/instance_actions.py @@ -17,7 +17,7 @@ # Create a new 1V100.6V instance instance = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='example instance') @@ -27,28 +27,31 @@ # Try to shutdown instance right away, # encounter an error (because it's still provisioning) try: - datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.SHUTDOWN) + datacrunch.instances.action( + instance.id, datacrunch.constants.instance_actions.SHUTDOWN) except APIException as exception: print(exception) 
# we were too eager... # Wait until instance is running (check every 30sec), only then shut it down -while(instance.status != datacrunch.constants.instance_status.RUNNING): +while (instance.status != datacrunch.constants.instance_status.RUNNING): time.sleep(30) instance = datacrunch.instances.get_by_id(instance.id) # Shutdown! try: - datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.SHUTDOWN) + datacrunch.instances.action( + instance.id, datacrunch.constants.instance_actions.SHUTDOWN) except APIException as exception: print(exception) # no exception this time # Wait until instance is offline (check every 30sec), only then hibernate -while(instance.status != datacrunch.constants.instance_status.OFFLINE): +while (instance.status != datacrunch.constants.instance_status.OFFLINE): time.sleep(30) instance = datacrunch.instances.get_by_id(instance.id) # Hibernate the instance try: - datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.HIBERNATE) + datacrunch.instances.action( + instance.id, datacrunch.constants.instance_actions.HIBERNATE) except APIException as exception: print(exception) diff --git a/examples/instances_and_volumes.py b/examples/instances_and_volumes.py index 369b5b3..624584d 100644 --- a/examples/instances_and_volumes.py +++ b/examples/instances_and_volumes.py @@ -21,25 +21,27 @@ # Create instance with extra attached volumes instance_with_extra_volumes = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys, hostname='example', description='example instance', volumes=[ - {"type": HDD, "name": "volume-1", "size": 95}, - {"type": NVMe, "name": "volume-2", "size": 95} + {"type": HDD, "name": "volume-1", + "size": 95}, + {"type": NVMe, + "name": "volume-2", "size": 95} ]) # Create instance with custom OS volume size and name instance_with_custom_os_volume = datacrunch.instances.create(instance_type='1V100.6V', - 
image='fastai', - ssh_key_ids=ssh_keys, - hostname='example', - description='example instance', - os_volume={ - "name": "OS volume", - "size": 95 - }) + image='ubuntu-24.04-cuda-12.8-open-docker', + ssh_key_ids=ssh_keys, + hostname='example', + description='example instance', + os_volume={ + "name": "OS volume", + "size": 95 + }) # Create instance with existing OS volume as an image instance_with_existing_os_volume = datacrunch.instances.create(instance_type='1V100.6V', diff --git a/examples/simple_create_instance.py b/examples/simple_create_instance.py index 0742322..b98173f 100644 --- a/examples/simple_create_instance.py +++ b/examples/simple_create_instance.py @@ -14,10 +14,11 @@ # Create a new instance instance = datacrunch.instances.create(instance_type='1V100.6V', - image='fastai', + image='ubuntu-24.04-cuda-12.8-open-docker', ssh_key_ids=ssh_keys_ids, hostname='example', description='example instance') # Delete instance -datacrunch.instances.action(instance.id, datacrunch.constants.instance_actions.DELETE) +datacrunch.instances.action( + instance.id, datacrunch.constants.instance_actions.DELETE) diff --git a/tests/unit_tests/instances/test_instances.py b/tests/unit_tests/instances/test_instances.py index 6ad85da..7030ea6 100644 --- a/tests/unit_tests/instances/test_instances.py +++ b/tests/unit_tests/instances/test_instances.py @@ -13,7 +13,7 @@ OS_VOLUME_ID = '46fc0247-8f65-4d8a-ad73-852a8b3dc1d3' INSTANCE_TYPE = "1V100.6V" -INSTANCE_IMAGE = "fastai" +INSTANCE_IMAGE = "ubuntu-24.04-cuda-12.8-open-docker" INSTANCE_HOSTNAME = "I'll be your host for today" INSTANCE_DESCRIPTION = "hope you enjoy your GPU" INSTANCE_STATUS = 'running' From b6135f0a7d90e673a72c9290ab0da715273eac2c Mon Sep 17 00:00:00 2001 From: Tamir Date: Thu, 3 Apr 2025 09:54:51 +0300 Subject: [PATCH 2/5] added env var to deployment creation example --- .../containers/container_deployments_example.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git 
a/examples/containers/container_deployments_example.py b/examples/containers/container_deployments_example.py index 4ecb524..3bd76b3 100644 --- a/examples/containers/container_deployments_example.py +++ b/examples/containers/container_deployments_example.py @@ -12,6 +12,8 @@ from datacrunch.containers.containers import ( Container, ComputeResource, + EnvVar, + EnvVarType, ScalingOptions, ScalingPolicy, ScalingTriggers, @@ -104,6 +106,21 @@ def main() -> None: type=VolumeMountType.SCRATCH, mount_path="/data" ) + ], + env=[ + # Secret environment variables needed to be added beforehand + EnvVar( + name="HF_TOKEN", + # This is a reference to a secret already created + value_or_reference_to_secret="hf_token", + type=EnvVarType.SECRET + ), + # Plain environment variables can be added directly + EnvVar( + name="VERSION", + value_or_reference_to_secret="1.5.2", + type=EnvVarType.PLAIN + ) ] ) From bf62238dd358d8a6678181a2801246a54074a003 Mon Sep 17 00:00:00 2001 From: Tamir Date: Thu, 3 Apr 2025 09:56:23 +0300 Subject: [PATCH 3/5] changelog --- CHANGELOG.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 620174f..b46ef9a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,9 @@ Changelog ========= +* Added environment variables to container deployment example +* Updated examples image from 'fastai' to 'ubuntu-24.04-cuda-12.8-open-docker' + v1.8.4 (2025-03-25) ------------------- From 2c87a68977ed2cc0d516311159d456d5da3b8647 Mon Sep 17 00:00:00 2001 From: Tamir Date: Thu, 3 Apr 2025 10:35:58 +0300 Subject: [PATCH 4/5] consistent load of credentials and naming in examples --- datacrunch/datacrunch.py | 5 +++ examples/advanced_create_instance.py | 9 ++--- .../containers/compute_resources_example.py | 23 ++++++------ .../container_deployments_example.py | 36 ++++++++----------- .../environment_variables_example.py | 14 ++++---- .../registry_credentials_example.py | 25 +++++++------ examples/containers/secrets_example.py | 15 
++++---- .../containers/sglang_deployment_example.py | 32 +++++++---------- .../update_deployment_scaling_example.py | 16 +++------ examples/instance_actions.py | 8 ++--- examples/instances_and_volumes.py | 10 +++--- examples/simple_create_instance.py | 8 ++--- examples/ssh_keys.py | 8 ++--- examples/startup_scripts.py | 8 ++--- examples/storage_volumes.py | 8 ++--- 15 files changed, 107 insertions(+), 118 deletions(-) diff --git a/datacrunch/datacrunch.py b/datacrunch/datacrunch.py index 2f5f98b..ce5005f 100644 --- a/datacrunch/datacrunch.py +++ b/datacrunch/datacrunch.py @@ -28,6 +28,11 @@ def __init__(self, client_id: str, client_secret: str, base_url: str = "https:// :type base_url: str, optional """ + # Validate that client_id and client_secret are not empty + if not client_id or not client_secret: + raise ValueError( + "client_id and client_secret must be provided") + # Constants self.constants: Constants = Constants(base_url, VERSION) """Constants""" diff --git a/examples/advanced_create_instance.py b/examples/advanced_create_instance.py index b46f9bb..6b46b36 100644 --- a/examples/advanced_create_instance.py +++ b/examples/advanced_create_instance.py @@ -20,13 +20,14 @@ # Arbitrary duration for the example DURATION = 24 * 7 # one week -# Get client secret from environment variable -CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET'] -CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID +# Get client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') try: # Create datcrunch client - datacrunch = DataCrunchClient(CLIENT_ID, CLIENT_SECRET) + datacrunch = DataCrunchClient( + DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Create new SSH key public_key = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI0qq2Qjt5GPi7DKdcnBHOkvk8xNsG9dA607tnWagOkHC test_key' diff --git a/examples/containers/compute_resources_example.py 
b/examples/containers/compute_resources_example.py index 501194d..a2501c0 100644 --- a/examples/containers/compute_resources_example.py +++ b/examples/containers/compute_resources_example.py @@ -1,7 +1,12 @@ +import os from datacrunch import DataCrunchClient from typing import List from datacrunch.containers.containers import ComputeResource +# Get client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') + def list_all_compute_resources(client: DataCrunchClient) -> List[ComputeResource]: """List all available compute resources. @@ -44,27 +49,25 @@ def list_compute_resources_by_size(client: DataCrunchClient, size: int) -> List[ def main(): # Initialize the client with your credentials - client = DataCrunchClient( - client_id="your_client_id", - client_secret="your_client_secret" - ) + datacrunch = DataCrunchClient( + DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Example 1: List all compute resources - print("\nAll compute resources:") - all_resources = list_all_compute_resources(client) + print("All compute resources:") + all_resources = list_all_compute_resources(datacrunch) for resource in all_resources: print( f"Name: {resource.name}, Size: {resource.size}, Available: {resource.is_available}") # Example 2: List available compute resources - print("\nAvailable compute resources:") - available_resources = list_available_compute_resources(client) + print("Available compute resources:") + available_resources = list_available_compute_resources(datacrunch) for resource in available_resources: print(f"Name: {resource.name}, Size: {resource.size}") # Example 3: List compute resources of size 8 - print("\nCompute resources with size 8:") - size_8_resources = list_compute_resources_by_size(client, 8) + print("Compute resources with size 8:") + size_8_resources = list_compute_resources_by_size(datacrunch, 8) for resource in size_8_resources: 
print(f"Name: {resource.name}, Available: {resource.is_available}") diff --git a/examples/containers/container_deployments_example.py b/examples/containers/container_deployments_example.py index 3bd76b3..cdc7652 100644 --- a/examples/containers/container_deployments_example.py +++ b/examples/containers/container_deployments_example.py @@ -31,12 +31,12 @@ DEPLOYMENT_NAME = "my-deployment" IMAGE_NAME = "your-image-name:version" -# Environment variables +# Get client secret and id from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # DataCrunch client instance -datacrunch_client = None +datacrunch = None def wait_for_deployment_health(client: DataCrunchClient, deployment_name: str, max_attempts: int = 10, delay: int = 30) -> bool: @@ -81,15 +81,9 @@ def cleanup_resources(client: DataCrunchClient) -> None: def main() -> None: """Main function demonstrating deployment lifecycle management.""" try: - # Check required environment variables - if not DATACRUNCH_CLIENT_ID or not DATACRUNCH_CLIENT_SECRET: - print( - "Please set DATACRUNCH_CLIENT_ID and DATACRUNCH_CLIENT_SECRET environment variables") - return - # Initialize client - global datacrunch_client - datacrunch_client = DataCrunchClient( + global datacrunch + datacrunch = DataCrunchClient( DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Create container configuration @@ -160,19 +154,19 @@ def main() -> None: ) # Create the deployment - created_deployment = datacrunch_client.containers.create_deployment( + created_deployment = datacrunch.containers.create_deployment( deployment) print(f"Created deployment: {created_deployment.name}") # Wait for deployment to be healthy - if not wait_for_deployment_health(datacrunch_client, DEPLOYMENT_NAME): + if not wait_for_deployment_health(datacrunch, DEPLOYMENT_NAME): print("Deployment health check failed") - cleanup_resources(datacrunch_client) + 
cleanup_resources(datacrunch) return # Update scaling configuration try: - deployment = datacrunch_client.containers.get_deployment_by_name( + deployment = datacrunch.containers.get_deployment_by_name( DEPLOYMENT_NAME) # Create new scaling options with increased replica counts deployment.scaling = ScalingOptions( @@ -194,7 +188,7 @@ def main() -> None: ) ) ) - updated_deployment = datacrunch_client.containers.update_deployment( + updated_deployment = datacrunch.containers.update_deployment( DEPLOYMENT_NAME, deployment) print(f"Updated deployment scaling: {updated_deployment.name}") except APIException as e: @@ -203,33 +197,33 @@ def main() -> None: # Demonstrate deployment operations try: # Pause deployment - datacrunch_client.containers.pause_deployment(DEPLOYMENT_NAME) + datacrunch.containers.pause_deployment(DEPLOYMENT_NAME) print("Deployment paused") time.sleep(60) # Resume deployment - datacrunch_client.containers.resume_deployment(DEPLOYMENT_NAME) + datacrunch.containers.resume_deployment(DEPLOYMENT_NAME) print("Deployment resumed") # Restart deployment - datacrunch_client.containers.restart_deployment(DEPLOYMENT_NAME) + datacrunch.containers.restart_deployment(DEPLOYMENT_NAME) print("Deployment restarted") # Purge queue - datacrunch_client.containers.purge_deployment_queue( + datacrunch.containers.purge_deployment_queue( DEPLOYMENT_NAME) print("Queue purged") except APIException as e: print(f"Error in deployment operations: {e}") # Clean up - cleanup_resources(datacrunch_client) + cleanup_resources(datacrunch) except Exception as e: print(f"Unexpected error: {e}") # Attempt cleanup even if there was an error try: - cleanup_resources(datacrunch_client) + cleanup_resources(datacrunch) except Exception as cleanup_error: print(f"Error during cleanup after failure: {cleanup_error}") diff --git a/examples/containers/environment_variables_example.py b/examples/containers/environment_variables_example.py index 3a98220..8e33bf5 100644 --- 
a/examples/containers/environment_variables_example.py +++ b/examples/containers/environment_variables_example.py @@ -12,12 +12,12 @@ from datacrunch import DataCrunchClient from typing import Dict, List +# Get client secret and id from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Initialize DataCrunch client -datacrunch_client = DataCrunchClient(client_id=DATACRUNCH_CLIENT_ID, - client_secret=DATACRUNCH_CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Example deployment and container names DEPLOYMENT_NAME = "my-deployment" @@ -36,13 +36,13 @@ def print_env_vars(env_vars: Dict[str, List[EnvVar]]) -> None: def main(): # First, let's get the current environment variables print("Getting current environment variables...") - env_vars = datacrunch_client.containers.get_deployment_environment_variables( + env_vars = datacrunch.containers.get_deployment_environment_variables( DEPLOYMENT_NAME) print_env_vars(env_vars) # Create a new secret secret_name = "my-secret-key" - datacrunch_client.containers.create_secret( + datacrunch.containers.create_secret( secret_name, "my-secret-value" ) @@ -62,7 +62,7 @@ def main(): ) ] - env_vars = datacrunch_client.containers.add_deployment_environment_variables( + env_vars = datacrunch.containers.add_deployment_environment_variables( deployment_name=DEPLOYMENT_NAME, container_name=CONTAINER_NAME, env_vars=new_env_vars @@ -79,7 +79,7 @@ def main(): ), ] - env_vars = datacrunch_client.containers.update_deployment_environment_variables( + env_vars = datacrunch.containers.update_deployment_environment_variables( deployment_name=DEPLOYMENT_NAME, container_name=CONTAINER_NAME, env_vars=updated_env_vars @@ -88,7 +88,7 @@ def main(): # Delete environment variables print("\nDeleting environment variables...") - env_vars = datacrunch_client.containers.delete_deployment_environment_variables( + 
env_vars = datacrunch.containers.delete_deployment_environment_variables( deployment_name=DEPLOYMENT_NAME, container_name=CONTAINER_NAME, env_var_names=["DEBUG"] diff --git a/examples/containers/registry_credentials_example.py b/examples/containers/registry_credentials_example.py index 6c20f94..e13400f 100644 --- a/examples/containers/registry_credentials_example.py +++ b/examples/containers/registry_credentials_example.py @@ -8,13 +8,12 @@ CustomRegistryCredentials ) -# Environment variables +# Get client secret and id from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Initialize DataCrunch client -datacrunch_client = DataCrunchClient(client_id=DATACRUNCH_CLIENT_ID, - client_secret=DATACRUNCH_CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Example 1: DockerHub Credentials dockerhub_creds = DockerHubCredentials( @@ -22,7 +21,7 @@ username="your-dockerhub-username", access_token="your-dockerhub-access-token" ) -datacrunch_client.containers.add_registry_credentials(dockerhub_creds) +datacrunch.containers.add_registry_credentials(dockerhub_creds) print("Created DockerHub credentials") # Example 2: GitHub Container Registry Credentials @@ -31,7 +30,7 @@ username="your-github-username", access_token="your-github-token" ) -datacrunch_client.containers.add_registry_credentials(github_creds) +datacrunch.containers.add_registry_credentials(github_creds) print("Created GitHub credentials") # Example 3: Google Container Registry (GCR) Credentials @@ -53,7 +52,7 @@ name="my-gcr-creds", service_account_key=gcr_service_account_key ) -datacrunch_client.containers.add_registry_credentials(gcr_creds) +datacrunch.containers.add_registry_credentials(gcr_creds) print("Created GCR credentials") # Example 4: AWS ECR Credentials @@ -64,7 +63,7 @@ region="eu-north-1", ecr_repo="887841266746.dkr.ecr.eu-north-1.amazonaws.com" ) 
-datacrunch_client.containers.add_registry_credentials(aws_creds) +datacrunch.containers.add_registry_credentials(aws_creds) print("Created AWS ECR credentials") # Example 5: Custom Registry Credentials @@ -80,13 +79,13 @@ name="my-custom-registry-creds", docker_config_json=custom_docker_config ) -datacrunch_client.containers.add_registry_credentials(custom_creds) +datacrunch.containers.add_registry_credentials(custom_creds) print("Created Custom registry credentials") # Delete all registry credentials -datacrunch_client.containers.delete_registry_credentials('my-dockerhub-creds') -datacrunch_client.containers.delete_registry_credentials('my-github-creds') -datacrunch_client.containers.delete_registry_credentials('my-gcr-creds') -datacrunch_client.containers.delete_registry_credentials('my-aws-ecr-creds') -datacrunch_client.containers.delete_registry_credentials( +datacrunch.containers.delete_registry_credentials('my-dockerhub-creds') +datacrunch.containers.delete_registry_credentials('my-github-creds') +datacrunch.containers.delete_registry_credentials('my-gcr-creds') +datacrunch.containers.delete_registry_credentials('my-aws-ecr-creds') +datacrunch.containers.delete_registry_credentials( 'my-custom-registry-creds') diff --git a/examples/containers/secrets_example.py b/examples/containers/secrets_example.py index ed12d65..7c46826 100644 --- a/examples/containers/secrets_example.py +++ b/examples/containers/secrets_example.py @@ -1,16 +1,15 @@ import os from datacrunch import DataCrunchClient -# Environment variables +# Get client secret and id from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Initialize DataCrunch client -datacrunch_client = DataCrunchClient(client_id=DATACRUNCH_CLIENT_ID, - client_secret=DATACRUNCH_CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # List all secrets -secrets = 
datacrunch_client.containers.get_secrets() +secrets = datacrunch.containers.get_secrets() print("Available secrets:") for secret in secrets: print(f"- {secret.name} (created at: {secret.created_at})") @@ -18,21 +17,21 @@ # Create a new secret secret_name = "my-api-key" secret_value = "super-secret-value" -datacrunch_client.containers.create_secret( +datacrunch.containers.create_secret( name=secret_name, value=secret_value ) print(f"\nCreated new secret: {secret_name}") # Delete a secret (with force=False by default) -datacrunch_client.containers.delete_secret(secret_name) +datacrunch.containers.delete_secret(secret_name) print(f"\nDeleted secret: {secret_name}") # Delete a secret with force=True (will delete even if secret is in use) secret_name = "another-secret" -datacrunch_client.containers.create_secret( +datacrunch.containers.create_secret( name=secret_name, value=secret_value ) -datacrunch_client.containers.delete_secret(secret_name, force=True) +datacrunch.containers.delete_secret(secret_name, force=True) print(f"\nForce deleted secret: {secret_name}") diff --git a/examples/containers/sglang_deployment_example.py b/examples/containers/sglang_deployment_example.py index e6d5c23..df668db 100644 --- a/examples/containers/sglang_deployment_example.py +++ b/examples/containers/sglang_deployment_example.py @@ -35,16 +35,16 @@ MODEL_PATH = "deepseek-ai/deepseek-llm-7b-chat" HF_SECRET_NAME = "huggingface-token" IMAGE_URL = "docker.io/lmsysorg/sglang:v0.4.1.post6-cu124" +CONTAINERS_API_URL = f'https://containers.datacrunch.io/{DEPLOYMENT_NAME}' -# Environment variables +# Get confidential values from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') HF_TOKEN = os.environ.get('HF_TOKEN') INFERENCE_API_KEY = os.environ.get('INFERENCE_API_KEY') -CONTAINERS_API_URL = f'https://containers.datacrunch.io/{DEPLOYMENT_NAME}' # DataCrunch client instance (global for graceful 
shutdown) -datacrunch_client = None +datacrunch = None def wait_for_deployment_health(datacrunch_client: DataCrunchClient, deployment_name: str, max_attempts: int = 20, delay: int = 30) -> bool: @@ -93,7 +93,7 @@ def graceful_shutdown(signum, frame) -> None: """Handle graceful shutdown on signals.""" print(f"\nSignal {signum} received, cleaning up resources...") try: - cleanup_resources(datacrunch_client) + cleanup_resources(datacrunch) except Exception as e: print(f"Error during cleanup: {e}") sys.exit(0) @@ -166,19 +166,13 @@ def test_deployment(base_url: str, api_key: str) -> None: def main() -> None: """Main function demonstrating SGLang deployment.""" try: - # Check required environment variables - if not DATACRUNCH_CLIENT_ID or not DATACRUNCH_CLIENT_SECRET: - print( - "Please set DATACRUNCH_CLIENT_ID and DATACRUNCH_CLIENT_SECRET environment variables") - return - if not HF_TOKEN: print("Please set HF_TOKEN environment variable with your Hugging Face token") return # Initialize client - global datacrunch_client - datacrunch_client = DataCrunchClient( + global datacrunch + datacrunch = DataCrunchClient( DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Register signal handlers for cleanup @@ -189,12 +183,12 @@ def main() -> None: print(f"Creating secret for Hugging Face token: {HF_SECRET_NAME}") try: # Check if secret already exists - existing_secrets = datacrunch_client.containers.get_secrets() + existing_secrets = datacrunch.containers.get_secrets() secret_exists = any( secret.name == HF_SECRET_NAME for secret in existing_secrets) if not secret_exists: - datacrunch_client.containers.create_secret( + datacrunch.containers.create_secret( HF_SECRET_NAME, HF_TOKEN) print(f"Secret '{HF_SECRET_NAME}' created successfully") else: @@ -264,14 +258,14 @@ def main() -> None: ) # Create the deployment - created_deployment = datacrunch_client.containers.create(deployment) + created_deployment = datacrunch.containers.create(deployment) print(f"Created deployment: 
{created_deployment.name}") print("This will take several minutes while the model is downloaded and the server starts...") # Wait for deployment to be healthy - if not wait_for_deployment_health(datacrunch_client, DEPLOYMENT_NAME): + if not wait_for_deployment_health(datacrunch, DEPLOYMENT_NAME): print("Deployment health check failed") - cleanup_resources(datacrunch_client) + cleanup_resources(datacrunch) return # Get the deployment endpoint URL and inference API key @@ -301,7 +295,7 @@ def main() -> None: keep_running = input( "\nDo you want to keep the deployment running? (y/n): ") if keep_running.lower() != 'y': - cleanup_resources(datacrunch_client) + cleanup_resources(datacrunch) else: print( f"Deployment {DEPLOYMENT_NAME} is running. Don't forget to delete it when finished.") @@ -312,7 +306,7 @@ def main() -> None: print(f"Unexpected error: {e}") # Attempt cleanup even if there was an error try: - cleanup_resources(datacrunch_client) + cleanup_resources(datacrunch) except Exception as cleanup_error: print(f"Error during cleanup after failure: {cleanup_error}") diff --git a/examples/containers/update_deployment_scaling_example.py b/examples/containers/update_deployment_scaling_example.py index e698b40..d06f9d4 100644 --- a/examples/containers/update_deployment_scaling_example.py +++ b/examples/containers/update_deployment_scaling_example.py @@ -18,7 +18,7 @@ # Configuration - replace with your deployment name DEPLOYMENT_NAME = "my-deployment" -# Environment variables +# Get client secret and id from environment variables DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') @@ -87,26 +87,20 @@ def update_deployment_scaling(client: DataCrunchClient, deployment_name: str) -> def main() -> None: """Main function demonstrating scaling updates.""" try: - # Check required environment variables - if not DATACRUNCH_CLIENT_ID or not DATACRUNCH_CLIENT_SECRET: - print( - "Please set 
DATACRUNCH_CLIENT_ID and DATACRUNCH_CLIENT_SECRET environment variables") - return - # Initialize client - datacrunch_client = DataCrunchClient( + datacrunch = DataCrunchClient( DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Verify deployment exists - if not check_deployment_exists(datacrunch_client, DEPLOYMENT_NAME): + if not check_deployment_exists(datacrunch, DEPLOYMENT_NAME): print(f"Deployment {DEPLOYMENT_NAME} does not exist.") return # Update scaling options using the API - update_deployment_scaling(datacrunch_client, DEPLOYMENT_NAME) + update_deployment_scaling(datacrunch, DEPLOYMENT_NAME) # Get current scaling options - scaling_options = datacrunch_client.containers.get_deployment_scaling_options( + scaling_options = datacrunch.containers.get_deployment_scaling_options( DEPLOYMENT_NAME) print(f"\nCurrent scaling configuration:") print(f"Min replicas: {scaling_options.min_replica_count}") diff --git a/examples/instance_actions.py b/examples/instance_actions.py index 0a0909b..4021405 100644 --- a/examples/instance_actions.py +++ b/examples/instance_actions.py @@ -4,12 +4,12 @@ from datacrunch.exceptions import APIException -# Get client secret from environment variable -CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET'] -CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID +# Get client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Create datcrunch client -datacrunch = DataCrunchClient(CLIENT_ID, CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Get all SSH keys ssh_keys = datacrunch.ssh_keys.get() diff --git a/examples/instances_and_volumes.py b/examples/instances_and_volumes.py index 624584d..db83d29 100644 --- a/examples/instances_and_volumes.py +++ b/examples/instances_and_volumes.py @@ -1,12 +1,12 @@ import os from datacrunch import DataCrunchClient -# Get 
client secret from environment variable -CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET'] -CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID +# Get client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Create datcrunch client -datacrunch = DataCrunchClient(CLIENT_ID, CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Get some volume type constants NVMe = datacrunch.constants.volume_types.NVMe @@ -27,7 +27,7 @@ description='example instance', volumes=[ {"type": HDD, "name": "volume-1", - "size": 95}, + "size": 95}, {"type": NVMe, "name": "volume-2", "size": 95} ]) diff --git a/examples/simple_create_instance.py b/examples/simple_create_instance.py index b98173f..6f4cb40 100644 --- a/examples/simple_create_instance.py +++ b/examples/simple_create_instance.py @@ -1,12 +1,12 @@ import os from datacrunch import DataCrunchClient -# Get client secret from environment variable -CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET'] -CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID +# Get client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Create datcrunch client -datacrunch = DataCrunchClient(CLIENT_ID, CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Get all SSH keys id's ssh_keys = datacrunch.ssh_keys.get() diff --git a/examples/ssh_keys.py b/examples/ssh_keys.py index 80328cb..dc91639 100644 --- a/examples/ssh_keys.py +++ b/examples/ssh_keys.py @@ -1,12 +1,12 @@ import os from datacrunch import DataCrunchClient -# Get client secret from environment variable -CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET'] -CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID +# Get 
client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Create datcrunch client -datacrunch = DataCrunchClient(CLIENT_ID, CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Create new SSH key public_key = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI0qq2Qjt5GPi7DKdcnBHOkvk8xNsG9dA607tnWagOkHC test_key' diff --git a/examples/startup_scripts.py b/examples/startup_scripts.py index fed365e..baa8587 100644 --- a/examples/startup_scripts.py +++ b/examples/startup_scripts.py @@ -1,12 +1,12 @@ import os from datacrunch import DataCrunchClient -# Get client secret from environment variable -CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET'] -CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID +# Get client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Create datcrunch client -datacrunch = DataCrunchClient(CLIENT_ID, CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Create new startup script bash_script = """echo this is a test script for serious cat business diff --git a/examples/storage_volumes.py b/examples/storage_volumes.py index 72e4a2e..1dc51d0 100644 --- a/examples/storage_volumes.py +++ b/examples/storage_volumes.py @@ -1,12 +1,12 @@ import os from datacrunch import DataCrunchClient -# Get client secret from environment variable -CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET'] -CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID +# Get client secret and id from environment variables +DATACRUNCH_CLIENT_ID = os.environ.get('DATACRUNCH_CLIENT_ID') +DATACRUNCH_CLIENT_SECRET = os.environ.get('DATACRUNCH_CLIENT_SECRET') # Create datcrunch client -datacrunch = DataCrunchClient(CLIENT_ID, 
CLIENT_SECRET) +datacrunch = DataCrunchClient(DATACRUNCH_CLIENT_ID, DATACRUNCH_CLIENT_SECRET) # Get some volume type constants NVMe = datacrunch.constants.volume_types.NVMe From 36469cf716fe0585a1505959db8cd5aca812731d Mon Sep 17 00:00:00 2001 From: Tamir Date: Thu, 3 Apr 2025 10:41:56 +0300 Subject: [PATCH 5/5] changelog entry --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b46ef9a..ca00f6a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,6 +3,7 @@ Changelog * Added environment variables to container deployment example * Updated examples image from 'fastai' to 'ubuntu-24.04-cuda-12.8-open-docker' +* Consistent naming and loading of credentials from environment variables in examples v1.8.4 (2025-03-25) -------------------