dapi is a library that simplifies the process of submitting, running, and monitoring [TAPIS v2 / …
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/dapi/jobs.html b/docs/dapi/jobs.html
new file mode 100644
index 0000000..ca4ff96
--- /dev/null
+++ b/docs/dapi/jobs.html
@@ -0,0 +1,605 @@
+
+
+
+
+
+
+dapi.jobs API documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Module dapi.jobs
+
+
+
+
+Expand source code
+
+
import time
+from datetime import datetime, timedelta, timezone
+from tqdm import tqdm
+import logging
+
+# Configuring the logging system
+# logging.basicConfig(
+# level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+# )
+
+def get_status(ag, job_id, time_lapse=15):
+ """
+ Retrieves and monitors the status of a job from Agave.
+
+ This function initially waits for the job to start, displaying its progress using
+ a tqdm progress bar. Once the job starts, it monitors the job's status up to
+ a maximum duration specified by the job's "maxHours". If the job completes or fails
+ before reaching this maximum duration, it returns the job's final status.
+
+ Args:
+ ag (object): The Agave job object used to interact with the job.
+ job_id (str): The unique identifier of the job to monitor.
+ time_lapse (int, optional): Time interval, in seconds, to wait between status
+ checks. Defaults to 15 seconds.
+
+ Returns:
+ str: The final status of the job. Typical values include "FINISHED", "FAILED",
+ and "STOPPED".
+
+ Raises:
+ No exceptions are explicitly raised, but potential exceptions raised by the Agave
+ job object or other called functions/methods will propagate.
+ """
+
+ previous_status = None
+ # Initially check if the job is already running
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+
+ job_details = ag.jobs.get(jobId=job_id)
+ max_hours = job_details["maxHours"]
+
+ # Using tqdm to provide visual feedback while waiting for job to start
+ with tqdm(desc="Waiting for job to start", dynamic_ncols=True) as pbar:
+ while status not in ["RUNNING", "FINISHED", "FAILED", "STOPPED"]:
+ time.sleep(time_lapse)
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+ pbar.update(1)
+ pbar.set_postfix_str(f"Status: {status}")
+
+ # Once the job is running, monitor it for up to maxHours
+ max_iterations = int(max_hours * 3600 // time_lapse)
+
+ # Using tqdm for progress bar
+ for _ in tqdm(range(max_iterations), desc="Monitoring job", ncols=100):
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+
+ # Print status if it has changed
+ if status != previous_status:
+ tqdm.write(f"\tStatus: {status}")
+ previous_status = status
+
+ # Break the loop if job reaches one of these statuses
+ if status in ["FINISHED", "FAILED", "STOPPED"]:
+ break
+
+ time.sleep(time_lapse)
+ else:
+ # This block will execute if the for loop completes without a 'break'
+ logging.warn("Warning: Maximum monitoring time reached!")
+
+ return status
+
+
+def runtime_summary(ag, job_id, verbose=False):
+ """Get the runtime of a job.
+
+ Args:
+ ag (object): The Agave object that has the job details.
+ job_id (str): The ID of the job for which the runtime needs to be determined.
+ verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.
+
+ Returns:
+ None: This function doesn't return a value, but it prints the runtime details.
+
+ """
+
+ print("Runtime Summary")
+ print("---------------")
+
+ job_history = ag.jobs.getHistory(jobId=job_id)
+ total_time = job_history[-1]["created"] - job_history[0]["created"]
+
+ status_times = {}
+
+ for i in range(len(job_history) - 1):
+ current_status = job_history[i]["status"]
+ elapsed_time = job_history[i + 1]["created"] - job_history[i]["created"]
+
+ # Aggregate times for each status
+ if current_status in status_times:
+ status_times[current_status] += elapsed_time
+ else:
+ status_times[current_status] = elapsed_time
+
+ # Filter the statuses if verbose is False
+ if not verbose:
+ filtered_statuses = {
+ "PENDING",
+ "QUEUED",
+ "RUNNING",
+ "FINISHED",
+ "FAILED",
+ }
+ status_times = {
+ status: time
+ for status, time in status_times.items()
+ if status in filtered_statuses
+ }
+
+ # Determine the max width of status names for alignment
+ max_status_width = max(len(status) for status in status_times.keys())
+
+ # Print the aggregated times for each unique status in a table format
+ for status, time in status_times.items():
+ print(f"{status.upper():<{max_status_width + 2}} time: {time}")
+
+ print(f"{'TOTAL':<{max_status_width + 2}} time: {total_time}")
+ print("---------------")
+
+
+def generate_job_info(
+ ag,
+ appid: str,
+ jobname: str = "dsjob",
+ queue: str = "development",
+ nnodes: int = 1,
+ nprocessors: int = 1,
+ runtime: str = "00:10:00",
+ inputs=None,
+ parameters=None,
+) -> dict:
+ """Generate a job information dictionary based on provided arguments.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ appid (str): The application ID for the job.
+ jobname (str, optional): The name of the job. Defaults to 'dsjob'.
+        queue (str, optional): The batch queue name. Defaults to 'development'.
+ nnodes (int, optional): The number of nodes required. Defaults to 1.
+ nprocessors (int, optional): The number of processors per node. Defaults to 1.
+ runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
+ inputs (dict, optional): The inputs for the job. Defaults to None.
+ parameters (dict, optional): The parameters for the job. Defaults to None.
+
+ Returns:
+ dict: A dictionary containing the job information.
+
+ Raises:
+ ValueError: If the provided appid is not valid.
+ """
+
+ try:
+ app = ag.apps.get(appId=appid)
+ except Exception:
+ raise ValueError(f"Invalid app ID: {appid}")
+
+ job_info = {
+ "appId": appid,
+ "name": jobname,
+ "batchQueue": queue,
+ "nodeCount": nnodes,
+ "processorsPerNode": nprocessors,
+ "memoryPerNode": "1",
+ "maxRunTime": runtime,
+ "archive": True,
+ "inputs": inputs,
+ "parameters": parameters,
+ }
+
+ return job_info
+
+
+def get_archive_path(ag, job_id):
+ """
+    Get the archive path for a given job ID and modify the user directory
+ to '/home/jupyter/MyData'.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ job_id (str): The job ID to retrieve the archive path for.
+
+ Returns:
+ str: The modified archive path.
+
+ Raises:
+ ValueError: If the archivePath format is unexpected.
+ """
+
+ # Fetch the job info.
+ job_info = ag.jobs.get(jobId=job_id)
+
+ # Try to split the archive path to extract the user.
+ try:
+ user, _ = job_info.archivePath.split("/", 1)
+ except ValueError:
+ raise ValueError(f"Unexpected archivePath format for jobId={job_id}")
+
+ # Construct the new path.
+ new_path = job_info.archivePath.replace(user, "/home/jupyter/MyData")
+
+ return new_path
Generate a job information dictionary based on provided arguments.
+
Args
+
+
ag : object
+
The Agave object to interact with the platform.
+
appid : str
+
The application ID for the job.
+
jobname : str, optional
+
The name of the job. Defaults to 'dsjob'.
+
queue : str, optional
+
The batch queue name. Defaults to 'development'.
+
nnodes : int, optional
+
The number of nodes required. Defaults to 1.
+
nprocessors : int, optional
+
The number of processors per node. Defaults to 1.
+
runtime : str, optional
+
The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
+
inputs : dict, optional
+
The inputs for the job. Defaults to None.
+
parameters : dict, optional
+
The parameters for the job. Defaults to None.
+
+
Returns
+
+
dict
+
A dictionary containing the job information.
+
+
Raises
+
+
ValueError
+
If the provided appid is not valid.
+
+
+
+Expand source code
+
+
def generate_job_info(
+ ag,
+ appid: str,
+ jobname: str = "dsjob",
+ queue: str = "development",
+ nnodes: int = 1,
+ nprocessors: int = 1,
+ runtime: str = "00:10:00",
+ inputs=None,
+ parameters=None,
+) -> dict:
+ """Generate a job information dictionary based on provided arguments.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ appid (str): The application ID for the job.
+ jobname (str, optional): The name of the job. Defaults to 'dsjob'.
+        queue (str, optional): The batch queue name. Defaults to 'development'.
+ nnodes (int, optional): The number of nodes required. Defaults to 1.
+ nprocessors (int, optional): The number of processors per node. Defaults to 1.
+ runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
+ inputs (dict, optional): The inputs for the job. Defaults to None.
+ parameters (dict, optional): The parameters for the job. Defaults to None.
+
+ Returns:
+ dict: A dictionary containing the job information.
+
+ Raises:
+ ValueError: If the provided appid is not valid.
+ """
+
+ try:
+ app = ag.apps.get(appId=appid)
+ except Exception:
+ raise ValueError(f"Invalid app ID: {appid}")
+
+ job_info = {
+ "appId": appid,
+ "name": jobname,
+ "batchQueue": queue,
+ "nodeCount": nnodes,
+ "processorsPerNode": nprocessors,
+ "memoryPerNode": "1",
+ "maxRunTime": runtime,
+ "archive": True,
+ "inputs": inputs,
+ "parameters": parameters,
+ }
+
+ return job_info
+
+
+
+def get_archive_path(ag, job_id)
+
+
+
Get the archive path for a given job ID and modify the user directory
+to '/home/jupyter/MyData'.
+
Args
+
+
ag : object
+
The Agave object to interact with the platform.
+
job_id : str
+
The job ID to retrieve the archive path for.
+
+
Returns
+
+
str
+
The modified archive path.
+
+
Raises
+
+
ValueError
+
If the archivePath format is unexpected.
+
+
+
+Expand source code
+
+
def get_archive_path(ag, job_id):
+ """
+    Get the archive path for a given job ID and modify the user directory
+ to '/home/jupyter/MyData'.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ job_id (str): The job ID to retrieve the archive path for.
+
+ Returns:
+ str: The modified archive path.
+
+ Raises:
+ ValueError: If the archivePath format is unexpected.
+ """
+
+ # Fetch the job info.
+ job_info = ag.jobs.get(jobId=job_id)
+
+ # Try to split the archive path to extract the user.
+ try:
+ user, _ = job_info.archivePath.split("/", 1)
+ except ValueError:
+ raise ValueError(f"Unexpected archivePath format for jobId={job_id}")
+
+ # Construct the new path.
+ new_path = job_info.archivePath.replace(user, "/home/jupyter/MyData")
+
+ return new_path
+
+
+
+def get_status(ag, job_id, time_lapse=15)
+
+
+
Retrieves and monitors the status of a job from Agave.
+
This function initially waits for the job to start, displaying its progress using
+a tqdm progress bar. Once the job starts, it monitors the job's status up to
+a maximum duration specified by the job's "maxHours". If the job completes or fails
+before reaching this maximum duration, it returns the job's final status.
+
Args
+
+
ag : object
+
The Agave job object used to interact with the job.
+
job_id : str
+
The unique identifier of the job to monitor.
+
time_lapse : int, optional
+
Time interval, in seconds, to wait between status
+checks. Defaults to 15 seconds.
+
+
Returns
+
+
str
+
The final status of the job. Typical values include "FINISHED", "FAILED",
+and "STOPPED".
+
+
Raises
+
No exceptions are explicitly raised, but potential exceptions raised by the Agave
+job object or other called functions/methods will propagate.
+
+
+Expand source code
+
+
def get_status(ag, job_id, time_lapse=15):
+ """
+ Retrieves and monitors the status of a job from Agave.
+
+ This function initially waits for the job to start, displaying its progress using
+ a tqdm progress bar. Once the job starts, it monitors the job's status up to
+ a maximum duration specified by the job's "maxHours". If the job completes or fails
+ before reaching this maximum duration, it returns the job's final status.
+
+ Args:
+ ag (object): The Agave job object used to interact with the job.
+ job_id (str): The unique identifier of the job to monitor.
+ time_lapse (int, optional): Time interval, in seconds, to wait between status
+ checks. Defaults to 15 seconds.
+
+ Returns:
+ str: The final status of the job. Typical values include "FINISHED", "FAILED",
+ and "STOPPED".
+
+ Raises:
+ No exceptions are explicitly raised, but potential exceptions raised by the Agave
+ job object or other called functions/methods will propagate.
+ """
+
+ previous_status = None
+ # Initially check if the job is already running
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+
+ job_details = ag.jobs.get(jobId=job_id)
+ max_hours = job_details["maxHours"]
+
+ # Using tqdm to provide visual feedback while waiting for job to start
+ with tqdm(desc="Waiting for job to start", dynamic_ncols=True) as pbar:
+ while status not in ["RUNNING", "FINISHED", "FAILED", "STOPPED"]:
+ time.sleep(time_lapse)
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+ pbar.update(1)
+ pbar.set_postfix_str(f"Status: {status}")
+
+ # Once the job is running, monitor it for up to maxHours
+ max_iterations = int(max_hours * 3600 // time_lapse)
+
+ # Using tqdm for progress bar
+ for _ in tqdm(range(max_iterations), desc="Monitoring job", ncols=100):
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+
+ # Print status if it has changed
+ if status != previous_status:
+ tqdm.write(f"\tStatus: {status}")
+ previous_status = status
+
+ # Break the loop if job reaches one of these statuses
+ if status in ["FINISHED", "FAILED", "STOPPED"]:
+ break
+
+ time.sleep(time_lapse)
+ else:
+ # This block will execute if the for loop completes without a 'break'
+ logging.warn("Warning: Maximum monitoring time reached!")
+
+ return status
+
+
+
+def runtime_summary(ag, job_id, verbose=False)
+
+
+
Get the runtime of a job.
+
Args
+
+
ag : object
+
The Agave object that has the job details.
+
job_id : str
+
The ID of the job for which the runtime needs to be determined.
+
verbose : bool
+
If True, prints all statuses. Otherwise, prints only specific statuses.
+
+
Returns
+
+
None
+
This function doesn't return a value, but it prints the runtime details.
+
+
+
+Expand source code
+
+
def runtime_summary(ag, job_id, verbose=False):
+ """Get the runtime of a job.
+
+ Args:
+ ag (object): The Agave object that has the job details.
+ job_id (str): The ID of the job for which the runtime needs to be determined.
+ verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.
+
+ Returns:
+ None: This function doesn't return a value, but it prints the runtime details.
+
+ """
+
+ print("Runtime Summary")
+ print("---------------")
+
+ job_history = ag.jobs.getHistory(jobId=job_id)
+ total_time = job_history[-1]["created"] - job_history[0]["created"]
+
+ status_times = {}
+
+ for i in range(len(job_history) - 1):
+ current_status = job_history[i]["status"]
+ elapsed_time = job_history[i + 1]["created"] - job_history[i]["created"]
+
+ # Aggregate times for each status
+ if current_status in status_times:
+ status_times[current_status] += elapsed_time
+ else:
+ status_times[current_status] = elapsed_time
+
+ # Filter the statuses if verbose is False
+ if not verbose:
+ filtered_statuses = {
+ "PENDING",
+ "QUEUED",
+ "RUNNING",
+ "FINISHED",
+ "FAILED",
+ }
+ status_times = {
+ status: time
+ for status, time in status_times.items()
+ if status in filtered_statuses
+ }
+
+ # Determine the max width of status names for alignment
+ max_status_width = max(len(status) for status in status_times.keys())
+
+ # Print the aggregated times for each unique status in a table format
+ for status, time in status_times.items():
+ print(f"{status.upper():<{max_status_width + 2}} time: {time}")
+
+ print(f"{'TOTAL':<{max_status_width + 2}} time: {total_time}")
+ print("---------------")
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/dapi/jobs/dir.html b/docs/dapi/jobs/dir.html
new file mode 100644
index 0000000..514cb97
--- /dev/null
+++ b/docs/dapi/jobs/dir.html
@@ -0,0 +1,193 @@
+
+
+
+
+
+
+dapi.jobs.dir API documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Module dapi.jobs.dir
+
+
+
+
+Expand source code
+
+
import os
+
+
+def get_ds_path_uri(ag, path):
+ """
+ Given a path on DesignSafe, determine the correct input URI.
+
+ Args:
+ ag (object): Agave object to fetch profiles or metadata.
+ path (str): The directory path.
+
+ Returns:
+ str: The corresponding input URI.
+
+ Raises:
+ ValueError: If no matching directory pattern is found.
+ """
+
+ # If any of the following directory patterns are found in the path,
+ # process them accordingly.
+ directory_patterns = [
+ ("jupyter/MyData", "designsafe.storage.default", True),
+ ("jupyter/mydata", "designsafe.storage.default", True),
+ ("jupyter/CommunityData", "designsafe.storage.community", False),
+ ("/MyData", "designsafe.storage.default", True),
+ ("/mydata", "designsafe.storage.default", True),
+ ]
+
+ for pattern, storage, use_username in directory_patterns:
+ if pattern in path:
+ path = path.split(pattern).pop()
+ input_dir = ag.profiles.get()["username"] + path if use_username else path
+ input_uri = f"agave://{storage}/{input_dir}"
+ return input_uri.replace(" ", "%20")
+
+ project_patterns = [
+ ("jupyter/MyProjects", "project-"),
+ ("jupyter/projects", "project-"),
+ ]
+
+ for pattern, prefix in project_patterns:
+ if pattern in path:
+ path = path.split(pattern + "/").pop()
+ project_id = path.split("/")[0]
+ query = {"value.projectId": str(project_id)}
+ path = path.split(project_id).pop()
+ project_uuid = ag.meta.listMetadata(q=str(query))[0]["uuid"]
+ input_uri = f"agave://{prefix}{project_uuid}{path}"
+ return input_uri.replace(" ", "%20")
+
+ raise ValueError(f"No matching directory pattern found for: {path}")
+
+
+
+
+
+
+
+
Functions
+
+
+def get_ds_path_uri(ag, path)
+
+
+
Given a path on DesignSafe, determine the correct input URI.
+
Args
+
+
ag : object
+
Agave object to fetch profiles or metadata.
+
path : str
+
The directory path.
+
+
Returns
+
+
str
+
The corresponding input URI.
+
+
Raises
+
+
ValueError
+
If no matching directory pattern is found.
+
+
+
+Expand source code
+
+
def get_ds_path_uri(ag, path):
+ """
+ Given a path on DesignSafe, determine the correct input URI.
+
+ Args:
+ ag (object): Agave object to fetch profiles or metadata.
+ path (str): The directory path.
+
+ Returns:
+ str: The corresponding input URI.
+
+ Raises:
+ ValueError: If no matching directory pattern is found.
+ """
+
+ # If any of the following directory patterns are found in the path,
+ # process them accordingly.
+ directory_patterns = [
+ ("jupyter/MyData", "designsafe.storage.default", True),
+ ("jupyter/mydata", "designsafe.storage.default", True),
+ ("jupyter/CommunityData", "designsafe.storage.community", False),
+ ("/MyData", "designsafe.storage.default", True),
+ ("/mydata", "designsafe.storage.default", True),
+ ]
+
+ for pattern, storage, use_username in directory_patterns:
+ if pattern in path:
+ path = path.split(pattern).pop()
+ input_dir = ag.profiles.get()["username"] + path if use_username else path
+ input_uri = f"agave://{storage}/{input_dir}"
+ return input_uri.replace(" ", "%20")
+
+ project_patterns = [
+ ("jupyter/MyProjects", "project-"),
+ ("jupyter/projects", "project-"),
+ ]
+
+ for pattern, prefix in project_patterns:
+ if pattern in path:
+ path = path.split(pattern + "/").pop()
+ project_id = path.split("/")[0]
+ query = {"value.projectId": str(project_id)}
+ path = path.split(project_id).pop()
+ project_uuid = ag.meta.listMetadata(q=str(query))[0]["uuid"]
+ input_uri = f"agave://{prefix}{project_uuid}{path}"
+ return input_uri.replace(" ", "%20")
+
+ raise ValueError(f"No matching directory pattern found for: {path}")
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/dsjobs/index.html b/docs/dapi/jobs/index.html
similarity index 81%
rename from docs/dsjobs/index.html
rename to docs/dapi/jobs/index.html
index 3685c53..e2e246c 100644
--- a/docs/dsjobs/index.html
+++ b/docs/dapi/jobs/index.html
@@ -4,8 +4,8 @@
-dsjobs API documentation
-
+dapi.jobs API documentation
+
@@ -19,40 +19,40 @@
-
Simplified TAPIS v2 Calls: No need to fiddle with complex API requests. dsjobs abstracts away the complexities.
+
Simplified TAPIS v2 Calls: No need to fiddle with complex API requests. dapi abstracts away the complexities.
Seamless Integration with DesignSafe Jupyter Notebooks: Launch DesignSafe applications directly from the Jupyter environment.
Installation
-
pip3 install dsjobs
+
pip3 install dapi
Expand source code
"""
-`dsjobs` is a library that simplifies the process of submitting, running, and monitoring [TAPIS v2 / AgavePy](https://agavepy.readthedocs.io/en/latest/index.html) jobs on [DesignSafe](https://designsafe-ci.org) via [Jupyter Notebooks](https://jupyter.designsafe-ci.org).
+`dapi` is a library that simplifies the process of submitting, running, and monitoring [TAPIS v2 / AgavePy](https://agavepy.readthedocs.io/en/latest/index.html) jobs on [DesignSafe](https://designsafe-ci.org) via [Jupyter Notebooks](https://jupyter.designsafe-ci.org).
## Features
-* Simplified TAPIS v2 Calls: No need to fiddle with complex API requests. `dsjobs` abstracts away the complexities.
+* Simplified TAPIS v2 Calls: No need to fiddle with complex API requests. `dapi` abstracts away the complexities.
* Seamless Integration with DesignSafe Jupyter Notebooks: Launch DesignSafe applications directly from the Jupyter environment.
## Installation
```shell
-pip3 install dsjobs
+pip3 install dapi
```
"""
@@ -63,11 +63,11 @@
import time
+from datetime import datetime, timedelta, timezone
+from tqdm import tqdm
+import logging
+
+# Configuring the logging system
+# logging.basicConfig(
+# level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+# )
+
+def get_status(ag, job_id, time_lapse=15):
+ """
+ Retrieves and monitors the status of a job from Agave.
+
+ This function initially waits for the job to start, displaying its progress using
+ a tqdm progress bar. Once the job starts, it monitors the job's status up to
+ a maximum duration specified by the job's "maxHours". If the job completes or fails
+ before reaching this maximum duration, it returns the job's final status.
+
+ Args:
+ ag (object): The Agave job object used to interact with the job.
+ job_id (str): The unique identifier of the job to monitor.
+ time_lapse (int, optional): Time interval, in seconds, to wait between status
+ checks. Defaults to 15 seconds.
+
+ Returns:
+ str: The final status of the job. Typical values include "FINISHED", "FAILED",
+ and "STOPPED".
+
+ Raises:
+ No exceptions are explicitly raised, but potential exceptions raised by the Agave
+ job object or other called functions/methods will propagate.
+ """
+
+ previous_status = None
+ # Initially check if the job is already running
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+
+ job_details = ag.jobs.get(jobId=job_id)
+ max_hours = job_details["maxHours"]
+
+ # Using tqdm to provide visual feedback while waiting for job to start
+ with tqdm(desc="Waiting for job to start", dynamic_ncols=True) as pbar:
+ while status not in ["RUNNING", "FINISHED", "FAILED", "STOPPED"]:
+ time.sleep(time_lapse)
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+ pbar.update(1)
+ pbar.set_postfix_str(f"Status: {status}")
+
+ # Once the job is running, monitor it for up to maxHours
+ max_iterations = int(max_hours * 3600 // time_lapse)
+
+ # Using tqdm for progress bar
+ for _ in tqdm(range(max_iterations), desc="Monitoring job", ncols=100):
+ status = ag.jobs.getStatus(jobId=job_id)["status"]
+
+ # Print status if it has changed
+ if status != previous_status:
+ tqdm.write(f"\tStatus: {status}")
+ previous_status = status
+
+ # Break the loop if job reaches one of these statuses
+ if status in ["FINISHED", "FAILED", "STOPPED"]:
+ break
+
+ time.sleep(time_lapse)
+ else:
+ # This block will execute if the for loop completes without a 'break'
+ logging.warn("Warning: Maximum monitoring time reached!")
+
+ return status
+
+
+def runtime_summary(ag, job_id, verbose=False):
+ """Get the runtime of a job.
+
+ Args:
+ ag (object): The Agave object that has the job details.
+ job_id (str): The ID of the job for which the runtime needs to be determined.
+ verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.
+
+ Returns:
+ None: This function doesn't return a value, but it prints the runtime details.
+
+ """
+
+ print("Runtime Summary")
+ print("---------------")
+
+ job_history = ag.jobs.getHistory(jobId=job_id)
+ total_time = job_history[-1]["created"] - job_history[0]["created"]
+
+ status_times = {}
+
+ for i in range(len(job_history) - 1):
+ current_status = job_history[i]["status"]
+ elapsed_time = job_history[i + 1]["created"] - job_history[i]["created"]
+
+ # Aggregate times for each status
+ if current_status in status_times:
+ status_times[current_status] += elapsed_time
+ else:
+ status_times[current_status] = elapsed_time
+
+ # Filter the statuses if verbose is False
+ if not verbose:
+ filtered_statuses = {
+ "PENDING",
+ "QUEUED",
+ "RUNNING",
+ "FINISHED",
+ "FAILED",
+ }
+ status_times = {
+ status: time
+ for status, time in status_times.items()
+ if status in filtered_statuses
+ }
+
+ # Determine the max width of status names for alignment
+ max_status_width = max(len(status) for status in status_times.keys())
+
+ # Print the aggregated times for each unique status in a table format
+ for status, time in status_times.items():
+ print(f"{status.upper():<{max_status_width + 2}} time: {time}")
+
+ print(f"{'TOTAL':<{max_status_width + 2}} time: {total_time}")
+ print("---------------")
+
+
+def generate_job_info(
+ ag,
+ appid: str,
+ jobname: str = "dsjob",
+ queue: str = "development",
+ nnodes: int = 1,
+ nprocessors: int = 1,
+ runtime: str = "00:10:00",
+ inputs=None,
+ parameters=None,
+) -> dict:
+ """Generate a job information dictionary based on provided arguments.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ appid (str): The application ID for the job.
+ jobname (str, optional): The name of the job. Defaults to 'dsjob'.
+        queue (str, optional): The batch queue name. Defaults to 'development'.
+ nnodes (int, optional): The number of nodes required. Defaults to 1.
+ nprocessors (int, optional): The number of processors per node. Defaults to 1.
+ runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
+ inputs (dict, optional): The inputs for the job. Defaults to None.
+ parameters (dict, optional): The parameters for the job. Defaults to None.
+
+ Returns:
+ dict: A dictionary containing the job information.
+
+ Raises:
+ ValueError: If the provided appid is not valid.
+ """
+
+ try:
+ app = ag.apps.get(appId=appid)
+ except Exception:
+ raise ValueError(f"Invalid app ID: {appid}")
+
+ job_info = {
+ "appId": appid,
+ "name": jobname,
+ "batchQueue": queue,
+ "nodeCount": nnodes,
+ "processorsPerNode": nprocessors,
+ "memoryPerNode": "1",
+ "maxRunTime": runtime,
+ "archive": True,
+ "inputs": inputs,
+ "parameters": parameters,
+ }
+
+ return job_info
+
+
+def get_archive_path(ag, job_id):
+ """
+    Get the archive path for a given job ID and modify the user directory
+ to '/home/jupyter/MyData'.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ job_id (str): The job ID to retrieve the archive path for.
+
+ Returns:
+ str: The modified archive path.
+
+ Raises:
+ ValueError: If the archivePath format is unexpected.
+ """
+
+ # Fetch the job info.
+ job_info = ag.jobs.get(jobId=job_id)
+
+ # Try to split the archive path to extract the user.
+ try:
+ user, _ = job_info.archivePath.split("/", 1)
+ except ValueError:
+ raise ValueError(f"Unexpected archivePath format for jobId={job_id}")
+
+ # Construct the new path.
+ new_path = job_info.archivePath.replace(user, "/home/jupyter/MyData")
+
+ return new_path
Generate a job information dictionary based on provided arguments.
+
Args
+
+
ag : object
+
The Agave object to interact with the platform.
+
appid : str
+
The application ID for the job.
+
jobname : str, optional
+
The name of the job. Defaults to 'dsjob'.
+
queue : str, optional
+
The batch queue name. Defaults to 'development'.
+
nnodes : int, optional
+
The number of nodes required. Defaults to 1.
+
nprocessors : int, optional
+
The number of processors per node. Defaults to 1.
+
runtime : str, optional
+
The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
+
inputs : dict, optional
+
The inputs for the job. Defaults to None.
+
parameters : dict, optional
+
The parameters for the job. Defaults to None.
+
+
Returns
+
+
dict
+
A dictionary containing the job information.
+
+
Raises
+
+
ValueError
+
If the provided appid is not valid.
+
+
+
+Expand source code
+
+
def generate_job_info(
+ ag,
+ appid: str,
+ jobname: str = "dsjob",
+ queue: str = "development",
+ nnodes: int = 1,
+ nprocessors: int = 1,
+ runtime: str = "00:10:00",
+ inputs=None,
+ parameters=None,
+) -> dict:
+ """Generate a job information dictionary based on provided arguments.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ appid (str): The application ID for the job.
+ jobname (str, optional): The name of the job. Defaults to 'dsjob'.
+        queue (str, optional): The batch queue name. Defaults to 'development'.
+ nnodes (int, optional): The number of nodes required. Defaults to 1.
+ nprocessors (int, optional): The number of processors per node. Defaults to 1.
+ runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
+ inputs (dict, optional): The inputs for the job. Defaults to None.
+ parameters (dict, optional): The parameters for the job. Defaults to None.
+
+ Returns:
+ dict: A dictionary containing the job information.
+
+ Raises:
+ ValueError: If the provided appid is not valid.
+ """
+
+ try:
+ app = ag.apps.get(appId=appid)
+ except Exception:
+ raise ValueError(f"Invalid app ID: {appid}")
+
+ job_info = {
+ "appId": appid,
+ "name": jobname,
+ "batchQueue": queue,
+ "nodeCount": nnodes,
+ "processorsPerNode": nprocessors,
+ "memoryPerNode": "1",
+ "maxRunTime": runtime,
+ "archive": True,
+ "inputs": inputs,
+ "parameters": parameters,
+ }
+
+ return job_info
+
+
+
+def get_archive_path(ag, job_id)
+
+
+
Get the archive path for a given job ID and modify the user directory
+to '/home/jupyter/MyData'.
+
Args
+
+
ag : object
+
The Agave object to interact with the platform.
+
job_id : str
+
The job ID to retrieve the archive path for.
+
+
Returns
+
+
str
+
The modified archive path.
+
+
Raises
+
+
ValueError
+
If the archivePath format is unexpected.
+
+
+
+Expand source code
+
+
def get_archive_path(ag, job_id):
+ """
+ Get the archive path for a given job ID and modifies the user directory
+ to '/home/jupyter/MyData'.
+
+ Args:
+ ag (object): The Agave object to interact with the platform.
+ job_id (str): The job ID to retrieve the archive path for.
+
+ Returns:
+ str: The modified archive path.
+
+ Raises:
+ ValueError: If the archivePath format is unexpected.
+ """
+
+ # Fetch the job info.
+ job_info = ag.jobs.get(jobId=job_id)
+
+ # Try to split the archive path to extract the user.
+ try:
+ user, _ = job_info.archivePath.split("/", 1)
+ except ValueError:
+ raise ValueError(f"Unexpected archivePath format for jobId={job_id}")
+
+ # Construct the new path.
+ new_path = job_info.archivePath.replace(user, "/home/jupyter/MyData")
+
+ return new_path
+
+
+
+def get_status(ag, job_id, time_lapse=15)
+
+
+
Retrieves and monitors the status of a job from Agave.
+
This function initially waits for the job to start, displaying its progress using
+a tqdm progress bar. Once the job starts, it monitors the job's status up to
+a maximum duration specified by the job's "maxHours". If the job completes or fails
+before reaching this maximum duration, it returns the job's final status.
+
Args
+
+
ag : object
+
The Agave job object used to interact with the job.
+
job_id : str
+
The unique identifier of the job to monitor.
+
time_lapse : int, optional
+
Time interval, in seconds, to wait between status
+checks. Defaults to 15 seconds.
+
+
Returns
+
+
str
+
The final status of the job. Typical values include "FINISHED", "FAILED",
+and "STOPPED".
+
+
Raises
+
No exceptions are explicitly raised, but potential exceptions raised by the Agave
+job object or other called functions/methods will propagate.
+
+
+Expand source code
+
+
def get_status(ag, job_id, time_lapse=15):
    """
    Retrieve and monitor the status of a job from Agave.

    This function initially waits for the job to start, displaying its progress
    using a tqdm progress bar. Once the job starts, it monitors the job's status
    up to a maximum duration specified by the job's "maxHours". If the job
    completes or fails before reaching this maximum duration, it returns the
    job's final status.

    Args:
        ag (object): The Agave job object used to interact with the job.
        job_id (str): The unique identifier of the job to monitor.
        time_lapse (int, optional): Time interval, in seconds, to wait between
            status checks. Defaults to 15 seconds.

    Returns:
        str: The final status of the job. Typical values include "FINISHED",
            "FAILED", and "STOPPED".

    Raises:
        No exceptions are explicitly raised, but potential exceptions raised by
        the Agave job object or other called functions/methods will propagate.
    """

    previous_status = None
    # Initially check if the job is already running
    status = ag.jobs.getStatus(jobId=job_id)["status"]

    job_details = ag.jobs.get(jobId=job_id)
    max_hours = job_details["maxHours"]

    # Using tqdm to provide visual feedback while waiting for job to start
    with tqdm(desc="Waiting for job to start", dynamic_ncols=True) as pbar:
        while status not in ["RUNNING", "FINISHED", "FAILED", "STOPPED"]:
            time.sleep(time_lapse)
            status = ag.jobs.getStatus(jobId=job_id)["status"]
            pbar.update(1)
            pbar.set_postfix_str(f"Status: {status}")

    # Once the job is running, monitor it for up to maxHours
    max_iterations = int(max_hours * 3600 // time_lapse)

    # Using tqdm for progress bar
    for _ in tqdm(range(max_iterations), desc="Monitoring job", ncols=100):
        status = ag.jobs.getStatus(jobId=job_id)["status"]

        # Print status if it has changed
        if status != previous_status:
            tqdm.write(f"\tStatus: {status}")
            previous_status = status

        # Break the loop if job reaches one of these statuses
        if status in ["FINISHED", "FAILED", "STOPPED"]:
            break

        time.sleep(time_lapse)
    else:
        # This block executes if the for loop completes without a 'break'.
        # logging.warn() is deprecated since Python 3.3; use logging.warning().
        logging.warning("Warning: Maximum monitoring time reached!")

    return status
+
+
+
+def runtime_summary(ag, job_id, verbose=False)
+
+
+
Get the runtime of a job.
+
Args
+
+
ag : object
+
The Agave object that has the job details.
+
job_id : str
+
The ID of the job for which the runtime needs to be determined.
+
verbose : bool
+
If True, prints all statuses. Otherwise, prints only specific statuses.
+
+
Returns
+
+
None
+
This function doesn't return a value, but it prints the runtime details.
+
+
+
+Expand source code
+
+
def runtime_summary(ag, job_id, verbose=False):
    """Print a runtime summary of a job, broken down by status.

    Args:
        ag (object): The Agave object that has the job details.
        job_id (str): The ID of the job for which the runtime needs to be determined.
        verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.

    Returns:
        None: This function doesn't return a value, but it prints the runtime details.
    """

    print("Runtime Summary")
    print("---------------")

    job_history = ag.jobs.getHistory(jobId=job_id)

    # Guard against an empty history: the indexing below would raise IndexError.
    if not job_history:
        print("No history available for this job.")
        print("---------------")
        return

    total_time = job_history[-1]["created"] - job_history[0]["created"]

    status_times = {}

    for i in range(len(job_history) - 1):
        current_status = job_history[i]["status"]
        elapsed_time = job_history[i + 1]["created"] - job_history[i]["created"]

        # Aggregate times for each status
        if current_status in status_times:
            status_times[current_status] += elapsed_time
        else:
            status_times[current_status] = elapsed_time

    # Filter the statuses if verbose is False
    if not verbose:
        filtered_statuses = {
            "PENDING",
            "QUEUED",
            "RUNNING",
            "FINISHED",
            "FAILED",
        }
        status_times = {
            status: duration
            for status, duration in status_times.items()
            if status in filtered_statuses
        }

    # Determine the max width of status names for alignment.
    # default=5 (len("TOTAL")) keeps this from raising ValueError when
    # status_times is empty (single-entry history, or all statuses filtered).
    # The loop variable is named 'duration', not 'time', to avoid shadowing
    # the module-level 'time' import.
    max_status_width = max((len(status) for status in status_times), default=5)

    # Print the aggregated times for each unique status in a table format
    for status, duration in status_times.items():
        print(f"{status.upper():<{max_status_width + 2}} time: {duration}")

    print(f"{'TOTAL':<{max_status_width + 2}} time: {total_time}")
    print("---------------")
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/dsjobs.html b/docs/dsjobs.html
deleted file mode 100644
index 6a2550d..0000000
--- a/docs/dsjobs.html
+++ /dev/null
@@ -1,278 +0,0 @@
-
-
-
-
-
-
- dsjobs API documentation
-
-
-
-
-
-
-
-
-
-
-
-
Simplified TAPIS v2 Calls: No need to fiddle with complex API requests. dsjobs abstracts away the complexities.
-
Seamless Integration with DesignSafe Jupyter Notebooks: Launch DesignSafe applications directly from the Jupyter environment.
-
-
-
Installation
-
-
-
pip3installdsjobs
-
-
-
-
-
-
-
-
-
1"""
- 2`dsjobs` is a library that simplifies the process of submitting, running, and monitoring [TAPIS v2 / AgavePy](https://agavepy.readthedocs.io/en/latest/index.html) jobs on [DesignSafe](https://designsafe-ci.org) via [Jupyter Notebooks](https://jupyter.designsafe-ci.org).
- 3
- 4
- 5## Features
- 6
- 7* Simplified TAPIS v2 Calls: No need to fiddle with complex API requests. `dsjobs` abstracts away the complexities.
- 8
- 9* Seamless Integration with DesignSafe Jupyter Notebooks: Launch DesignSafe applications directly from the Jupyter environment.
-10
-11## Installation
-12
-13```shell
-14pip3 install dsjobs
-15```
-16
-17"""
-18from.dirimportget_ds_path_uri
-19from.jobsimportget_status,runtime_summary,generate_job_info,get_archive_path
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/docs/dsjobs/dir.html b/docs/dsjobs/dir.html
deleted file mode 100644
index 526945d..0000000
--- a/docs/dsjobs/dir.html
+++ /dev/null
@@ -1,372 +0,0 @@
-
-
-
-
-
-
- dsjobs.dir API documentation
-
-
-
-
-
-
-
-
-
-
-
-
1importos
- 2
- 3
- 4defget_ds_path_uri(ag,path):
- 5"""
- 6 Given a path on DesignSafe, determine the correct input URI.
- 7
- 8 Args:
- 9 ag (object): Agave object to fetch profiles or metadata.
-10 path (str): The directory path.
-11
-12 Returns:
-13 str: The corresponding input URI.
-14
-15 Raises:
-16 ValueError: If no matching directory pattern is found.
-17 """
-18
-19# If any of the following directory patterns are found in the path,
-20# process them accordingly.
-21directory_patterns=[
-22("jupyter/MyData","designsafe.storage.default",True),
-23("jupyter/mydata","designsafe.storage.default",True),
-24("jupyter/CommunityData","designsafe.storage.community",False),
-25("/MyData","designsafe.storage.default",True),
-26("/mydata","designsafe.storage.default",True),
-27]
-28
-29forpattern,storage,use_usernameindirectory_patterns:
-30ifpatterninpath:
-31path=path.split(pattern).pop()
-32input_dir=ag.profiles.get()["username"]+pathifuse_usernameelsepath
-33input_uri=f"agave://{storage}/{input_dir}"
-34returninput_uri.replace(" ","%20")
-35
-36project_patterns=[
-37("jupyter/MyProjects","project-"),
-38("jupyter/projects","project-"),
-39]
-40
-41forpattern,prefixinproject_patterns:
-42ifpatterninpath:
-43path=path.split(pattern+"/").pop()
-44project_id=path.split("/")[0]
-45query={"value.projectId":str(project_id)}
-46path=path.split(project_id).pop()
-47project_uuid=ag.meta.listMetadata(q=str(query))[0]["uuid"]
-48input_uri=f"agave://{prefix}{project_uuid}{path}"
-49returninput_uri.replace(" ","%20")
-50
-51raiseValueError(f"No matching directory pattern found for: {path}")
-
-
-
-
-
-
-
-
- def
- get_ds_path_uri(ag, path):
-
-
-
-
-
-
5defget_ds_path_uri(ag,path):
- 6"""
- 7 Given a path on DesignSafe, determine the correct input URI.
- 8
- 9 Args:
-10 ag (object): Agave object to fetch profiles or metadata.
-11 path (str): The directory path.
-12
-13 Returns:
-14 str: The corresponding input URI.
-15
-16 Raises:
-17 ValueError: If no matching directory pattern is found.
-18 """
-19
-20# If any of the following directory patterns are found in the path,
-21# process them accordingly.
-22directory_patterns=[
-23("jupyter/MyData","designsafe.storage.default",True),
-24("jupyter/mydata","designsafe.storage.default",True),
-25("jupyter/CommunityData","designsafe.storage.community",False),
-26("/MyData","designsafe.storage.default",True),
-27("/mydata","designsafe.storage.default",True),
-28]
-29
-30forpattern,storage,use_usernameindirectory_patterns:
-31ifpatterninpath:
-32path=path.split(pattern).pop()
-33input_dir=ag.profiles.get()["username"]+pathifuse_usernameelsepath
-34input_uri=f"agave://{storage}/{input_dir}"
-35returninput_uri.replace(" ","%20")
-36
-37project_patterns=[
-38("jupyter/MyProjects","project-"),
-39("jupyter/projects","project-"),
-40]
-41
-42forpattern,prefixinproject_patterns:
-43ifpatterninpath:
-44path=path.split(pattern+"/").pop()
-45project_id=path.split("/")[0]
-46query={"value.projectId":str(project_id)}
-47path=path.split(project_id).pop()
-48project_uuid=ag.meta.listMetadata(q=str(query))[0]["uuid"]
-49input_uri=f"agave://{prefix}{project_uuid}{path}"
-50returninput_uri.replace(" ","%20")
-51
-52raiseValueError(f"No matching directory pattern found for: {path}")
-
-
-
-
Given a path on DesignSafe, determine the correct input URI.
-
-
Args:
- ag (object): Agave object to fetch profiles or metadata.
- path (str): The directory path.
-
-
Returns:
- str: The corresponding input URI.
-
-
Raises:
- ValueError: If no matching directory pattern is found.
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/docs/dsjobs/jobs.html b/docs/dsjobs/jobs.html
deleted file mode 100644
index bd977ae..0000000
--- a/docs/dsjobs/jobs.html
+++ /dev/null
@@ -1,791 +0,0 @@
-
-
-
-
-
-
- dsjobs.jobs API documentation
-
-
-
-
-
-
-
-
-
-
-
-
1importtime
- 2fromdatetimeimportdatetime,timedelta,timezone
- 3fromtqdmimporttqdm
- 4importlogging
- 5
- 6# Configuring the logging system
- 7# logging.basicConfig(
- 8# level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
- 9# )
- 10
- 11
- 12defget_status(ag,job_id,time_lapse=15):
- 13"""
- 14 Retrieves and monitors the status of a job from Agave.
- 15
- 16 This function initially waits for the job to start, displaying its progress using
- 17 a tqdm progress bar. Once the job starts, it monitors the job's status up to
- 18 a maximum duration specified by the job's "maxHours". If the job completes or fails
- 19 before reaching this maximum duration, it returns the job's final status.
- 20
- 21 Args:
- 22 ag (object): The Agave job object used to interact with the job.
- 23 job_id (str): The unique identifier of the job to monitor.
- 24 time_lapse (int, optional): Time interval, in seconds, to wait between status
- 25 checks. Defaults to 15 seconds.
- 26
- 27 Returns:
- 28 str: The final status of the job. Typical values include "FINISHED", "FAILED",
- 29 and "STOPPED".
- 30
- 31 Raises:
- 32 No exceptions are explicitly raised, but potential exceptions raised by the Agave
- 33 job object or other called functions/methods will propagate.
- 34 """
- 35
- 36previous_status=None
- 37# Initially check if the job is already running
- 38status=ag.jobs.getStatus(jobId=job_id)["status"]
- 39
- 40job_details=ag.jobs.get(jobId=job_id)
- 41max_hours=job_details["maxHours"]
- 42
- 43# Using tqdm to provide visual feedback while waiting for job to start
- 44withtqdm(desc="Waiting for job to start",dynamic_ncols=True)aspbar:
- 45whilestatusnotin["RUNNING","FINISHED","FAILED","STOPPED"]:
- 46time.sleep(time_lapse)
- 47status=ag.jobs.getStatus(jobId=job_id)["status"]
- 48pbar.update(1)
- 49pbar.set_postfix_str(f"Status: {status}")
- 50
- 51# Once the job is running, monitor it for up to maxHours
- 52max_iterations=int(max_hours*3600//time_lapse)
- 53
- 54# Using tqdm for progress bar
- 55for_intqdm(range(max_iterations),desc="Monitoring job",ncols=100):
- 56status=ag.jobs.getStatus(jobId=job_id)["status"]
- 57
- 58# Print status if it has changed
- 59ifstatus!=previous_status:
- 60tqdm.write(f"\tStatus: {status}")
- 61previous_status=status
- 62
- 63# Break the loop if job reaches one of these statuses
- 64ifstatusin["FINISHED","FAILED","STOPPED"]:
- 65break
- 66
- 67time.sleep(time_lapse)
- 68else:
- 69# This block will execute if the for loop completes without a 'break'
- 70logging.warn("Warning: Maximum monitoring time reached!")
- 71
- 72returnstatus
- 73
- 74
- 75defruntime_summary(ag,job_id,verbose=False):
- 76"""Get the runtime of a job.
- 77
- 78 Args:
- 79 ag (object): The Agave object that has the job details.
- 80 job_id (str): The ID of the job for which the runtime needs to be determined.
- 81 verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.
- 82
- 83 Returns:
- 84 None: This function doesn't return a value, but it prints the runtime details.
- 85
- 86 """
- 87
- 88print("Runtime Summary")
- 89print("---------------")
- 90
- 91job_history=ag.jobs.getHistory(jobId=job_id)
- 92total_time=job_history[-1]["created"]-job_history[0]["created"]
- 93
- 94status_times={}
- 95
- 96foriinrange(len(job_history)-1):
- 97current_status=job_history[i]["status"]
- 98elapsed_time=job_history[i+1]["created"]-job_history[i]["created"]
- 99
-100# Aggregate times for each status
-101ifcurrent_statusinstatus_times:
-102status_times[current_status]+=elapsed_time
-103else:
-104status_times[current_status]=elapsed_time
-105
-106# Filter the statuses if verbose is False
-107ifnotverbose:
-108filtered_statuses={
-109"PENDING",
-110"QUEUED",
-111"RUNNING",
-112"FINISHED",
-113"FAILED",
-114}
-115status_times={
-116status:time
-117forstatus,timeinstatus_times.items()
-118ifstatusinfiltered_statuses
-119}
-120
-121# Determine the max width of status names for alignment
-122max_status_width=max(len(status)forstatusinstatus_times.keys())
-123
-124# Print the aggregated times for each unique status in a table format
-125forstatus,timeinstatus_times.items():
-126print(f"{status.upper():<{max_status_width+2}} time: {time}")
-127
-128print(f"{'TOTAL':<{max_status_width+2}} time: {total_time}")
-129print("---------------")
-130
-131
-132defgenerate_job_info(
-133ag,
-134appid:str,
-135jobname:str="dsjob",
-136queue:str="skx-dev",
-137nnodes:int=1,
-138nprocessors:int=1,
-139runtime:str="00:10:00",
-140inputs=None,
-141parameters=None,
-142)->dict:
-143"""Generate a job information dictionary based on provided arguments.
-144
-145 Args:
-146 ag (object): The Agave object to interact with the platform.
-147 appid (str): The application ID for the job.
-148 jobname (str, optional): The name of the job. Defaults to 'dsjob'.
-149 queue (str, optional): The batch queue name. Defaults to 'skx-dev'.
-150 nnodes (int, optional): The number of nodes required. Defaults to 1.
-151 nprocessors (int, optional): The number of processors per node. Defaults to 1.
-152 runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
-153 inputs (dict, optional): The inputs for the job. Defaults to None.
-154 parameters (dict, optional): The parameters for the job. Defaults to None.
-155
-156 Returns:
-157 dict: A dictionary containing the job information.
-158
-159 Raises:
-160 ValueError: If the provided appid is not valid.
-161 """
-162
-163try:
-164app=ag.apps.get(appId=appid)
-165exceptException:
-166raiseValueError(f"Invalid app ID: {appid}")
-167
-168job_info={
-169"appId":appid,
-170"name":jobname,
-171"batchQueue":queue,
-172"nodeCount":nnodes,
-173"processorsPerNode":nprocessors,
-174"memoryPerNode":"1",
-175"maxRunTime":runtime,
-176"archive":True,
-177"inputs":inputs,
-178"parameters":parameters,
-179}
-180
-181returnjob_info
-182
-183
-184defget_archive_path(ag,job_id):
-185"""
-186 Get the archive path for a given job ID and modifies the user directory
-187 to '/home/jupyter/MyData'.
-188
-189 Args:
-190 ag (object): The Agave object to interact with the platform.
-191 job_id (str): The job ID to retrieve the archive path for.
-192
-193 Returns:
-194 str: The modified archive path.
-195
-196 Raises:
-197 ValueError: If the archivePath format is unexpected.
-198 """
-199
-200# Fetch the job info.
-201job_info=ag.jobs.get(jobId=job_id)
-202
-203# Try to split the archive path to extract the user.
-204try:
-205user,_=job_info.archivePath.split("/",1)
-206exceptValueError:
-207raiseValueError(f"Unexpected archivePath format for jobId={job_id}")
-208
-209# Construct the new path.
-210new_path=job_info.archivePath.replace(user,"/home/jupyter/MyData")
-211
-212returnnew_path
-
13defget_status(ag,job_id,time_lapse=15):
-14"""
-15 Retrieves and monitors the status of a job from Agave.
-16
-17 This function initially waits for the job to start, displaying its progress using
-18 a tqdm progress bar. Once the job starts, it monitors the job's status up to
-19 a maximum duration specified by the job's "maxHours". If the job completes or fails
-20 before reaching this maximum duration, it returns the job's final status.
-21
-22 Args:
-23 ag (object): The Agave job object used to interact with the job.
-24 job_id (str): The unique identifier of the job to monitor.
-25 time_lapse (int, optional): Time interval, in seconds, to wait between status
-26 checks. Defaults to 15 seconds.
-27
-28 Returns:
-29 str: The final status of the job. Typical values include "FINISHED", "FAILED",
-30 and "STOPPED".
-31
-32 Raises:
-33 No exceptions are explicitly raised, but potential exceptions raised by the Agave
-34 job object or other called functions/methods will propagate.
-35 """
-36
-37previous_status=None
-38# Initially check if the job is already running
-39status=ag.jobs.getStatus(jobId=job_id)["status"]
-40
-41job_details=ag.jobs.get(jobId=job_id)
-42max_hours=job_details["maxHours"]
-43
-44# Using tqdm to provide visual feedback while waiting for job to start
-45withtqdm(desc="Waiting for job to start",dynamic_ncols=True)aspbar:
-46whilestatusnotin["RUNNING","FINISHED","FAILED","STOPPED"]:
-47time.sleep(time_lapse)
-48status=ag.jobs.getStatus(jobId=job_id)["status"]
-49pbar.update(1)
-50pbar.set_postfix_str(f"Status: {status}")
-51
-52# Once the job is running, monitor it for up to maxHours
-53max_iterations=int(max_hours*3600//time_lapse)
-54
-55# Using tqdm for progress bar
-56for_intqdm(range(max_iterations),desc="Monitoring job",ncols=100):
-57status=ag.jobs.getStatus(jobId=job_id)["status"]
-58
-59# Print status if it has changed
-60ifstatus!=previous_status:
-61tqdm.write(f"\tStatus: {status}")
-62previous_status=status
-63
-64# Break the loop if job reaches one of these statuses
-65ifstatusin["FINISHED","FAILED","STOPPED"]:
-66break
-67
-68time.sleep(time_lapse)
-69else:
-70# This block will execute if the for loop completes without a 'break'
-71logging.warn("Warning: Maximum monitoring time reached!")
-72
-73returnstatus
-
-
-
-
Retrieves and monitors the status of a job from Agave.
-
-
This function initially waits for the job to start, displaying its progress using
-a tqdm progress bar. Once the job starts, it monitors the job's status up to
-a maximum duration specified by the job's "maxHours". If the job completes or fails
-before reaching this maximum duration, it returns the job's final status.
-
-
Args:
- ag (object): The Agave job object used to interact with the job.
- job_id (str): The unique identifier of the job to monitor.
- time_lapse (int, optional): Time interval, in seconds, to wait between status
- checks. Defaults to 15 seconds.
-
-
Returns:
- str: The final status of the job. Typical values include "FINISHED", "FAILED",
- and "STOPPED".
-
-
Raises:
- No exceptions are explicitly raised, but potential exceptions raised by the Agave
- job object or other called functions/methods will propagate.
76defruntime_summary(ag,job_id,verbose=False):
- 77"""Get the runtime of a job.
- 78
- 79 Args:
- 80 ag (object): The Agave object that has the job details.
- 81 job_id (str): The ID of the job for which the runtime needs to be determined.
- 82 verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.
- 83
- 84 Returns:
- 85 None: This function doesn't return a value, but it prints the runtime details.
- 86
- 87 """
- 88
- 89print("Runtime Summary")
- 90print("---------------")
- 91
- 92job_history=ag.jobs.getHistory(jobId=job_id)
- 93total_time=job_history[-1]["created"]-job_history[0]["created"]
- 94
- 95status_times={}
- 96
- 97foriinrange(len(job_history)-1):
- 98current_status=job_history[i]["status"]
- 99elapsed_time=job_history[i+1]["created"]-job_history[i]["created"]
-100
-101# Aggregate times for each status
-102ifcurrent_statusinstatus_times:
-103status_times[current_status]+=elapsed_time
-104else:
-105status_times[current_status]=elapsed_time
-106
-107# Filter the statuses if verbose is False
-108ifnotverbose:
-109filtered_statuses={
-110"PENDING",
-111"QUEUED",
-112"RUNNING",
-113"FINISHED",
-114"FAILED",
-115}
-116status_times={
-117status:time
-118forstatus,timeinstatus_times.items()
-119ifstatusinfiltered_statuses
-120}
-121
-122# Determine the max width of status names for alignment
-123max_status_width=max(len(status)forstatusinstatus_times.keys())
-124
-125# Print the aggregated times for each unique status in a table format
-126forstatus,timeinstatus_times.items():
-127print(f"{status.upper():<{max_status_width+2}} time: {time}")
-128
-129print(f"{'TOTAL':<{max_status_width+2}} time: {total_time}")
-130print("---------------")
-
-
-
-
Get the runtime of a job.
-
-
Args:
- ag (object): The Agave object that has the job details.
- job_id (str): The ID of the job for which the runtime needs to be determined.
- verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.
-
-
Returns:
- None: This function doesn't return a value, but it prints the runtime details.
133defgenerate_job_info(
-134ag,
-135appid:str,
-136jobname:str="dsjob",
-137queue:str="skx-dev",
-138nnodes:int=1,
-139nprocessors:int=1,
-140runtime:str="00:10:00",
-141inputs=None,
-142parameters=None,
-143)->dict:
-144"""Generate a job information dictionary based on provided arguments.
-145
-146 Args:
-147 ag (object): The Agave object to interact with the platform.
-148 appid (str): The application ID for the job.
-149 jobname (str, optional): The name of the job. Defaults to 'dsjob'.
-150 queue (str, optional): The batch queue name. Defaults to 'skx-dev'.
-151 nnodes (int, optional): The number of nodes required. Defaults to 1.
-152 nprocessors (int, optional): The number of processors per node. Defaults to 1.
-153 runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
-154 inputs (dict, optional): The inputs for the job. Defaults to None.
-155 parameters (dict, optional): The parameters for the job. Defaults to None.
-156
-157 Returns:
-158 dict: A dictionary containing the job information.
-159
-160 Raises:
-161 ValueError: If the provided appid is not valid.
-162 """
-163
-164try:
-165app=ag.apps.get(appId=appid)
-166exceptException:
-167raiseValueError(f"Invalid app ID: {appid}")
-168
-169job_info={
-170"appId":appid,
-171"name":jobname,
-172"batchQueue":queue,
-173"nodeCount":nnodes,
-174"processorsPerNode":nprocessors,
-175"memoryPerNode":"1",
-176"maxRunTime":runtime,
-177"archive":True,
-178"inputs":inputs,
-179"parameters":parameters,
-180}
-181
-182returnjob_info
-
-
-
-
Generate a job information dictionary based on provided arguments.
-
-
Args:
- ag (object): The Agave object to interact with the platform.
- appid (str): The application ID for the job.
- jobname (str, optional): The name of the job. Defaults to 'dsjob'.
- queue (str, optional): The batch queue name. Defaults to 'skx-dev'.
- nnodes (int, optional): The number of nodes required. Defaults to 1.
- nprocessors (int, optional): The number of processors per node. Defaults to 1.
- runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.
- inputs (dict, optional): The inputs for the job. Defaults to None.
- parameters (dict, optional): The parameters for the job. Defaults to None.
-
-
Returns:
- dict: A dictionary containing the job information.
-
-
Raises:
- ValueError: If the provided appid is not valid.
-
-
-
-
-
-
-
-
- def
- get_archive_path(ag, job_id):
-
-
-
-
-
-
185defget_archive_path(ag,job_id):
-186"""
-187 Get the archive path for a given job ID and modifies the user directory
-188 to '/home/jupyter/MyData'.
-189
-190 Args:
-191 ag (object): The Agave object to interact with the platform.
-192 job_id (str): The job ID to retrieve the archive path for.
-193
-194 Returns:
-195 str: The modified archive path.
-196
-197 Raises:
-198 ValueError: If the archivePath format is unexpected.
-199 """
-200
-201# Fetch the job info.
-202job_info=ag.jobs.get(jobId=job_id)
-203
-204# Try to split the archive path to extract the user.
-205try:
-206user,_=job_info.archivePath.split("/",1)
-207exceptValueError:
-208raiseValueError(f"Unexpected archivePath format for jobId={job_id}")
-209
-210# Construct the new path.
-211new_path=job_info.archivePath.replace(user,"/home/jupyter/MyData")
-212
-213returnnew_path
-
-
-
-
Get the archive path for a given job ID and modifies the user directory
-to '/home/jupyter/MyData'.
-
-
Args:
- ag (object): The Agave object to interact with the platform.
- job_id (str): The job ID to retrieve the archive path for.
-
-
Returns:
- str: The modified archive path.
-
-
Raises:
- ValueError: If the archivePath format is unexpected.
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/docs/index.html b/docs/index.html
deleted file mode 100644
index 5bd6d3d..0000000
--- a/docs/index.html
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
-
-
diff --git a/docs/search.js b/docs/search.js
deleted file mode 100644
index 429e0b3..0000000
--- a/docs/search.js
+++ /dev/null
@@ -1,46 +0,0 @@
-window.pdocSearch = (function(){
-/** elasticlunr - http://weixsong.github.io * Copyright (C) 2017 Oliver Nightingale * Copyright (C) 2017 Wei Song * MIT Licensed */!function(){function e(e){if(null===e||"object"!=typeof e)return e;var t=e.constructor();for(var n in e)e.hasOwnProperty(n)&&(t[n]=e[n]);return t}var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.9.5",lunr=t,t.utils={},t.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),t.utils.toString=function(e){return void 0===e||null===e?"":e.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var e=Array.prototype.slice.call(arguments),t=e.pop(),n=e;if("function"!=typeof t)throw new TypeError("last argument must be a function");n.forEach(function(e){this.hasHandler(e)||(this.events[e]=[]),this.events[e].push(t)},this)},t.EventEmitter.prototype.removeListener=function(e,t){if(this.hasHandler(e)){var n=this.events[e].indexOf(t);-1!==n&&(this.events[e].splice(n,1),0==this.events[e].length&&delete this.events[e])}},t.EventEmitter.prototype.emit=function(e){if(this.hasHandler(e)){var t=Array.prototype.slice.call(arguments,1);this.events[e].forEach(function(e){e.apply(void 0,t)},this)}},t.EventEmitter.prototype.hasHandler=function(e){return e in this.events},t.tokenizer=function(e){if(!arguments.length||null===e||void 0===e)return[];if(Array.isArray(e)){var n=e.filter(function(e){return null===e||void 0===e?!1:!0});n=n.map(function(e){return t.utils.toString(e).toLowerCase()});var i=[];return n.forEach(function(e){var n=e.split(t.tokenizer.seperator);i=i.concat(n)},this),i}return e.toString().trim().toLowerCase().split(t.tokenizer.seperator)},t.tokenizer.defaultSeperator=/[\s\-]+/,t.tokenizer.seperator=t.tokenizer.defaultSeperator,t.tokenizer.setSeperator=function(e){null!==e&&void 0!==e&&"object"==typeof 
e&&(t.tokenizer.seperator=e)},t.tokenizer.resetSeperator=function(){t.tokenizer.seperator=t.tokenizer.defaultSeperator},t.tokenizer.getSeperator=function(){return t.tokenizer.seperator},t.Pipeline=function(){this._queue=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in t.Pipeline.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[n]=e},t.Pipeline.getRegisteredFunction=function(e){return e in t.Pipeline.registeredFunctions!=!0?null:t.Pipeline.registeredFunctions[e]},t.Pipeline.warnIfFunctionNotRegistered=function(e){var n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.getRegisteredFunction(e);if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._queue.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i+1,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i,0,n)},t.Pipeline.prototype.remove=function(e){var t=this._queue.indexOf(e);-1!==t&&this._queue.splice(t,1)},t.Pipeline.prototype.run=function(e){for(var t=[],n=e.length,i=this._queue.length,o=0;n>o;o++){for(var r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return 
this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 
0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance 
fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();odsjobs is a library that simplifies the process of submitting, running, and monitoring TAPIS v2 / AgavePy jobs on DesignSafe via Jupyter Notebooks.
\n\n
Features
\n\n
\n
Simplified TAPIS v2 Calls: No need to fiddle with complex API requests. dsjobs abstracts away the complexities.
\n
Seamless Integration with DesignSafe Jupyter Notebooks: Launch DesignSafe applications directly from the Jupyter environment.
Retrieves and monitors the status of a job from Agave.
\n\n
This function initially waits for the job to start, displaying its progress using\na tqdm progress bar. Once the job starts, it monitors the job's status up to\na maximum duration specified by the job's \"maxHours\". If the job completes or fails\nbefore reaching this maximum duration, it returns the job's final status.
\n\n
Args:\n ag (object): The Agave job object used to interact with the job.\n job_id (str): The unique identifier of the job to monitor.\n time_lapse (int, optional): Time interval, in seconds, to wait between status\n checks. Defaults to 15 seconds.
\n\n
Returns:\n str: The final status of the job. Typical values include \"FINISHED\", \"FAILED\",\n and \"STOPPED\".
\n\n
Raises:\n No exceptions are explicitly raised, but potential exceptions raised by the Agave\n job object or other called functions/methods will propagate.
Args:\n ag (object): The Agave object that has the job details.\n job_id (str): The ID of the job for which the runtime needs to be determined.\n verbose (bool): If True, prints all statuses. Otherwise, prints only specific statuses.
\n\n
Returns:\n None: This function doesn't return a value, but it prints the runtime details.
Generate a job information dictionary based on provided arguments.
\n\n
Args:\n ag (object): The Agave object to interact with the platform.\n appid (str): The application ID for the job.\n jobname (str, optional): The name of the job. Defaults to 'dsjob'.\n queue (str, optional): The batch queue name. Defaults to 'skx-dev'.\n nnodes (int, optional): The number of nodes required. Defaults to 1.\n nprocessors (int, optional): The number of processors per node. Defaults to 1.\n runtime (str, optional): The maximum runtime in the format 'HH:MM:SS'. Defaults to '00:10:00'.\n inputs (dict, optional): The inputs for the job. Defaults to None.\n parameters (dict, optional): The parameters for the job. Defaults to None.
\n\n
Returns:\n dict: A dictionary containing the job information.
\n\n
Raises:\n ValueError: If the provided appid is not valid.