diff --git a/Makefile b/Makefile index 09416298f403..81d6e3c3ccb7 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Location of virtualenv used for development. VENV?=.venv -# Source virtualenv to execute command (darker, sphinx, twine, etc...) +# Source virtualenv to execute command (black, isort, sphinx, twine, etc...) IN_VENV=if [ -f "$(VENV)/bin/activate" ]; then . "$(VENV)/bin/activate"; fi; RELEASE_CURR:=26.0 RELEASE_UPSTREAM:=upstream diff --git a/client/src/api/schema/schema.ts b/client/src/api/schema/schema.ts index 972407692212..d200fd5e143b 100644 --- a/client/src/api/schema/schema.ts +++ b/client/src/api/schema/schema.ts @@ -3312,6 +3312,46 @@ export interface paths { patch?: never; trace?: never; }; + "/api/jobs/{job_id}/stderr": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Return stderr from job execution + * @description Return job stderr as plain text. + */ + get: operations["get_job_stderr_api_jobs__job_id__stderr_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/jobs/{job_id}/stdout": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Return stdout from job execution + * @description Return job stdout as plain text. + */ + get: operations["get_job_stdout_api_jobs__job_id__stdout_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/libraries": { parameters: { query?: never; @@ -6547,6 +6587,154 @@ export interface paths { patch?: never; trace?: never; }; + "/ga4gh/wes/v1/runs": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * List Runs + * @description List workflow runs. + */ + get: operations["list_runs_ga4gh_wes_v1_runs_get"]; + put?: never; + /** + * Submit Run + * @description Submit a new workflow run. + * + * Accepts multipart/form-data with workflow and parameters. + */ + post: operations["submit_run_ga4gh_wes_v1_runs_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ga4gh/wes/v1/runs/{run_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Run + * @description Get workflow run details. + */ + get: operations["get_run_ga4gh_wes_v1_runs__run_id__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ga4gh/wes/v1/runs/{run_id}/cancel": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Cancel Run + * @description Cancel a workflow run. + */ + post: operations["cancel_run_ga4gh_wes_v1_runs__run_id__cancel_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ga4gh/wes/v1/runs/{run_id}/status": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Run Status + * @description Get workflow run status. 
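
A minimal sketch of consuming the new per-job stdout/stderr endpoints with `openapi-fetch` and the `paths` type generated in this file. The base URL and API-key header are placeholders; since both endpoints return plain text, the client is told to parse the response as text rather than JSON.

```ts
import createClient from "openapi-fetch";
import type { paths } from "./schema";

// Placeholder server and credentials -- substitute your own deployment.
const client = createClient<paths>({
    baseUrl: "https://usegalaxy.example",
    headers: { "x-api-key": "<your-api-key>" },
});

// Fetch stdout and stderr for a job; both endpoints serve text/plain,
// so parseAs: "text" makes openapi-fetch return the raw string.
export async function fetchJobStdio(jobId: string) {
    const stdout = await client.GET("/api/jobs/{job_id}/stdout", {
        params: { path: { job_id: jobId } },
        parseAs: "text",
    });
    const stderr = await client.GET("/api/jobs/{job_id}/stderr", {
        params: { path: { job_id: jobId } },
        parseAs: "text",
    });
    return { stdout: stdout.data, stderr: stderr.data };
}
```
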
+ */ + get: operations["get_run_status_ga4gh_wes_v1_runs__run_id__status_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ga4gh/wes/v1/runs/{run_id}/tasks": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Run Tasks + * @description Get paginated list of tasks for a workflow run. + */ + get: operations["get_run_tasks_ga4gh_wes_v1_runs__run_id__tasks_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ga4gh/wes/v1/runs/{run_id}/tasks/{task_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Run Task + * @description Get details for a specific task. + * + * Task ID format: order_index or order_index.job_index for collection mapping jobs. + */ + get: operations["get_run_task_ga4gh_wes_v1_runs__run_id__tasks__task_id__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ga4gh/wes/v1/service-info": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Service Info + * @description Get WES service information. + */ + get: operations["service_info_ga4gh_wes_v1_service_info_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/oauth2_callback": { parameters: { query?: never; @@ -7255,6 +7443,27 @@ export interface components { /** Targets */ targets: unknown; }; + /** Body_submit_run_ga4gh_wes_v1_runs_post */ + Body_submit_run_ga4gh_wes_v1_runs_post: { + /** Tags */ + tags?: string | null; + /** Workflow Attachment */ + workflow_attachment?: string | null; + /** Workflow Engine */ + workflow_engine?: string | null; + /** Workflow Engine Parameters */ + workflow_engine_parameters?: string | null; + /** Workflow Engine Version */ + workflow_engine_version?: string | null; + /** Workflow Params */ + workflow_params?: string | null; + /** Workflow Type */ + workflow_type: string; + /** Workflow Type Version */ + workflow_type_version: string; + /** Workflow Url */ + workflow_url?: string | null; + }; /** BooleanParameterModel */ BooleanParameterModel: { /** @@ -10533,6 +10742,24 @@ export interface components { * @enum {string} */ DefaultQuotaValues: "unregistered" | "registered" | "no"; + /** DefaultWorkflowEngineParameter */ + DefaultWorkflowEngineParameter: { + /** + * Default Value + * @description The stringified version of the default parameter. e.g. "2.45". + */ + default_value?: string | null; + /** + * Name + * @description The name of the parameter + */ + name?: string | null; + /** + * Type + * @description Describes the type of the parameter, e.g. float. 
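
A sketch of submitting a WES run against the `Body_submit_run_ga4gh_wes_v1_runs_post` schema above. The endpoint accepts multipart/form-data, so a custom `bodySerializer` converts the typed body into `FormData`. The `workflow_type`/`workflow_type_version` values are assumptions for illustration; the schema only says they are required strings.

```ts
import createClient from "openapi-fetch";
import type { paths } from "./schema";

const client = createClient<paths>({ baseUrl: "https://usegalaxy.example" });

export async function submitRun(workflowUrl: string, params: object) {
    const { data, error } = await client.POST("/ga4gh/wes/v1/runs", {
        body: {
            workflow_type: "galaxy", // assumption: engine-specific value
            workflow_type_version: "gxformat2", // assumption
            workflow_url: workflowUrl,
            workflow_params: JSON.stringify(params),
        },
        // The endpoint takes multipart/form-data, not JSON, so serialize
        // the non-null fields into a FormData body.
        bodySerializer(body) {
            const form = new FormData();
            for (const [key, value] of Object.entries(body as Record<string, unknown>)) {
                if (value != null) form.append(key, String(value));
            }
            return form;
        },
    });
    if (error) throw new Error("run submission failed");
    return data; // RunId per the schema above
}
```
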
+ */ + type?: string | null; + }; /** DeleteDatasetBatchPayload */ DeleteDatasetBatchPayload: { /** @@ -17793,6 +18020,56 @@ export interface components { * @default [] */ ListUriResponse: (components["schemas"]["RemoteFile"] | components["schemas"]["RemoteDirectory"])[]; + /** Log */ + Log: { + /** + * Cmd + * @description The command line that was executed + */ + cmd?: string[] | null; + /** + * End Time + * @description When the command stopped executing (completed, failed, or cancelled), in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ" + */ + end_time?: string | null; + /** + * Exit Code + * @description Exit code of the program + */ + exit_code?: number | null; + /** + * Name + * @description The task or workflow name + */ + name?: string | null; + /** + * Start Time + * @description When the command started executing, in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ" + */ + start_time?: string | null; + /** + * Stderr + * @description A URL to retrieve standard error logs of the workflow run or task. This URL may change between status requests, or may not be available until the task or workflow has finished execution. Should be available using the same credentials used to access the WES endpoint. + */ + stderr?: string | null; + /** + * Stdout + * @description A URL to retrieve standard output logs of the workflow run or task. This URL may change between status requests, or may not be available until the task or workflow has finished execution. Should be available using the same credentials used to access the WES endpoint. + */ + stdout?: string | null; + /** + * System Logs + * @description System logs are any logs the system decides are relevant, + * which are not tied directly to a workflow. + * Content is implementation specific: format, size, etc. + * + * System logs may be collected here to provide convenient access. + * + * For example, the system may include an error message that caused + * a SYSTEM_ERROR state (e.g. disk is full), etc. + */ + system_logs?: string[] | null; + }; /** * MandatoryNotificationCategory * @description These notification categories cannot be opt-out by the user. @@ -20127,6 +20404,131 @@ export interface components { */ type: "rules"; }; + /** RunId */ + RunId: { + /** + * Run Id + * @description workflow run ID + */ + run_id?: string | null; + }; + /** RunListResponse */ + RunListResponse: { + /** + * Next Page Token + * @description A token which may be supplied as `page_token` in workflow run list request to get the next page of results. An empty string indicates there are no more items to return. + */ + next_page_token?: string | null; + /** + * Runs + * @description A list of workflow runs that the service has executed or is executing. The list is filtered to only include runs that the caller has permission to see. + */ + runs?: (components["schemas"]["RunStatus"] | components["schemas"]["RunSummary"])[] | null; + }; + /** RunLog */ + RunLog: { + /** + * Outputs + * @description The outputs from the workflow run. + */ + outputs?: { + [key: string]: unknown; + } | null; + request?: components["schemas"]["RunRequest"] | null; + /** + * Run Id + * @description workflow run ID + */ + run_id?: string | null; + run_log?: components["schemas"]["Log"] | null; + state?: components["schemas"]["State"] | null; + /** + * Task Logs + * @description The logs, and other key info like timing and exit code, for each step in the workflow run. This field is deprecated and the `task_logs_url` should be used to retrieve a paginated list of steps from the workflow run. 
This field will be removed in the next major version of the specification (2.0.0) + */ + task_logs?: (components["schemas"]["Log"] | components["schemas"]["TaskLog"])[] | null; + /** + * Task Logs Url + * @description A reference to the complete url which may be used to obtain a paginated list of task logs for this workflow + */ + task_logs_url?: string | null; + }; + /** RunRequest */ + RunRequest: { + /** Tags */ + tags?: { + [key: string]: string; + } | null; + /** + * Workflow Engine + * @description The workflow engine, must be one supported by this WES instance. Required if workflow_engine_version is provided. + */ + workflow_engine?: string | null; + /** Workflow Engine Parameters */ + workflow_engine_parameters?: { + [key: string]: string; + } | null; + /** + * Workflow Engine Version + * @description The workflow engine version, must be one supported by this WES instance. If workflow_engine is provided, but workflow_engine_version is not, servers can make no assumptions with regard to the engine version the WES instance uses to process the request if that WES instance supports multiple versions of the requested engine. + */ + workflow_engine_version?: string | null; + /** + * Workflow Params + * @description REQUIRED + * The workflow run parameterizations (JSON encoded), including input and output file locations + */ + workflow_params?: { + [key: string]: unknown; + } | null; + /** + * Workflow Type + * @description REQUIRED + * The workflow descriptor type, must be "CWL" or "WDL" currently (or another alternative supported by this WES instance) + */ + workflow_type: string; + /** + * Workflow Type Version + * @description REQUIRED + * The workflow descriptor type version, must be one supported by this WES instance + */ + workflow_type_version: string; + /** + * Workflow Url + * @description REQUIRED + * The workflow CWL or WDL document. When `workflow_attachments` is used to attach files, the `workflow_url` may be a relative path to one of the attachments. + */ + workflow_url: string; + }; + /** RunStatus */ + RunStatus: { + /** Run Id */ + run_id: string; + state?: components["schemas"]["State"] | null; + }; + /** RunSummary */ + RunSummary: { + /** + * End Time + * @description When the run stopped executing (completed, failed, or cancelled), in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ" + */ + end_time?: string | null; + /** Run Id */ + run_id: string; + /** + * Start Time + * @description When the run started executing, in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ" + */ + start_time?: string | null; + state?: components["schemas"]["State"] | null; + /** + * Tags + * @description Arbitrary key/value tags added by the client during run creation + */ + tags: { + [key: string]: string; + }; + }; /** SampleSheetColumnDefinition */ SampleSheetColumnDefinition: { /** Default Value */ @@ -20596,66 +20998,6 @@ export interface components { */ to_posix_lines: boolean; }; - /** Service */ - Service: { - /** - * Contacturl - * @description URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). - * @example mailto:support@example.com - */ - contactUrl?: string | null; - /** - * Createdat - * @description Timestamp describing when the service was first deployed and available (RFC 3339 format) - * @example 2019-06-04T12:58:19Z - */ - createdAt?: string | null; - /** - * Description - * @description Description of the service. Should be human readable and provide information about the service. 
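
A sketch of draining the paginated run list described by `RunListResponse` above. The schema documents that an empty `next_page_token` means no more items; the `page_token` query parameter name is the WES-standard one and is an assumption here, since the operation's parameters are outside this excerpt, so plain `fetch` is used rather than the typed client.

```ts
import type { components } from "./schema";

type RunListResponse = components["schemas"]["RunListResponse"];

export async function listAllRuns(base: string): Promise<NonNullable<RunListResponse["runs"]>> {
    const runs: NonNullable<RunListResponse["runs"]> = [];
    let token = "";
    do {
        const url = new URL(`${base}/ga4gh/wes/v1/runs`);
        if (token) url.searchParams.set("page_token", token); // assumed param name
        const page = (await (await fetch(url)).json()) as RunListResponse;
        runs.push(...(page.runs ?? []));
        // Per the schema, an empty string signals the last page.
        token = page.next_page_token ?? "";
    } while (token);
    return runs;
}
```
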
- * @example This service provides... - */ - description?: string | null; - /** - * Documentationurl - * @description URL of the documentation of this service (RFC 3986 format). This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. - * @example https://docs.myservice.example.com - */ - documentationUrl?: string | null; - /** - * Environment - * @description Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced. - * @example test - */ - environment?: string | null; - /** - * Id - * @description Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. - * @example org.ga4gh.myservice - */ - id: string; - /** - * Name - * @description Name of this service. Should be human readable. - * @example My project - */ - name: string; - /** @description Organization providing the service */ - organization: components["schemas"]["galaxy__schema__drs__Organization"]; - type: components["schemas"]["ServiceType"]; - /** - * Updatedat - * @description Timestamp describing when the service was last updated (RFC 3339 format) - * @example 2019-06-04T12:58:19Z - */ - updatedAt?: string | null; - /** - * Version - * @description Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. - * @example 1.0.0 - */ - version: string; - }; /** ServiceCredentialGroupPayload */ ServiceCredentialGroupPayload: { /** @@ -20745,6 +21087,105 @@ export interface components { */ version: string; }; + /** ServiceInfo */ + ServiceInfo: { + /** + * Auth Instructions Url + * @description A web page URL with human-readable instructions on how to get an authorization token for use with a specific WES endpoint. + */ + auth_instructions_url: string; + /** + * Contacturl + * @description URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). + * @example mailto:support@example.com + */ + contactUrl?: string | null; + /** + * Createdat + * @description Timestamp describing when the service was first deployed and available (RFC 3339 format) + * @example 2019-06-04T12:58:19Z + */ + createdAt?: string | null; + /** + * Default Workflow Engine Parameters + * @description Each workflow engine can present additional parameters that can be sent to the workflow engine. This message will list the default values, and their types for each workflow engine. + */ + default_workflow_engine_parameters: components["schemas"]["DefaultWorkflowEngineParameter"][]; + /** + * Description + * @description Description of the service. Should be human readable and provide information about the service. + * @example This service provides... + */ + description?: string | null; + /** + * Documentationurl + * @description URL of the documentation of this service (RFC 3986 format). This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. 
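
The run list above mixes `RunStatus` and `RunSummary` entries, so a small type guard helps narrow them. Per the schemas, `RunSummary` requires a `tags` field that `RunStatus` does not define, which makes a safe discriminant; the helper below is illustrative only.

```ts
import type { components } from "./schema";

type RunStatus = components["schemas"]["RunStatus"];
type RunSummary = components["schemas"]["RunSummary"];

// RunSummary is the only variant with a required `tags` member.
export function isRunSummary(run: RunStatus | RunSummary): run is RunSummary {
    return "tags" in run;
}

export function describeRun(run: RunStatus | RunSummary): string {
    const state = run.state ?? "UNKNOWN";
    return isRunSummary(run)
        ? `${run.run_id}: ${state} (started ${run.start_time ?? "n/a"})`
        : `${run.run_id}: ${state}`;
}
```
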
+ * @example https://docs.myservice.example.com + */ + documentationUrl?: string | null; + /** + * Environment + * @description Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced. + * @example test + */ + environment?: string | null; + /** + * Id + * @description Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. + * @example org.ga4gh.myservice + */ + id: string; + /** + * Name + * @description Name of this service. Should be human readable. + * @example My project + */ + name: string; + /** @description Organization providing the service */ + organization: components["schemas"]["galaxy__schema__wes__Organization"]; + /** + * Supported Filesystem Protocols + * @description The filesystem protocols supported by this service, currently these may include common protocols using the terms 'http', 'https', 'sftp', 's3', 'gs', 'file', or 'synapse', but others are possible and the terms beyond these core protocols are currently not fixed. This section reports those protocols (either common or not) supported by this WES service. + */ + supported_filesystem_protocols: string[]; + /** + * Supported Wes Versions + * @description The version(s) of the WES schema supported by this service + */ + supported_wes_versions: string[]; + /** + * System State Counts + * @description The system statistics, key is the statistic, value is the count of runs in that state. See the State enum for the possible keys. + */ + system_state_counts: { + [key: string]: number; + }; + /** Tags */ + tags: { + [key: string]: string; + }; + type: components["schemas"]["ServiceType"]; + /** + * Updatedat + * @description Timestamp describing when the service was last updated (RFC 3339 format) + * @example 2019-06-04T12:58:19Z + */ + updatedAt?: string | null; + /** + * Version + * @description Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. + * @example 1.0.0 + */ + version: string; + /** Workflow Engine Versions */ + workflow_engine_versions: { + [key: string]: components["schemas"]["WorkflowEngineVersion"]; + }; + /** Workflow Type Versions */ + workflow_type_versions: { + [key: string]: components["schemas"]["WorkflowTypeVersion"]; + }; + }; /** ServiceParameterDefinition */ ServiceParameterDefinition: { /** @@ -21231,6 +21672,22 @@ export interface components { * @enum {string} */ Src: "url" | "pasted" | "files" | "path" | "composite" | "ftp_import" | "server_dir"; + /** + * State + * @enum {string} + */ + State: + | "UNKNOWN" + | "QUEUED" + | "INITIALIZING" + | "RUNNING" + | "PAUSED" + | "COMPLETE" + | "EXECUTOR_ERROR" + | "SYSTEM_ERROR" + | "CANCELED" + | "CANCELING" + | "PREEMPTED"; /** StepReferenceByLabel */ StepReferenceByLabel: { /** @@ -21596,6 +22053,80 @@ export interface components { | "Page" | "StoredWorkflow" | "Visualization"; + /** TaskListResponse */ + TaskListResponse: { + /** + * Next Page Token + * @description A token which may be supplied as `page_token` in workflow run task list request to get the next page of results. An empty string indicates there are no more items to return. 
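
A sketch of polling a run until it reaches a terminal state, using the `State` enum defined below. The terminal subset follows the usual GA4GH WES convention (COMPLETE, the two error states, CANCELED, PREEMPTED); treat that subset as an assumption, since this schema does not encode which states are terminal.

```ts
import createClient from "openapi-fetch";
import type { components, paths } from "./schema";

type State = components["schemas"]["State"];

// Assumed terminal states per WES convention; not encoded in the schema.
const TERMINAL: ReadonlySet<State> = new Set<State>([
    "COMPLETE",
    "EXECUTOR_ERROR",
    "SYSTEM_ERROR",
    "CANCELED",
    "PREEMPTED",
]);

const client = createClient<paths>({ baseUrl: "https://usegalaxy.example" });

export async function waitForRun(runId: string, pollMs = 5000): Promise<State> {
    for (;;) {
        const { data } = await client.GET("/ga4gh/wes/v1/runs/{run_id}/status", {
            params: { path: { run_id: runId } },
        });
        const state: State = data?.state ?? "UNKNOWN";
        if (TERMINAL.has(state)) return state;
        await new Promise((resolve) => setTimeout(resolve, pollMs));
    }
}
```
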
+ */ + next_page_token?: string | null; + /** + * Task Logs + * @description The logs, and other key info like timing and exit code, for each step in the workflow run. + */ + task_logs?: components["schemas"]["TaskLog"][] | null; + }; + /** TaskLog */ + TaskLog: { + /** + * Cmd + * @description The command line that was executed + */ + cmd?: string[] | null; + /** + * End Time + * @description When the command stopped executing (completed, failed, or cancelled), in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ" + */ + end_time?: string | null; + /** + * Exit Code + * @description Exit code of the program + */ + exit_code?: number | null; + /** + * Id + * @description A unique identifier which may be used to reference the task + */ + id: string; + /** + * Name + * @description The task or workflow name + */ + name: string; + /** + * Start Time + * @description When the command started executing, in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ" + */ + start_time?: string | null; + /** + * Stderr + * @description A URL to retrieve standard error logs of the workflow run or task. This URL may change between status requests, or may not be available until the task or workflow has finished execution. Should be available using the same credentials used to access the WES endpoint. + */ + stderr?: string | null; + /** + * Stdout + * @description A URL to retrieve standard output logs of the workflow run or task. This URL may change between status requests, or may not be available until the task or workflow has finished execution. Should be available using the same credentials used to access the WES endpoint. + */ + stdout?: string | null; + /** + * System Logs + * @description System logs are any logs the system decides are relevant, + * which are not tied directly to a task. + * Content is implementation specific: format, size, etc. + * + * System logs may be collected here to provide convenient access. + * + * For example, the system may include the name of the host + * where the task is executing, an error message that caused + * a SYSTEM_ERROR state (e.g. disk is full), etc. + */ + system_logs?: string[] | null; + /** + * Tes Uri + * @description An optional URL pointing to an extended task definition defined by a [TES api](https://github.com/ga4gh/task-execution-schemas) + */ + tes_uri?: string | null; + }; /** * TaskResult * @description Contains information about the result of an asynchronous task. 
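
The task endpoints above document task ids as `order_index` or `order_index.job_index` for collection mapping jobs. A small parser makes that structure explicit; the helper and its types are illustrative, not part of the API.

```ts
export interface ParsedTaskId {
    orderIndex: number;
    jobIndex?: number; // present only for collection mapping jobs
}

// Split "3" or "3.1" into its step (order) and optional job indices.
export function parseTaskId(taskId: string): ParsedTaskId {
    const [order, job] = taskId.split(".");
    const orderIndex = Number(order);
    if (!Number.isInteger(orderIndex)) {
        throw new Error(`malformed task id: ${taskId}`);
    }
    return job === undefined ? { orderIndex } : { orderIndex, jobIndex: Number(job) };
}
```
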
@@ -23965,6 +24496,14 @@ export interface components { */ revision: string; }; + /** WorkflowEngineVersion */ + WorkflowEngineVersion: { + /** + * Workflow Engine Version + * @description An array of one or more acceptable engines versions for the `workflow_engine` + */ + workflow_engine_version?: string[] | null; + }; /** WorkflowInput */ WorkflowInput: { /** @@ -24318,6 +24857,14 @@ export interface components { */ workflow_target_type: "stored_workflow" | "workflow" | "trs_url"; }; + /** WorkflowTypeVersion */ + WorkflowTypeVersion: { + /** + * Workflow Type Version + * @description an array of one or more acceptable types for the `workflow_type` + */ + workflow_type_version?: string[] | null; + }; /** WriteInvocationStoreToPayload */ WriteInvocationStoreToPayload: { /** @@ -24461,22 +25008,6 @@ export interface components { name?: string | null; }; /** Organization */ - galaxy__schema__drs__Organization: { - /** - * Name - * @description Name of the organization responsible for the service - * @example My organization - */ - name: string; - /** - * Url - * Format: uri - * @description URL of the website of the organization (RFC 3986 format) - * @example https://example.com - */ - url: string; - }; - /** Organization */ galaxy__schema__schema__Organization: { /** Address */ address?: string | null; @@ -24508,6 +25039,22 @@ export interface components { /** URL */ url?: string | null; }; + /** Organization */ + galaxy__schema__wes__Organization: { + /** + * Name + * @description Name of the organization responsible for the service + * @example My organization + */ + name: string; + /** + * Url + * Format: uri + * @description URL of the website of the organization (RFC 3986 format) + * @example https://example.com + */ + url: string; + }; }; responses: never; parameters: never; @@ -32239,25 +32786,142 @@ export interface operations { }; }; }; - history_contents__delete_typed: { + history_contents__delete_typed: { + parameters: { + query?: { + /** + * @deprecated + * @description Whether to remove from disk the target HDA or child HDAs of the target HDCA. + */ + purge?: boolean | null; + /** + * @deprecated + * @description When deleting a dataset collection, whether to also delete containing datasets. + */ + recursive?: boolean | null; + /** + * @deprecated + * @description Whether to stop the creating job if all outputs of the job have been deleted. + */ + stop_job?: boolean | null; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the History. */ + history_id: string; + /** @description The ID of the item (`HDA`/`HDCA`) */ + id: string; + /** @description The type of the target history element. */ + type: components["schemas"]["HistoryContentType"]; + }; + cookie?: never; + }; + requestBody?: { + content: { + "application/json": components["schemas"]["DeleteHistoryContentPayload"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Request accepted, processing will finish later. */ + 202: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Request has been executed. 
*/ + 204: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + show_jobs_summary_api_histories__history_id__contents__type_s__id__jobs_summary_get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the History. */ + history_id: string; + /** @description The ID of the item (`HDA`/`HDCA`) */ + id: string; + /** @description The type of the target history element. */ + type: components["schemas"]["HistoryContentType"]; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": + | components["schemas"]["JobStateSummary"] + | components["schemas"]["ImplicitCollectionJobsStateSummary"] + | components["schemas"]["WorkflowInvocationStateSummary"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + prepare_store_download_api_histories__history_id__contents__type_s__id__prepare_store_download_post: { parameters: { - query?: { - /** - * @deprecated - * @description Whether to remove from disk the target HDA or child HDAs of the target HDCA. - */ - purge?: boolean | null; - /** - * @deprecated - * @description When deleting a dataset collection, whether to also delete containing datasets. - */ - recursive?: boolean | null; - /** - * @deprecated - * @description Whether to stop the creating job if all outputs of the job have been deleted. - */ - stop_job?: boolean | null; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; @@ -32272,9 +32936,9 @@ export interface operations { }; cookie?: never; }; - requestBody?: { + requestBody: { content: { - "application/json": components["schemas"]["DeleteHistoryContentPayload"]; + "application/json": components["schemas"]["StoreExportPayload"]; }; }; responses: { @@ -32284,22 +32948,8 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": unknown; - }; - }; - /** @description Request accepted, processing will finish later. */ - 202: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Request has been executed. 
*/ - 204: { - headers: { - [name: string]: unknown; + "application/json": components["schemas"]["AsyncFile"]; }; - content?: never; }; /** @description Request Error */ "4XX": { @@ -32321,7 +32971,7 @@ export interface operations { }; }; }; - show_jobs_summary_api_histories__history_id__contents__type_s__id__jobs_summary_get: { + write_store_api_histories__history_id__contents__type_s__id__write_store_post: { parameters: { query?: never; header?: { @@ -32338,7 +32988,11 @@ export interface operations { }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["WriteStoreToPayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -32346,10 +33000,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": - | components["schemas"]["JobStateSummary"] - | components["schemas"]["ImplicitCollectionJobsStateSummary"] - | components["schemas"]["WorkflowInvocationStateSummary"]; + "application/json": components["schemas"]["AsyncTaskResultSummary"]; }; }; /** @description Request Error */ @@ -32372,9 +33023,14 @@ export interface operations { }; }; }; - prepare_store_download_api_histories__history_id__contents__type_s__id__prepare_store_download_post: { + create_from_store_api_histories__history_id__contents_from_store_post: { parameters: { - query?: never; + query?: { + /** @description View to be passed to the serializer */ + view?: string | null; + /** @description Comma-separated list of keys to be passed to the serializer */ + keys?: string | null; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; @@ -32382,16 +33038,12 @@ export interface operations { path: { /** @description The encoded database identifier of the History. */ history_id: string; - /** @description The ID of the item (`HDA`/`HDCA`) */ - id: string; - /** @description The type of the target history element. */ - type: components["schemas"]["HistoryContentType"]; }; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["StoreExportPayload"]; + "application/json": components["schemas"]["CreateHistoryContentFromStore"]; }; }; responses: { @@ -32401,7 +33053,15 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["AsyncFile"]; + "application/json": ( + | components["schemas"]["HDACustom"] + | components["schemas"]["HDADetailed"] + | components["schemas"]["HDASummary"] + | components["schemas"]["HDAInaccessible"] + | components["schemas"]["HDCACustom"] + | components["schemas"]["HDCADetailed"] + | components["schemas"]["HDCASummary"] + )[]; }; }; /** @description Request Error */ @@ -32424,7 +33084,7 @@ export interface operations { }; }; }; - write_store_api_histories__history_id__contents__type_s__id__write_store_post: { + history_contents__copy_contents: { parameters: { query?: never; header?: { @@ -32434,16 +33094,12 @@ export interface operations { path: { /** @description The encoded database identifier of the History. */ history_id: string; - /** @description The ID of the item (`HDA`/`HDCA`) */ - id: string; - /** @description The type of the target history element. 
*/ - type: components["schemas"]["HistoryContentType"]; }; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["WriteStoreToPayload"]; + "application/json": components["schemas"]["CopyDatasetsPayload"]; }; }; responses: { @@ -32453,7 +33109,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["AsyncTaskResultSummary"]; + "application/json": components["schemas"]["CopyDatasetsResponse"]; }; }; /** @description Request Error */ @@ -32476,14 +33132,9 @@ export interface operations { }; }; }; - create_from_store_api_histories__history_id__contents_from_store_post: { + get_custom_builds_metadata_api_histories__history_id__custom_builds_metadata_get: { parameters: { - query?: { - /** @description View to be passed to the serializer */ - view?: string | null; - /** @description Comma-separated list of keys to be passed to the serializer */ - keys?: string | null; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; @@ -32494,11 +33145,7 @@ export interface operations { }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["CreateHistoryContentFromStore"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -32506,15 +33153,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": ( - | components["schemas"]["HDACustom"] - | components["schemas"]["HDADetailed"] - | components["schemas"]["HDASummary"] - | components["schemas"]["HDAInaccessible"] - | components["schemas"]["HDCACustom"] - | components["schemas"]["HDCADetailed"] - | components["schemas"]["HDCASummary"] - )[]; + "application/json": components["schemas"]["CustomBuildsMetadataResponse"]; }; }; /** @description Request Error */ @@ -32537,7 +33176,7 @@ export interface operations { }; }; }; - history_contents__copy_contents: { + disable_link_access_api_histories__history_id__disable_link_access_put: { parameters: { query?: never; header?: { @@ -32550,11 +33189,7 @@ export interface operations { }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["CopyDatasetsPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -32562,7 +33197,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["CopyDatasetsResponse"]; + "application/json": components["schemas"]["SharingStatus"]; }; }; /** @description Request Error */ @@ -32585,7 +33220,7 @@ export interface operations { }; }; }; - get_custom_builds_metadata_api_histories__history_id__custom_builds_metadata_get: { + enable_link_access_api_histories__history_id__enable_link_access_put: { parameters: { query?: never; header?: { @@ -32606,7 +33241,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["CustomBuildsMetadataResponse"]; + "application/json": components["schemas"]["SharingStatus"]; }; }; /** @description Request Error */ @@ -32629,10 +33264,17 @@ export interface operations { }; }; }; - disable_link_access_api_histories__history_id__disable_link_access_put: { + get_history_exports_api_histories__history_id__exports_get: { parameters: { - query?: never; + query?: { + /** @description The 
maximum number of items to return. */ + limit?: number | null; + /** @description Starts at the beginning skip the first ( offset - 1 ) items and begin returning at the Nth item */ + offset?: number | null; + }; header?: { + /** @description Accept header to determine the response format. Default is 'application/json'. */ + accept?: "application/json" | "application/vnd.galaxy.task.export+json"; /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; @@ -32644,13 +33286,14 @@ export interface operations { }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description A list of history exports */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["SharingStatus"]; + "application/json": components["schemas"]["JobExportHistoryArchiveListResponse"]; + "application/vnd.galaxy.task.export+json": components["schemas"]["ExportTaskListResponse"]; }; }; /** @description Request Error */ @@ -32673,7 +33316,7 @@ export interface operations { }; }; }; - enable_link_access_api_histories__history_id__enable_link_access_put: { + archive_export_api_histories__history_id__exports_put: { parameters: { query?: never; header?: { @@ -32686,17 +33329,30 @@ export interface operations { }; cookie?: never; }; - requestBody?: never; + requestBody?: { + content: { + "application/json": components["schemas"]["ExportHistoryArchivePayload"] | null; + }; + }; responses: { - /** @description Successful Response */ + /** @description Object containing url to fetch export from. */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["SharingStatus"]; + "application/json": + | components["schemas"]["JobExportHistoryArchiveModel"] + | components["schemas"]["JobIdResponse"]; }; }; + /** @description The exported archive file is not ready yet. */ + 202: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; /** @description Request Error */ "4XX": { headers: { @@ -32717,17 +33373,59 @@ export interface operations { }; }; }; - get_history_exports_api_histories__history_id__exports_get: { + history_archive_download_api_histories__history_id__exports__jeha_id__get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the History. */ + history_id: string; + /** @description The ID of the specific Job Export History Association or `latest` (default) to download the last generated archive. */ + jeha_id: string | "latest"; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description The archive file containing the History. 
*/ + 200: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + index_jobs_summary_api_histories__history_id__jobs_summary_get: { parameters: { query?: { - /** @description The maximum number of items to return. */ - limit?: number | null; - /** @description Starts at the beginning skip the first ( offset - 1 ) items and begin returning at the Nth item */ - offset?: number | null; + /** @description A comma-separated list of encoded ids of job summary objects to return - if `ids` is specified types must also be specified and have same length. */ + ids?: string | null; + /** @description A comma-separated list of type of object represented by elements in the `ids` array - any of `Job`, `ImplicitCollectionJob`, or `WorkflowInvocation`. */ + types?: string | null; }; header?: { - /** @description Accept header to determine the response format. Default is 'application/json'. */ - accept?: "application/json" | "application/vnd.galaxy.task.export+json"; /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; @@ -32739,14 +33437,17 @@ export interface operations { }; requestBody?: never; responses: { - /** @description A list of history exports */ + /** @description Successful Response */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobExportHistoryArchiveListResponse"]; - "application/vnd.galaxy.task.export+json": components["schemas"]["ExportTaskListResponse"]; + "application/json": ( + | components["schemas"]["JobStateSummary"] + | components["schemas"]["ImplicitCollectionJobsStateSummary"] + | components["schemas"]["WorkflowInvocationStateSummary"] + )[]; }; }; /** @description Request Error */ @@ -32769,7 +33470,7 @@ export interface operations { }; }; }; - archive_export_api_histories__history_id__exports_put: { + materialize_to_history_api_histories__history_id__materialize_post: { parameters: { query?: never; header?: { @@ -32782,29 +33483,20 @@ export interface operations { }; cookie?: never; }; - requestBody?: { + requestBody: { content: { - "application/json": components["schemas"]["ExportHistoryArchivePayload"] | null; + "application/json": components["schemas"]["MaterializeDatasetInstanceAPIRequest"]; }; }; responses: { - /** @description Object containing url to fetch export from. */ + /** @description Successful Response */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": - | components["schemas"]["JobExportHistoryArchiveModel"] - | components["schemas"]["JobIdResponse"]; - }; - }; - /** @description The exported archive file is not ready yet. 
*/ - 202: { - headers: { - [name: string]: unknown; + "application/json": components["schemas"]["AsyncTaskResultSummary"]; }; - content?: never; }; /** @description Request Error */ "4XX": { @@ -32826,7 +33518,7 @@ export interface operations { }; }; }; - history_archive_download_api_histories__history_id__exports__jeha_id__get: { + prepare_store_download_api_histories__history_id__prepare_store_download_post: { parameters: { query?: never; header?: { @@ -32836,19 +33528,23 @@ export interface operations { path: { /** @description The encoded database identifier of the History. */ history_id: string; - /** @description The ID of the specific Job Export History Association or `latest` (default) to download the last generated archive. */ - jeha_id: string | "latest"; }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["StoreExportPayload"]; + }; + }; responses: { - /** @description The archive file containing the History. */ + /** @description Successful Response */ 200: { headers: { [name: string]: unknown; }; - content?: never; + content: { + "application/json": components["schemas"]["AsyncFile"]; + }; }; /** @description Request Error */ "4XX": { @@ -32870,14 +33566,9 @@ export interface operations { }; }; }; - index_jobs_summary_api_histories__history_id__jobs_summary_get: { + publish_api_histories__history_id__publish_put: { parameters: { - query?: { - /** @description A comma-separated list of encoded ids of job summary objects to return - if `ids` is specified types must also be specified and have same length. */ - ids?: string | null; - /** @description A comma-separated list of type of object represented by elements in the `ids` array - any of `Job`, `ImplicitCollectionJob`, or `WorkflowInvocation`. */ - types?: string | null; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. 
*/ "run-as"?: string | null; @@ -32896,11 +33587,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": ( - | components["schemas"]["JobStateSummary"] - | components["schemas"]["ImplicitCollectionJobsStateSummary"] - | components["schemas"]["WorkflowInvocationStateSummary"] - )[]; + "application/json": components["schemas"]["SharingStatus"]; }; }; /** @description Request Error */ @@ -32923,7 +33610,7 @@ export interface operations { }; }; }; - materialize_to_history_api_histories__history_id__materialize_post: { + share_with_users_api_histories__history_id__share_with_users_put: { parameters: { query?: never; header?: { @@ -32938,7 +33625,7 @@ export interface operations { }; requestBody: { content: { - "application/json": components["schemas"]["MaterializeDatasetInstanceAPIRequest"]; + "application/json": components["schemas"]["ShareWithPayload"]; }; }; responses: { @@ -32948,7 +33635,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["AsyncTaskResultSummary"]; + "application/json": components["schemas"]["ShareHistoryWithStatus"]; }; }; /** @description Request Error */ @@ -32971,7 +33658,7 @@ export interface operations { }; }; }; - prepare_store_download_api_histories__history_id__prepare_store_download_post: { + sharing_api_histories__history_id__sharing_get: { parameters: { query?: never; header?: { @@ -32984,11 +33671,7 @@ export interface operations { }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["StoreExportPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -32996,7 +33679,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["AsyncFile"]; + "application/json": components["schemas"]["SharingStatus"]; }; }; /** @description Request Error */ @@ -33019,7 +33702,7 @@ export interface operations { }; }; }; - publish_api_histories__history_id__publish_put: { + set_slug_api_histories__history_id__slug_put: { parameters: { query?: never; header?: { @@ -33032,16 +33715,18 @@ export interface operations { }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["SetSlugPayload"]; + }; + }; responses: { /** @description Successful Response */ - 200: { + 204: { headers: { [name: string]: unknown; }; - content: { - "application/json": components["schemas"]["SharingStatus"]; - }; + content?: never; }; /** @description Request Error */ "4XX": { @@ -33063,7 +33748,7 @@ export interface operations { }; }; }; - share_with_users_api_histories__history_id__share_with_users_put: { + index_api_histories__history_id__tags_get: { parameters: { query?: never; header?: { @@ -33071,16 +33756,11 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the History. 
*/ history_id: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["ShareWithPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -33088,7 +33768,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ShareHistoryWithStatus"]; + "application/json": components["schemas"]["ItemTagsListResponse"]; }; }; /** @description Request Error */ @@ -33111,7 +33791,7 @@ export interface operations { }; }; }; - sharing_api_histories__history_id__sharing_get: { + show_api_histories__history_id__tags__tag_name__get: { parameters: { query?: never; header?: { @@ -33119,8 +33799,8 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the History. */ history_id: string; + tag_name: string; }; cookie?: never; }; @@ -33132,7 +33812,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["SharingStatus"]; + "application/json": components["schemas"]["ItemTagsResponse"]; }; }; /** @description Request Error */ @@ -33155,7 +33835,7 @@ export interface operations { }; }; }; - set_slug_api_histories__history_id__slug_put: { + update_api_histories__history_id__tags__tag_name__put: { parameters: { query?: never; header?: { @@ -33163,23 +33843,25 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the History. */ history_id: string; + tag_name: string; }; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["SetSlugPayload"]; + "application/json": components["schemas"]["ItemTagsCreatePayload"]; }; }; responses: { /** @description Successful Response */ - 204: { + 200: { headers: { [name: string]: unknown; }; - content?: never; + content: { + "application/json": components["schemas"]["ItemTagsResponse"]; + }; }; /** @description Request Error */ "4XX": { @@ -33201,7 +33883,7 @@ export interface operations { }; }; }; - index_api_histories__history_id__tags_get: { + create_api_histories__history_id__tags__tag_name__post: { parameters: { query?: never; header?: { @@ -33210,10 +33892,15 @@ export interface operations { }; path: { history_id: string; + tag_name: string; }; cookie?: never; }; - requestBody?: never; + requestBody?: { + content: { + "application/json": components["schemas"]["ItemTagsCreatePayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -33221,7 +33908,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsListResponse"]; + "application/json": components["schemas"]["ItemTagsResponse"]; }; }; /** @description Request Error */ @@ -33244,7 +33931,7 @@ export interface operations { }; }; }; - show_api_histories__history_id__tags__tag_name__get: { + delete_api_histories__history_id__tags__tag_name__delete: { parameters: { query?: never; header?: { @@ -33265,7 +33952,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsResponse"]; + "application/json": boolean; }; }; /** @description Request Error */ @@ -33288,7 +33975,7 @@ export interface operations { }; }; }; - update_api_histories__history_id__tags__tag_name__put: { + tool_requests_api_histories__history_id__tool_requests_get: { parameters: { query?: never; header?: { @@ -33296,16 
+33983,12 @@ export interface operations { "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the History. */ history_id: string; - tag_name: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["ItemTagsCreatePayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -33313,7 +33996,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsResponse"]; + "application/json": components["schemas"]["ToolRequestModel"][]; }; }; /** @description Request Error */ @@ -33336,7 +34019,7 @@ export interface operations { }; }; }; - create_api_histories__history_id__tags__tag_name__post: { + unpublish_api_histories__history_id__unpublish_put: { parameters: { query?: never; header?: { @@ -33344,16 +34027,12 @@ export interface operations { "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the History. */ history_id: string; - tag_name: string; }; cookie?: never; }; - requestBody?: { - content: { - "application/json": components["schemas"]["ItemTagsCreatePayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -33361,7 +34040,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsResponse"]; + "application/json": components["schemas"]["SharingStatus"]; }; }; /** @description Request Error */ @@ -33384,7 +34063,7 @@ export interface operations { }; }; }; - delete_api_histories__history_id__tags__tag_name__delete: { + write_store_api_histories__history_id__write_store_post: { parameters: { query?: never; header?: { @@ -33392,12 +34071,16 @@ export interface operations { "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the History. */ history_id: string; - tag_name: string; }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["WriteStoreToPayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -33405,7 +34088,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": boolean; + "application/json": components["schemas"]["AsyncTaskResultSummary"]; }; }; /** @description Request Error */ @@ -33428,17 +34111,40 @@ export interface operations { }; }; }; - tool_requests_api_histories__history_id__tool_requests_get: { + index_invocations_api_invocations_get: { parameters: { - query?: never; + query?: { + /** @description Return only invocations for this Workflow ID */ + workflow_id?: string | null; + /** @description Return only invocations for this History ID */ + history_id?: string | null; + /** @description Return only invocations for this Job ID */ + job_id?: string | null; + /** @description Return invocations for this User ID. */ + user_id?: string | null; + /** @description Sort Workflow Invocations by this attribute */ + sort_by?: components["schemas"]["InvocationSortByEnum"] | null; + /** @description Sort in descending order? */ + sort_desc?: boolean; + /** @description Set to false to only include terminal Invocations. */ + include_terminal?: boolean | null; + /** @description Limit the number of invocations to return. */ + limit?: number | null; + /** @description Number of invocations to skip. 
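
A sketch of the relocated invocations listing with its filter and pagination query parameters, fully typed end to end. The `/api/invocations` path key is inferred from the operation id (`index_invocations_api_invocations_get`); the corresponding paths entry sits outside this excerpt, so treat it as an assumption.

```ts
import createClient from "openapi-fetch";
import type { paths } from "./schema";

const client = createClient<paths>({ baseUrl: "https://usegalaxy.example" });

// List the 20 most recent invocations for a history; query parameter
// names come from the operation definition above.
export async function recentInvocations(historyId: string) {
    const { data, error } = await client.GET("/api/invocations", {
        params: {
            query: { history_id: historyId, limit: 20 },
        },
    });
    if (error) throw new Error("listing invocations failed");
    return data; // WorkflowInvocationResponse[]
}
```
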
*/ + offset?: number | null; + /** @description Is provided workflow id for Workflow instead of StoredWorkflow? */ + instance?: boolean | null; + /** @description View to be passed to the serializer */ + view?: string | null; + /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ + step_details?: boolean; + include_nested_invocations?: boolean; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path: { - /** @description The encoded database identifier of the History. */ - history_id: string; - }; + path?: never; cookie?: never; }; requestBody?: never; @@ -33449,7 +34155,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ToolRequestModel"][]; + "application/json": components["schemas"]["WorkflowInvocationResponse"][]; }; }; /** @description Request Error */ @@ -33472,20 +34178,21 @@ export interface operations { }; }; }; - unpublish_api_histories__history_id__unpublish_put: { + create_invocations_from_store_api_invocations_from_store_post: { parameters: { query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path: { - /** @description The encoded database identifier of the History. */ - history_id: string; - }; + path?: never; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["CreateInvocationsFromStorePayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -33493,7 +34200,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["SharingStatus"]; + "application/json": components["schemas"]["WorkflowInvocationResponse"][]; }; }; /** @description Request Error */ @@ -33516,7 +34223,7 @@ export interface operations { }; }; }; - write_store_api_histories__history_id__write_store_post: { + step_api_invocations_steps__step_id__get: { parameters: { query?: never; header?: { @@ -33524,16 +34231,12 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the History. */ - history_id: string; + /** @description The encoded database identifier of the WorkflowInvocationStep. 
*/ + step_id: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["WriteStoreToPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -33541,7 +34244,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["AsyncTaskResultSummary"]; + "application/json": components["schemas"]["InvocationStep"]; }; }; /** @description Request Error */ @@ -33564,40 +34267,27 @@ export interface operations { }; }; }; - index_invocations_api_invocations_get: { + show_invocation_api_invocations__invocation_id__get: { parameters: { query?: { - /** @description Return only invocations for this Workflow ID */ - workflow_id?: string | null; - /** @description Return only invocations for this History ID */ - history_id?: string | null; - /** @description Return only invocations for this Job ID */ - job_id?: string | null; - /** @description Return invocations for this User ID. */ - user_id?: string | null; - /** @description Sort Workflow Invocations by this attribute */ - sort_by?: components["schemas"]["InvocationSortByEnum"] | null; - /** @description Sort in descending order? */ - sort_desc?: boolean; - /** @description Set to false to only include terminal Invocations. */ - include_terminal?: boolean | null; - /** @description Limit the number of invocations to return. */ - limit?: number | null; - /** @description Number of invocations to skip. */ - offset?: number | null; - /** @description Is provided workflow id for Workflow instead of StoredWorkflow? */ - instance?: boolean | null; - /** @description View to be passed to the serializer */ - view?: string | null; /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ step_details?: boolean; - include_nested_invocations?: boolean; + /** + * @description Populate the invocation step state with the job state instead of the invocation step state. + * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. + * Partially scheduled steps may provide incomplete information and the listed steps outputs + * are not the mapped over step outputs but the individual job outputs. + */ + legacy_job_state?: boolean; }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path?: never; + path: { + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; + }; cookie?: never; }; requestBody?: never; @@ -33608,7 +34298,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationResponse"][]; + "application/json": components["schemas"]["WorkflowInvocationResponse"]; }; }; /** @description Request Error */ @@ -33631,21 +34321,30 @@ export interface operations { }; }; }; - create_invocations_from_store_api_invocations_from_store_post: { + cancel_invocation_api_invocations__invocation_id__delete: { parameters: { - query?: never; + query?: { + /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ + step_details?: boolean; + /** + * @description Populate the invocation step state with the job state instead of the invocation step state. 
+ * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. + * Partially scheduled steps may provide incomplete information and the listed steps outputs + * are not the mapped over step outputs but the individual job outputs. + */ + legacy_job_state?: boolean; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["CreateInvocationsFromStorePayload"]; + path: { + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; }; + cookie?: never; }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -33653,7 +34352,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationResponse"][]; + "application/json": components["schemas"]["WorkflowInvocationResponse"]; }; }; /** @description Request Error */ @@ -33676,7 +34375,7 @@ export interface operations { }; }; }; - step_api_invocations_steps__step_id__get: { + report_error_api_invocations__invocation_id__error_post: { parameters: { query?: never; header?: { @@ -33684,21 +34383,23 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the WorkflowInvocationStep. */ - step_id: string; + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["ReportInvocationErrorPayload"]; + }; + }; responses: { /** @description Successful Response */ - 200: { + 204: { headers: { [name: string]: unknown; }; - content: { - "application/json": components["schemas"]["InvocationStep"]; - }; + content?: never; }; /** @description Request Error */ "4XX": { @@ -33720,19 +34421,9 @@ export interface operations { }; }; }; - show_invocation_api_invocations__invocation_id__get: { + invocation_jobs_summary_api_invocations__invocation_id__jobs_summary_get: { parameters: { - query?: { - /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ - step_details?: boolean; - /** - * @description Populate the invocation step state with the job state instead of the invocation step state. - * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. - * Partially scheduled steps may provide incomplete information and the listed steps outputs - * are not the mapped over step outputs but the individual job outputs. - */ - legacy_job_state?: boolean; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. 
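The `step_details`/`legacy_job_state` pair added above changes the shape of the returned invocation. A minimal sketch of a call that exercises them (base URL and key are placeholders; any HTTP client would do):

```ts
import type { components } from "./schema";

type Invocation = components["schemas"]["WorkflowInvocationResponse"];

const GALAXY = "https://galaxy.example.org"; // placeholder base URL
const API_KEY = "..."; // placeholder key

// step_details pulls per-step information into the response; legacy_job_state
// additionally expands collection-mapped steps into one entry per job.
async function showInvocation(id: string, legacy = false): Promise<Invocation> {
    const qs = new URLSearchParams({
        step_details: "true",
        legacy_job_state: String(legacy),
    });
    const res = await fetch(`${GALAXY}/api/invocations/${id}?${qs}`, {
        headers: { "x-api-key": API_KEY },
    });
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
    return (await res.json()) as Invocation;
}
```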
*/ "run-as"?: string | null; @@ -33751,7 +34442,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationResponse"]; + "application/json": components["schemas"]["InvocationJobsResponse"]; }; }; /** @description Request Error */ @@ -33774,19 +34465,9 @@ export interface operations { }; }; }; - cancel_invocation_api_invocations__invocation_id__delete: { + get_invocation_metrics_api_invocations__invocation_id__metrics_get: { parameters: { - query?: { - /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ - step_details?: boolean; - /** - * @description Populate the invocation step state with the job state instead of the invocation step state. - * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. - * Partially scheduled steps may provide incomplete information and the listed steps outputs - * are not the mapped over step outputs but the individual job outputs. - */ - legacy_job_state?: boolean; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; @@ -33805,7 +34486,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationResponse"]; + "application/json": components["schemas"]["WorkflowJobMetric"][]; }; }; /** @description Request Error */ @@ -33828,7 +34509,7 @@ export interface operations { }; }; }; - report_error_api_invocations__invocation_id__error_post: { + prepare_store_download_api_invocations__invocation_id__prepare_store_download_post: { parameters: { query?: never; header?: { @@ -33843,16 +34524,18 @@ export interface operations { }; requestBody: { content: { - "application/json": components["schemas"]["ReportInvocationErrorPayload"]; + "application/json": components["schemas"]["PrepareStoreDownloadPayload"]; }; }; responses: { /** @description Successful Response */ - 204: { + 200: { headers: { [name: string]: unknown; }; - content?: never; + content: { + "application/json": components["schemas"]["AsyncFile"]; + }; }; /** @description Request Error */ "4XX": { @@ -33874,7 +34557,7 @@ export interface operations { }; }; }; - invocation_jobs_summary_api_invocations__invocation_id__jobs_summary_get: { + show_invocation_report_api_invocations__invocation_id__report_get: { parameters: { query?: never; header?: { @@ -33895,7 +34578,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationJobsResponse"]; + "application/json": components["schemas"]["InvocationReport"]; }; }; /** @description Request Error */ @@ -33918,7 +34601,7 @@ export interface operations { }; }; }; - get_invocation_metrics_api_invocations__invocation_id__metrics_get: { + show_invocation_report_pdf_api_invocations__invocation_id__report_pdf_get: { parameters: { query?: never; header?: { @@ -33938,9 +34621,7 @@ export interface operations { headers: { [name: string]: unknown; }; - content: { - "application/json": components["schemas"]["WorkflowJobMetric"][]; - }; + content?: never; }; /** @description Request Error */ "4XX": { @@ -33962,7 +34643,7 @@ export interface operations { }; }; }; - prepare_store_download_api_invocations__invocation_id__prepare_store_download_post: { + 
invocation_as_request_api_invocations__invocation_id__request_get: { parameters: { query?: never; header?: { @@ -33975,11 +34656,7 @@ export interface operations { }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["PrepareStoreDownloadPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -33987,7 +34664,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["AsyncFile"]; + "application/json": components["schemas"]["WorkflowInvocationRequestModel"]; }; }; /** @description Request Error */ @@ -34010,7 +34687,7 @@ export interface operations { }; }; }; - show_invocation_report_api_invocations__invocation_id__report_get: { + invocation_step_jobs_summary_api_invocations__invocation_id__step_jobs_summary_get: { parameters: { query?: never; header?: { @@ -34031,7 +34708,11 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationReport"]; + "application/json": ( + | components["schemas"]["InvocationStepJobsResponseStepModel"] + | components["schemas"]["InvocationStepJobsResponseJobModel"] + | components["schemas"]["InvocationStepJobsResponseCollectionJobsModel"] + )[]; }; }; /** @description Request Error */ @@ -34054,7 +34735,7 @@ export interface operations { }; }; }; - show_invocation_report_pdf_api_invocations__invocation_id__report_pdf_get: { + invocation_step_api_invocations__invocation_id__steps__step_id__get: { parameters: { query?: never; header?: { @@ -34064,6 +34745,8 @@ export interface operations { path: { /** @description The encoded database identifier of the Invocation. */ invocation_id: string; + /** @description The encoded database identifier of the WorkflowInvocationStep. */ + step_id: string; }; cookie?: never; }; @@ -34074,7 +34757,9 @@ export interface operations { headers: { [name: string]: unknown; }; - content?: never; + content: { + "application/json": components["schemas"]["InvocationStep"]; + }; }; /** @description Request Error */ "4XX": { @@ -34096,7 +34781,7 @@ export interface operations { }; }; }; - invocation_as_request_api_invocations__invocation_id__request_get: { + update_invocation_step_api_invocations__invocation_id__steps__step_id__put: { parameters: { query?: never; header?: { @@ -34106,10 +34791,16 @@ export interface operations { path: { /** @description The encoded database identifier of the Invocation. */ invocation_id: string; + /** @description The encoded database identifier of the WorkflowInvocationStep. 
*/ + step_id: string; }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["InvocationUpdatePayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -34117,7 +34808,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationRequestModel"]; + "application/json": components["schemas"]["InvocationStep"]; }; }; /** @description Request Error */ @@ -34140,7 +34831,7 @@ export interface operations { }; }; }; - invocation_step_jobs_summary_api_invocations__invocation_id__step_jobs_summary_get: { + write_store_api_invocations__invocation_id__write_store_post: { parameters: { query?: never; header?: { @@ -34153,7 +34844,11 @@ export interface operations { }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["WriteInvocationStoreToPayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -34161,11 +34856,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": ( - | components["schemas"]["InvocationStepJobsResponseStepModel"] - | components["schemas"]["InvocationStepJobsResponseJobModel"] - | components["schemas"]["InvocationStepJobsResponseCollectionJobsModel"] - )[]; + "application/json": components["schemas"]["AsyncTaskResultSummary"]; }; }; /** @description Request Error */ @@ -34188,19 +34879,14 @@ export interface operations { }; }; }; - invocation_step_api_invocations__invocation_id__steps__step_id__get: { + job_lock_status_api_job_lock_get: { parameters: { query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path: { - /** @description The encoded database identifier of the Invocation. */ - invocation_id: string; - /** @description The encoded database identifier of the WorkflowInvocationStep. */ - step_id: string; - }; + path?: never; cookie?: never; }; requestBody?: never; @@ -34211,7 +34897,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationStep"]; + "application/json": components["schemas"]["JobLock"]; }; }; /** @description Request Error */ @@ -34234,24 +34920,19 @@ export interface operations { }; }; }; - update_invocation_step_api_invocations__invocation_id__steps__step_id__put: { + update_job_lock_api_job_lock_put: { parameters: { query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path: { - /** @description The encoded database identifier of the Invocation. */ - invocation_id: string; - /** @description The encoded database identifier of the WorkflowInvocationStep. 
*/ - step_id: string; - }; + path?: never; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["InvocationUpdatePayload"]; + "application/json": components["schemas"]["JobLock"]; }; }; responses: { @@ -34261,7 +34942,122 @@ [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationStep"]; + "application/json": components["schemas"]["JobLock"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + index_api_jobs_get: { + parameters: { + query?: { + /** @description If true, and requester is an admin, will return external job id and user email. This is only available to admins. */ + user_details?: boolean; + /** @description an encoded user id to restrict query to, must be own id if not admin user */ + user_id?: string | null; + /** @description Determines columns to return. Defaults to 'collection'. */ + view?: components["schemas"]["JobIndexViewEnum"]; + /** @description Limit listing of jobs to those that are updated after specified date (e.g. '2014-01-01') */ + date_range_min?: string | null; + /** @description Limit listing of jobs to those that are updated before specified date (e.g. '2014-01-01') */ + date_range_max?: string | null; + /** @description Limit listing of jobs to those that match the history_id. If none, jobs from any history may be returned. */ + history_id?: string | null; + /** @description Limit listing of jobs to those that match the specified workflow ID. If none, jobs from any workflow (or from no workflows) may be returned. */ + workflow_id?: string | null; + /** @description Limit listing of jobs to those that match the specified workflow invocation ID. If none, jobs from any workflow invocation (or from no workflows) may be returned. */ + invocation_id?: string | null; + /** @description Limit listing of jobs to those that match the specified implicit collection job ID. If none, jobs from any implicit collection execution (or from no implicit collection execution) may be returned. */ + implicit_collection_jobs_id?: string | null; + /** @description Limit listing of jobs to those that were created from the supplied tool request ID. If none, jobs from any tool request (or from no workflows) may be returned. */ + tool_request_id?: string | null; + /** @description Sort results by specified field. */ + order_by?: components["schemas"]["JobIndexSortByEnum"]; + /** + * @description A mix of free text and GitHub-style tags used to filter the index operation. + * + * ## Query Structure + * + * GitHub-style filter tags (not to be confused with Galaxy tags) are tags of the form + * `<tag_name>:<tag_value>` or `<tag_name>:'<tag_value>'`. The tag name + * *generally* (but not exclusively) corresponds to the name of an attribute on the model + * being indexed (i.e. a column in the database). + * + * If the tag is quoted, the attribute will be filtered exactly. If the tag is unquoted, + * generally a partial match will be used to filter the query (i.e. in terms of the implementation + * this means the database operation `ILIKE` will typically be used). 
+ * + * Once the tagged filters are extracted from the search query, the remaining text is just + * used to search various documented attributes of the object. + * + * ## GitHub-style Tags Available + * + * `user` + * : The user email of the user that executed the Job. (The tag `u` can be used as a shorthand alias for this tag to filter on this attribute.) + * + * `tool_id` + * : The tool ID corresponding to the job. (The tag `t` can be used as a shorthand alias for this tag to filter on this attribute.) + * + * `runner` + * : The job runner name used to execute the job. (The tag `r` can be used as a shorthand alias for this tag to filter on this attribute.) This tag is only available for requests using admin keys and/or sessions. + * + * `handler` + * : The job handler name used to execute the job. (The tag `h` can be used as a shorthand alias for this tag to filter on this attribute.) This tag is only available for requests using admin keys and/or sessions. + * + * ## Free Text + * + * Free text search terms will be searched against the following attributes of the + * Jobs: `user`, `tool`, `handler`, `runner`. + */ + search?: string | null; + /** @description Maximum number of jobs to return. */ + limit?: number; + /** @description Return jobs starting from this specified position. For example, if ``limit`` is set to 100 and ``offset`` to 200, jobs 200-299 will be returned. */ + offset?: number; + /** @description A list or comma-separated list of states to filter job query on. If unspecified, jobs of any state may be returned. */ + state?: string[] | null; + /** @description Limit listing of jobs to those that match one of the included tool_ids. If none, all are returned */ + tool_id?: string[] | null; + /** @description Limit listing of jobs to those that match one of the included tool ID sql-like patterns. If none, all are returned */ + tool_id_like?: string[] | null; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": ( + | components["schemas"]["ShowFullJobResponse"] + | components["schemas"]["EncodedJobDetails"] + | components["schemas"]["JobSummary"] + )[]; }; }; /** @description Request Error */ "4XX": { headers: { [name: string]: unknown; }; content: { "application/json": components["schemas"]["MessageExceptionModel"]; }; }; /** @description Server Error */ "5XX": { headers: { [name: string]: unknown; }; content: { "application/json": components["schemas"]["MessageExceptionModel"]; }; }; }; }; @@ -34284,65 +35080,21 @@ - write_store_api_invocations__invocation_id__write_store_post: { + create_api_jobs_post: { parameters: { query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path: { - /** @description The encoded database identifier of the Invocation. 
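The `search` tag syntax and the `limit`/`offset` pagination documented above combine naturally. A sketch, assuming the usual `x-api-key` header; the tool ID is a placeholder:

```ts
import type { operations } from "./schema";

type JobsPage =
    operations["index_api_jobs_get"]["responses"]["200"]["content"]["application/json"];

const GALAXY = "https://galaxy.example.org"; // placeholder base URL
const API_KEY = "..."; // placeholder key

// Page through failed runs of one tool. "t:" is the documented shorthand for
// the tool_id filter tag; unquoted values match partially (ILIKE).
async function* failedJobs(toolId: string): AsyncGenerator<JobsPage[number]> {
    const limit = 100;
    for (let offset = 0; ; offset += limit) {
        const qs = new URLSearchParams({
            search: `t:${toolId}`,
            state: "error",
            limit: String(limit),
            offset: String(offset),
        });
        const res = await fetch(`${GALAXY}/api/jobs?${qs}`, {
            headers: { "x-api-key": API_KEY },
        });
        if (!res.ok) throw new Error(`HTTP ${res.status}`);
        const page = (await res.json()) as JobsPage;
        yield* page;
        if (page.length < limit) return; // a short page means no more results
    }
}
```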
*/ - invocation_id: string; - }; + path?: never; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["WriteInvocationStoreToPayload"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["AsyncTaskResultSummary"]; - }; - }; - /** @description Request Error */ - "4XX": { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["MessageExceptionModel"]; - }; - }; - /** @description Server Error */ - "5XX": { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["MessageExceptionModel"]; - }; - }; - }; - }; - job_lock_status_api_job_lock_get: { - parameters: { - query?: never; - header?: { - /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ - "run-as"?: string | null; + "application/json": components["schemas"]["JobRequest"]; }; - path?: never; - cookie?: never; }; - requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -34350,7 +35102,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobLock"]; + "application/json": components["schemas"]["JobCreateResponse"]; }; }; /** @description Request Error */ @@ -34373,7 +35125,7 @@ export interface operations { }; }; }; - update_job_lock_api_job_lock_put: { + search_jobs_api_jobs_search_post: { parameters: { query?: never; header?: { @@ -34385,7 +35137,7 @@ export interface operations { }; requestBody: { content: { - "application/json": components["schemas"]["JobLock"]; + "application/json": components["schemas"]["SearchJobsPayload"]; }; }; responses: { @@ -34395,7 +35147,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobLock"]; + "application/json": components["schemas"]["EncodedJobDetails"][]; }; }; /** @description Request Error */ @@ -34418,84 +35170,20 @@ export interface operations { }; }; }; - index_api_jobs_get: { + show_job_api_jobs__job_id__get: { parameters: { query?: { - /** @description If true, and requester is an admin, will return external job id and user email. This is only available to admins. */ - user_details?: boolean; - /** @description an encoded user id to restrict query to, must be own id if not admin user */ - user_id?: string | null; - /** @description Determines columns to return. Defaults to 'collection'. */ - view?: components["schemas"]["JobIndexViewEnum"]; - /** @description Limit listing of jobs to those that are updated after specified date (e.g. '2014-01-01') */ - date_range_min?: string | null; - /** @description Limit listing of jobs to those that are updated before specified date (e.g. '2014-01-01') */ - date_range_max?: string | null; - /** @description Limit listing of jobs to those that match the history_id. If none, jobs from any history may be returned. */ - history_id?: string | null; - /** @description Limit listing of jobs to those that match the specified workflow ID. If none, jobs from any workflow (or from no workflows) may be returned. */ - workflow_id?: string | null; - /** @description Limit listing of jobs to those that match the specified workflow invocation ID. If none, jobs from any workflow invocation (or from no workflows) may be returned. 
*/ - invocation_id?: string | null; - /** @description Limit listing of jobs to those that match the specified implicit collection job ID. If none, jobs from any implicit collection execution (or from no implicit collection execution) may be returned. */ - implicit_collection_jobs_id?: string | null; - /** @description Limit listing of jobs to those that were created from the supplied tool request ID. If none, jobs from any tool request (or from no workflows) may be returned. */ - tool_request_id?: string | null; - /** @description Sort results by specified field. */ - order_by?: components["schemas"]["JobIndexSortByEnum"]; - /** - * @description A mix of free text and GitHub-style tags used to filter the index operation. - * - * ## Query Structure - * - * GitHub-style filter tags (not to be confused with Galaxy tags) are tags of the form - * `<tag_name>:<tag_value>` or `<tag_name>:'<tag_value>'`. The tag name - * *generally* (but not exclusively) corresponds to the name of an attribute on the model - * being indexed (i.e. a column in the database). - * - * If the tag is quoted, the attribute will be filtered exactly. If the tag is unquoted, - * generally a partial match will be used to filter the query (i.e. in terms of the implementation - * this means the database operation `ILIKE` will typically be used). - * - * Once the tagged filters are extracted from the search query, the remaining text is just - * used to search various documented attributes of the object. - * - * ## GitHub-style Tags Available - * - * `user` - * : The user email of the user that executed the Job. (The tag `u` can be used as a shorthand alias for this tag to filter on this attribute.) - * - * `tool_id` - * : The tool ID corresponding to the job. (The tag `t` can be used as a shorthand alias for this tag to filter on this attribute.) - * - * `runner` - * : The job runner name used to execute the job. (The tag `r` can be used as a shorthand alias for this tag to filter on this attribute.) This tag is only available for requests using admin keys and/or sessions. - * - * `handler` - * : The job handler name used to execute the job. (The tag `h` can be used as a shorthand alias for this tag to filter on this attribute.) This tag is only available for requests using admin keys and/or sessions. - * - * ## Free Text - * - * Free text search terms will be searched against the following attributes of the - * Jobs: `user`, `tool`, `handler`, `runner`. - */ - search?: string | null; - /** @description Maximum number of jobs to return. */ - limit?: number; - /** @description Return jobs starting from this specified position. For example, if ``limit`` is set to 100 and ``offset`` to 200, jobs 200-299 will be returned. */ - offset?: number; - /** @description A list or comma-separated list of states to filter job query on. If unspecified, jobs of any state may be returned. */ - state?: string[] | null; - /** @description Limit listing of jobs to those that match one of the included tool_ids. If none, all are returned */ - tool_id?: string[] | null; - /** @description Limit listing of jobs to those that match one of the included tool ID sql-like patterns. If none, all are returned */ - tool_id_like?: string[] | null; + /** @description Show extra information. */ + full?: boolean | null; }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. 
*/ "run-as"?: string | null; }; - path?: never; + path: { + /** @description The ID of the job */ + job_id: string; + }; cookie?: never; }; requestBody?: never; @@ -34506,11 +35194,9 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": ( + "application/json": | components["schemas"]["ShowFullJobResponse"] - | components["schemas"]["EncodedJobDetails"] - | components["schemas"]["JobSummary"] - )[]; + | components["schemas"]["EncodedJobDetails"]; }; }; /** @description Request Error */ @@ -34533,19 +35219,22 @@ export interface operations { }; }; }; - create_api_jobs_post: { + cancel_job_api_jobs__job_id__delete: { parameters: { query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path?: never; + path: { + /** @description The ID of the job */ + job_id: string; + }; cookie?: never; }; - requestBody: { + requestBody?: { content: { - "application/json": components["schemas"]["JobRequest"]; + "application/json": components["schemas"]["DeleteJobPayload"] | null; }; }; responses: { @@ -34555,7 +35244,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobCreateResponse"]; + "application/json": boolean; }; }; /** @description Request Error */ @@ -34578,21 +35267,20 @@ export interface operations { }; }; }; - search_jobs_api_jobs_search_post: { + check_common_problems_api_jobs__job_id__common_problems_get: { parameters: { query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["SearchJobsPayload"]; + path: { + /** @description The ID of the job */ + job_id: string; }; + cookie?: never; }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -34600,7 +35288,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["EncodedJobDetails"][]; + "application/json": components["schemas"]["JobInputSummary"]; }; }; /** @description Request Error */ @@ -34623,11 +35311,13 @@ export interface operations { }; }; }; - show_job_api_jobs__job_id__get: { + get_console_output_api_jobs__job_id__console_output_get: { parameters: { - query?: { - /** @description Show extra information. */ - full?: boolean | null; + query: { + stdout_position: number; + stdout_length: number; + stderr_position: number; + stderr_length: number; }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. 
*/ @@ -34647,9 +35337,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": - | components["schemas"]["ShowFullJobResponse"] - | components["schemas"]["EncodedJobDetails"]; + "application/json": components["schemas"]["JobConsoleOutput"]; }; }; /** @description Request Error */ @@ -34672,7 +35360,7 @@ export interface operations { }; }; }; - cancel_job_api_jobs__job_id__delete: { + destination_params_job_api_jobs__job_id__destination_params_get: { parameters: { query?: never; header?: { @@ -34685,11 +35373,7 @@ export interface operations { }; cookie?: never; }; - requestBody?: { - content: { - "application/json": components["schemas"]["DeleteJobPayload"] | null; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -34697,7 +35381,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": boolean; + "application/json": components["schemas"]["JobDestinationParams"]; }; }; /** @description Request Error */ @@ -34720,7 +35404,7 @@ export interface operations { }; }; }; - check_common_problems_api_jobs__job_id__common_problems_get: { + report_error_api_jobs__job_id__error_post: { parameters: { query?: never; header?: { @@ -34733,7 +35417,11 @@ export interface operations { }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["ReportJobErrorPayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -34741,7 +35429,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobInputSummary"]; + "application/json": components["schemas"]["JobErrorSummary"]; }; }; /** @description Request Error */ @@ -34764,14 +35452,9 @@ export interface operations { }; }; }; - get_console_output_api_jobs__job_id__console_output_get: { + get_inputs_api_jobs__job_id__inputs_get: { parameters: { - query: { - stdout_position: number; - stdout_length: number; - stderr_position: number; - stderr_length: number; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; @@ -34790,7 +35473,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobConsoleOutput"]; + "application/json": components["schemas"]["JobInputAssociation"][]; }; }; /** @description Request Error */ @@ -34813,9 +35496,15 @@ export interface operations { }; }; }; - destination_params_job_api_jobs__job_id__destination_params_get: { + get_metrics_api_jobs__job_id__metrics_get: { parameters: { - query?: never; + query?: { + /** + * @deprecated + * @description Whether this dataset belongs to a history (HDA) or a library (LDDA). + */ + hda_ldda?: components["schemas"]["DatasetSourceType"] | null; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. 
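The console-output endpoint above takes byte positions and lengths, so a client can tail a running job incrementally. A sketch, with the caveat that the `stdout`/`stderr` field names on the response are assumed here, since `JobConsoleOutput` is defined elsewhere in this file:

```ts
import type { components } from "./schema";

type ConsoleOutput = components["schemas"]["JobConsoleOutput"];

const GALAXY = "https://galaxy.example.org"; // placeholder base URL
const API_KEY = "..."; // placeholder key

// Fetch the next slice of a running job's output, starting at the byte
// positions the caller has already consumed.
async function nextConsoleSlice(jobId: string, stdoutPos: number, stderrPos: number) {
    const qs = new URLSearchParams({
        stdout_position: String(stdoutPos),
        stdout_length: "4096",
        stderr_position: String(stderrPos),
        stderr_length: "4096",
    });
    const res = await fetch(`${GALAXY}/api/jobs/${jobId}/console_output?${qs}`, {
        headers: { "x-api-key": API_KEY },
    });
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
    // stdout/stderr are assumed field names on the returned object.
    return (await res.json()) as ConsoleOutput & { stdout?: string; stderr?: string };
}
```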
*/ "run-as"?: string | null; @@ -34834,7 +35523,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobDestinationParams"]; + "application/json": (components["schemas"]["JobMetric"] | null)[]; }; }; /** @description Request Error */ @@ -34857,24 +35546,24 @@ export interface operations { }; }; }; - report_error_api_jobs__job_id__error_post: { + get_token_api_jobs__job_id__oidc_tokens_get: { parameters: { - query?: never; + query: { + /** @description A key used to authenticate this request as acting on behalf or a job runner for the specified job */ + job_key: string; + /** @description OIDC provider name */ + provider: string; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; path: { - /** @description The ID of the job */ job_id: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["ReportJobErrorPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -34882,7 +35571,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobErrorSummary"]; + "text/plain": string; }; }; /** @description Request Error */ @@ -34891,7 +35580,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["MessageExceptionModel"]; + "text/plain": components["schemas"]["MessageExceptionModel"]; }; }; /** @description Server Error */ @@ -34900,12 +35589,12 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["MessageExceptionModel"]; + "text/plain": components["schemas"]["MessageExceptionModel"]; }; }; }; }; - get_inputs_api_jobs__job_id__inputs_get: { + get_outputs_api_jobs__job_id__outputs_get: { parameters: { query?: never; header?: { @@ -34926,7 +35615,10 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobInputAssociation"][]; + "application/json": ( + | components["schemas"]["JobOutputAssociation"] + | components["schemas"]["JobOutputCollectionAssociation"] + )[]; }; }; /** @description Request Error */ @@ -34949,7 +35641,7 @@ export interface operations { }; }; }; - get_metrics_api_jobs__job_id__metrics_get: { + resolve_parameters_display_api_jobs__job_id__parameters_display_get: { parameters: { query?: { /** @@ -34976,7 +35668,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": (components["schemas"]["JobMetric"] | null)[]; + "application/json": components["schemas"]["JobDisplayParametersSummary"]; }; }; /** @description Request Error */ @@ -34999,55 +35691,7 @@ export interface operations { }; }; }; - get_token_api_jobs__job_id__oidc_tokens_get: { - parameters: { - query: { - /** @description A key used to authenticate this request as acting on behalf or a job runner for the specified job */ - job_key: string; - /** @description OIDC provider name */ - provider: string; - }; - header?: { - /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. 
*/ - "run-as"?: string | null; - }; - path: { - job_id: string; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "text/plain": string; - }; - }; - /** @description Request Error */ - "4XX": { - headers: { - [name: string]: unknown; - }; - content: { - "text/plain": components["schemas"]["MessageExceptionModel"]; - }; - }; - /** @description Server Error */ - "5XX": { - headers: { - [name: string]: unknown; - }; - content: { - "text/plain": components["schemas"]["MessageExceptionModel"]; - }; - }; - }; - }; - get_outputs_api_jobs__job_id__outputs_get: { + resume_paused_job_api_jobs__job_id__resume_put: { parameters: { query?: never; header?: { @@ -35068,10 +35712,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": ( - | components["schemas"]["JobOutputAssociation"] - | components["schemas"]["JobOutputCollectionAssociation"] - )[]; + "application/json": components["schemas"]["JobOutputAssociation"][]; }; }; /** @description Request Error */ @@ -35094,15 +35735,9 @@ export interface operations { }; }; }; - resolve_parameters_display_api_jobs__job_id__parameters_display_get: { + get_job_stderr_api_jobs__job_id__stderr_get: { parameters: { - query?: { - /** - * @deprecated - * @description Whether this dataset belongs to a history (HDA) or a library (LDDA). - */ - hda_ldda?: components["schemas"]["DatasetSourceType"] | null; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; @@ -35121,7 +35756,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobDisplayParametersSummary"]; + "application/json": unknown; }; }; /** @description Request Error */ @@ -35144,7 +35779,7 @@ export interface operations { }; }; }; - resume_paused_job_api_jobs__job_id__resume_put: { + get_job_stdout_api_jobs__job_id__stdout_get: { parameters: { query?: never; header?: { @@ -35165,7 +35800,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobOutputAssociation"][]; + "application/json": unknown; }; }; /** @description Request Error */ @@ -44206,26 +44841,207 @@ export interface operations { }; }; }; - refactor_api_workflows__workflow_id__refactor_put: { + refactor_api_workflows__workflow_id__refactor_put: { + parameters: { + query?: { + instance?: boolean | null; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the Stored Workflow. 
*/ + workflow_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["RefactorRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["RefactorResponse"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + share_with_users_api_workflows__workflow_id__share_with_users_put: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the Stored Workflow. */ + workflow_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["ShareWithPayload"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ShareWithStatus"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + sharing_api_workflows__workflow_id__sharing_get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the Stored Workflow. */ + workflow_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["SharingStatus"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + set_slug_api_workflows__workflow_id__slug_put: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the Stored Workflow. 
*/ + workflow_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["SetSlugPayload"]; + }; + }; + responses: { + /** @description Successful Response */ + 204: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + index_api_workflows__workflow_id__tags_get: { parameters: { - query?: { - instance?: boolean | null; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["RefactorRequest"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -44233,7 +45049,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["RefactorResponse"]; + "application/json": components["schemas"]["ItemTagsListResponse"]; }; }; /** @description Request Error */ @@ -44256,7 +45072,7 @@ export interface operations { }; }; }; - share_with_users_api_workflows__workflow_id__share_with_users_put: { + show_api_workflows__workflow_id__tags__tag_name__get: { parameters: { query?: never; header?: { @@ -44264,16 +45080,12 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; + tag_name: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["ShareWithPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -44281,7 +45093,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ShareWithStatus"]; + "application/json": components["schemas"]["ItemTagsResponse"]; }; }; /** @description Request Error */ @@ -44304,7 +45116,7 @@ export interface operations { }; }; }; - sharing_api_workflows__workflow_id__sharing_get: { + update_api_workflows__workflow_id__tags__tag_name__put: { parameters: { query?: never; header?: { @@ -44312,12 +45124,16 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. 
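`set_slug` above succeeds with a bodiless 204, so only the status code can be checked. A sketch, assuming `new_slug` is the `SetSlugPayload` field name (that schema is defined elsewhere in this file):

```ts
import type { components } from "./schema";

const GALAXY = "https://galaxy.example.org"; // placeholder base URL
const API_KEY = "..."; // placeholder key

// Set a workflow's URL slug; success is an empty 204 response.
async function setWorkflowSlug(workflowId: string, slug: string): Promise<void> {
    const payload: components["schemas"]["SetSlugPayload"] = { new_slug: slug }; // assumed field
    const res = await fetch(`${GALAXY}/api/workflows/${workflowId}/slug`, {
        method: "PUT",
        headers: { "x-api-key": API_KEY, "content-type": "application/json" },
        body: JSON.stringify(payload),
    });
    if (res.status !== 204) throw new Error(`HTTP ${res.status}`);
}
```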
*/ workflow_id: string; + tag_name: string; }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["ItemTagsCreatePayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -44325,7 +45141,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["SharingStatus"]; + "application/json": components["schemas"]["ItemTagsResponse"]; }; }; /** @description Request Error */ @@ -44348,7 +45164,7 @@ export interface operations { }; }; }; - set_slug_api_workflows__workflow_id__slug_put: { + create_api_workflows__workflow_id__tags__tag_name__post: { parameters: { query?: never; header?: { @@ -44356,23 +45172,25 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; + tag_name: string; }; cookie?: never; }; - requestBody: { + requestBody?: { content: { - "application/json": components["schemas"]["SetSlugPayload"]; + "application/json": components["schemas"]["ItemTagsCreatePayload"]; }; }; responses: { /** @description Successful Response */ - 204: { + 200: { headers: { [name: string]: unknown; }; - content?: never; + content: { + "application/json": components["schemas"]["ItemTagsResponse"]; + }; }; /** @description Request Error */ "4XX": { @@ -44394,7 +45212,7 @@ export interface operations { }; }; }; - index_api_workflows__workflow_id__tags_get: { + delete_api_workflows__workflow_id__tags__tag_name__delete: { parameters: { query?: never; header?: { @@ -44403,6 +45221,7 @@ export interface operations { }; path: { workflow_id: string; + tag_name: string; }; cookie?: never; }; @@ -44414,7 +45233,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsListResponse"]; + "application/json": boolean; }; }; /** @description Request Error */ @@ -44437,7 +45256,7 @@ export interface operations { }; }; }; - show_api_workflows__workflow_id__tags__tag_name__get: { + undelete_workflow_api_workflows__workflow_id__undelete_post: { parameters: { query?: never; header?: { @@ -44445,8 +45264,8 @@ export interface operations { "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; - tag_name: string; }; cookie?: never; }; @@ -44458,7 +45277,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsResponse"]; + "application/json": unknown; }; }; /** @description Request Error */ @@ -44481,7 +45300,7 @@ export interface operations { }; }; }; - update_api_workflows__workflow_id__tags__tag_name__put: { + unpublish_api_workflows__workflow_id__unpublish_put: { parameters: { query?: never; header?: { @@ -44489,16 +45308,79 @@ export interface operations { "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the Stored Workflow. 
*/ workflow_id: string; - tag_name: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["ItemTagsCreatePayload"]; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["SharingStatus"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + index_invocations_api_workflows__workflow_id__usage_get: { + parameters: { + query?: { + /** @description Return only invocations for this History ID */ + history_id?: string | null; + /** @description Return only invocations for this Job ID */ + job_id?: string | null; + /** @description Return invocations for this User ID. */ + user_id?: string | null; + /** @description Sort Workflow Invocations by this attribute */ + sort_by?: components["schemas"]["InvocationSortByEnum"] | null; + /** @description Sort in descending order? */ + sort_desc?: boolean; + /** @description Set to false to only include terminal Invocations. */ + include_terminal?: boolean | null; + /** @description Limit the number of invocations to return. */ + limit?: number | null; + /** @description Number of invocations to skip. */ + offset?: number | null; + /** @description Is provided workflow id for Workflow instead of StoredWorkflow? */ + instance?: boolean | null; + /** @description View to be passed to the serializer */ + view?: string | null; + /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ + step_details?: boolean; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the Stored Workflow. */ + workflow_id: string; }; + cookie?: never; }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -44506,7 +45388,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsResponse"]; + "application/json": components["schemas"]["WorkflowInvocationResponse"][]; }; }; /** @description Request Error */ @@ -44529,7 +45411,7 @@ export interface operations { }; }; }; - create_api_workflows__workflow_id__tags__tag_name__post: { + Invoke_workflow_api_workflows__workflow_id__usage_post: { parameters: { query?: never; header?: { @@ -44537,14 +45419,14 @@ export interface operations { "run-as"?: string | null; }; path: { + /** @description The database identifier - UUID or encoded - of the Workflow. 
*/ workflow_id: string; - tag_name: string; }; cookie?: never; }; - requestBody?: { + requestBody: { content: { - "application/json": components["schemas"]["ItemTagsCreatePayload"]; + "application/json": components["schemas"]["InvokeWorkflowPayload"]; }; }; responses: { @@ -44554,7 +45436,9 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ItemTagsResponse"]; + "application/json": + | components["schemas"]["WorkflowInvocationResponse"] + | components["schemas"]["WorkflowInvocationResponse"][]; }; }; /** @description Request Error */ @@ -44577,16 +45461,28 @@ export interface operations { }; }; }; - delete_api_workflows__workflow_id__tags__tag_name__delete: { + show_workflow_invocation_api_workflows__workflow_id__usage__invocation_id__get: { parameters: { - query?: never; + query?: { + /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ + step_details?: boolean; + /** + * @description Populate the invocation step state with the job state instead of the invocation step state. + * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. + * Partially scheduled steps may provide incomplete information and the listed steps outputs + * are not the mapped over step outputs but the individual job outputs. + */ + legacy_job_state?: boolean; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; - tag_name: string; + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; }; cookie?: never; }; @@ -44598,7 +45494,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": boolean; + "application/json": components["schemas"]["WorkflowInvocationResponse"]; }; }; /** @description Request Error */ @@ -44621,14 +45517,26 @@ export interface operations { }; }; }; - undelete_workflow_api_workflows__workflow_id__undelete_post: { + cancel_workflow_invocation_api_workflows__workflow_id__usage__invocation_id__delete: { parameters: { - query?: never; + query?: { + /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ + step_details?: boolean; + /** + * @description Populate the invocation step state with the job state instead of the invocation step state. + * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. + * Partially scheduled steps may provide incomplete information and the listed steps outputs + * are not the mapped over step outputs but the individual job outputs. + */ + legacy_job_state?: boolean; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; /** @description The encoded database identifier of the Stored Workflow. 
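`Invoke_workflow` above may return either a single invocation or a list (for example when the payload expands to several runs), hence the union response type. A small type-level helper makes handling that explicit:

```ts
import type { components, operations } from "./schema";

type InvokeResult =
    operations["Invoke_workflow_api_workflows__workflow_id__usage_post"]["responses"]["200"]["content"]["application/json"];
type Invocation = components["schemas"]["WorkflowInvocationResponse"];

// Normalize the one-or-many union to a list before iterating.
function toInvocationList(result: InvokeResult): Invocation[] {
    return Array.isArray(result) ? result : [result];
}
```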
*/ workflow_id: string; }; @@ -44642,7 +45550,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": unknown; + "application/json": components["schemas"]["WorkflowInvocationResponse"]; }; }; /** @description Request Error */ @@ -44665,7 +45573,7 @@ export interface operations { }; }; }; - unpublish_api_workflows__workflow_id__unpublish_put: { + workflow_invocation_jobs_summary_api_workflows__workflow_id__usage__invocation_id__jobs_summary_get: { parameters: { query?: never; header?: { @@ -44675,6 +45583,8 @@ export interface operations { path: { /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; }; cookie?: never; }; @@ -44686,7 +45596,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["SharingStatus"]; + "application/json": components["schemas"]["InvocationJobsResponse"]; }; }; /** @description Request Error */ @@ -44709,37 +45619,16 @@ export interface operations { }; }; }; - index_invocations_api_workflows__workflow_id__usage_get: { + show_workflow_invocation_report_api_workflows__workflow_id__usage__invocation_id__report_get: { parameters: { - query?: { - /** @description Return only invocations for this History ID */ - history_id?: string | null; - /** @description Return only invocations for this Job ID */ - job_id?: string | null; - /** @description Return invocations for this User ID. */ - user_id?: string | null; - /** @description Sort Workflow Invocations by this attribute */ - sort_by?: components["schemas"]["InvocationSortByEnum"] | null; - /** @description Sort in descending order? */ - sort_desc?: boolean; - /** @description Set to false to only include terminal Invocations. */ - include_terminal?: boolean | null; - /** @description Limit the number of invocations to return. */ - limit?: number | null; - /** @description Number of invocations to skip. */ - offset?: number | null; - /** @description Is provided workflow id for Workflow instead of StoredWorkflow? */ - instance?: boolean | null; - /** @description View to be passed to the serializer */ - view?: string | null; - /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ - step_details?: boolean; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; path: { + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; /** @description The encoded database identifier of the Stored Workflow. 
*/ workflow_id: string; }; @@ -44753,7 +45642,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationResponse"][]; + "application/json": components["schemas"]["InvocationReport"]; }; }; /** @description Request Error */ @@ -44776,7 +45665,7 @@ export interface operations { }; }; }; - Invoke_workflow_api_workflows__workflow_id__usage_post: { + show_workflow_invocation_report_pdf_api_workflows__workflow_id__usage__invocation_id__report_pdf_get: { parameters: { query?: never; header?: { @@ -44784,27 +45673,21 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The database identifier - UUID or encoded - of the Workflow. */ + /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["InvokeWorkflowPayload"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { headers: { [name: string]: unknown; }; - content: { - "application/json": - | components["schemas"]["WorkflowInvocationResponse"] - | components["schemas"]["WorkflowInvocationResponse"][]; - }; + content?: never; }; /** @description Request Error */ "4XX": { @@ -44826,19 +45709,9 @@ export interface operations { }; }; }; - show_workflow_invocation_api_workflows__workflow_id__usage__invocation_id__get: { + workflow_invocation_step_jobs_summary_api_workflows__workflow_id__usage__invocation_id__step_jobs_summary_get: { parameters: { - query?: { - /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ - step_details?: boolean; - /** - * @description Populate the invocation step state with the job state instead of the invocation step state. - * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. - * Partially scheduled steps may provide incomplete information and the listed steps outputs - * are not the mapped over step outputs but the individual job outputs. - */ - legacy_job_state?: boolean; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; @@ -44859,7 +45732,11 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationResponse"]; + "application/json": ( + | components["schemas"]["InvocationStepJobsResponseStepModel"] + | components["schemas"]["InvocationStepJobsResponseJobModel"] + | components["schemas"]["InvocationStepJobsResponseCollectionJobsModel"] + )[]; }; }; /** @description Request Error */ @@ -44882,28 +45759,20 @@ export interface operations { }; }; }; - cancel_workflow_invocation_api_workflows__workflow_id__usage__invocation_id__delete: { + workflow_invocation_step_api_workflows__workflow_id__usage__invocation_id__steps__step_id__get: { parameters: { - query?: { - /** @description Include details for individual invocation steps and populate a steps attribute in the resulting dictionary. */ - step_details?: boolean; - /** - * @description Populate the invocation step state with the job state instead of the invocation step state. 
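The report-PDF route above declares `content?: never`, meaning the body is not JSON. A sketch that reads it as a binary blob; the `report.pdf` path segment is inferred from the operation id:

```ts
const GALAXY = "https://galaxy.example.org"; // placeholder base URL
const API_KEY = "..."; // placeholder key

// Download an invocation report as PDF bytes.
async function invocationReportPdf(workflowId: string, invocationId: string): Promise<Blob> {
    const res = await fetch(
        `${GALAXY}/api/workflows/${workflowId}/usage/${invocationId}/report.pdf`,
        { headers: { "x-api-key": API_KEY } }
    );
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
    return res.blob();
}
```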
- * This will also produce one step per job in mapping jobs to mimic the older behavior with respect to collections. - * Partially scheduled steps may provide incomplete information and the listed steps outputs - * are not the mapped over step outputs but the individual job outputs. - */ - legacy_job_state?: boolean; - }; + query?: never; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Invocation. */ - invocation_id: string; /** @description The encoded database identifier of the Stored Workflow. */ workflow_id: string; + /** @description The encoded database identifier of the Invocation. */ + invocation_id: string; + /** @description The encoded database identifier of the WorkflowInvocationStep. */ + step_id: string; }; cookie?: never; }; @@ -44915,7 +45784,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowInvocationResponse"]; + "application/json": components["schemas"]["InvocationStep"]; }; }; /** @description Request Error */ @@ -44938,7 +45807,7 @@ export interface operations { }; }; }; - workflow_invocation_jobs_summary_api_workflows__workflow_id__usage__invocation_id__jobs_summary_get: { + update_workflow_invocation_step_api_workflows__workflow_id__usage__invocation_id__steps__step_id__put: { parameters: { query?: never; header?: { @@ -44950,10 +45819,16 @@ export interface operations { workflow_id: string; /** @description The encoded database identifier of the Invocation. */ invocation_id: string; + /** @description The encoded database identifier of the WorkflowInvocationStep. */ + step_id: string; }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["InvocationUpdatePayload"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -44961,7 +45836,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationJobsResponse"]; + "application/json": components["schemas"]["InvocationStep"]; }; }; /** @description Request Error */ @@ -44984,16 +45859,16 @@ export interface operations { }; }; }; - show_workflow_invocation_report_api_workflows__workflow_id__usage__invocation_id__report_get: { + show_versions_api_workflows__workflow_id__versions_get: { parameters: { - query?: never; + query?: { + instance?: boolean | null; + }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Invocation. */ - invocation_id: string; /** @description The encoded database identifier of the Stored Workflow. 
*/ workflow_id: string; }; @@ -45007,7 +45882,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationReport"]; + "application/json": unknown; }; }; /** @description Request Error */ @@ -45030,7 +45905,48 @@ export interface operations { }; }; }; - show_workflow_invocation_report_pdf_api_workflows__workflow_id__usage__invocation_id__report_pdf_get: { + index_context_get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ContextResponse"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + get_object_ga4gh_drs_v1_objects__object_id__get: { parameters: { query?: never; header?: { @@ -45038,10 +45954,8 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. */ - workflow_id: string; - /** @description The encoded database identifier of the Invocation. */ - invocation_id: string; + /** @description The ID of the group */ + object_id: string; }; cookie?: never; }; @@ -45052,7 +45966,9 @@ export interface operations { headers: { [name: string]: unknown; }; - content?: never; + content: { + "application/json": components["schemas"]["DrsObject"]; + }; }; /** @description Request Error */ "4XX": { @@ -45074,7 +45990,7 @@ export interface operations { }; }; }; - workflow_invocation_step_jobs_summary_api_workflows__workflow_id__usage__invocation_id__step_jobs_summary_get: { + get_object_ga4gh_drs_v1_objects__object_id__post: { parameters: { query?: never; header?: { @@ -45082,10 +45998,8 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. */ - workflow_id: string; - /** @description The encoded database identifier of the Invocation. 
*/ - invocation_id: string; + /** @description The ID of the group */ + object_id: string; }; cookie?: never; }; @@ -45097,11 +46011,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": ( - | components["schemas"]["InvocationStepJobsResponseStepModel"] - | components["schemas"]["InvocationStepJobsResponseJobModel"] - | components["schemas"]["InvocationStepJobsResponseCollectionJobsModel"] - )[]; + "application/json": components["schemas"]["DrsObject"]; }; }; /** @description Request Error */ @@ -45124,7 +46034,7 @@ export interface operations { }; }; }; - workflow_invocation_step_api_workflows__workflow_id__usage__invocation_id__steps__step_id__get: { + get_access_url_ga4gh_drs_v1_objects__object_id__access__access_id__get: { parameters: { query?: never; header?: { @@ -45132,12 +46042,10 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. */ - workflow_id: string; - /** @description The encoded database identifier of the Invocation. */ - invocation_id: string; - /** @description The encoded database identifier of the WorkflowInvocationStep. */ - step_id: string; + /** @description The ID of the group */ + object_id: string; + /** @description The access ID of the access method for objects, unused in Galaxy. */ + access_id: string; }; cookie?: never; }; @@ -45149,7 +46057,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationStep"]; + "application/json": unknown; }; }; /** @description Request Error */ @@ -45172,7 +46080,7 @@ export interface operations { }; }; }; - update_workflow_invocation_step_api_workflows__workflow_id__usage__invocation_id__steps__step_id__put: { + get_access_url_ga4gh_drs_v1_objects__object_id__access__access_id__post: { parameters: { query?: never; header?: { @@ -45180,20 +46088,52 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The encoded database identifier of the Stored Workflow. */ - workflow_id: string; - /** @description The encoded database identifier of the Invocation. */ - invocation_id: string; - /** @description The encoded database identifier of the WorkflowInvocationStep. */ - step_id: string; + /** @description The ID of the group */ + object_id: string; + /** @description The access ID of the access method for objects, unused in Galaxy. 
*/ + access_id: string; }; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["InvocationUpdatePayload"]; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; }; }; + }; + service_info_ga4gh_drs_v1_service_info_get: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -45201,7 +46141,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["InvocationStep"]; + "application/json": unknown; }; }; /** @description Request Error */ @@ -45224,19 +46164,19 @@ export interface operations { }; }; }; - show_versions_api_workflows__workflow_id__versions_get: { + list_runs_ga4gh_wes_v1_runs_get: { parameters: { query?: { - instance?: boolean | null; + /** @description Number of results per page */ + page_size?: number; + /** @description Token for pagination */ + page_token?: string; }; header?: { /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ "run-as"?: string | null; }; - path: { - /** @description The encoded database identifier of the Stored Workflow. 
*/ - workflow_id: string; - }; + path?: never; cookie?: never; }; requestBody?: never; @@ -45247,7 +46187,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": unknown; + "application/json": components["schemas"]["RunListResponse"]; }; }; /** @description Request Error */ @@ -45270,7 +46210,7 @@ export interface operations { }; }; }; - index_context_get: { + submit_run_ga4gh_wes_v1_runs_post: { parameters: { query?: never; header?: { @@ -45280,7 +46220,11 @@ export interface operations { path?: never; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "multipart/form-data": components["schemas"]["Body_submit_run_ga4gh_wes_v1_runs_post"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -45288,7 +46232,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ContextResponse"]; + "application/json": components["schemas"]["RunId"]; }; }; /** @description Request Error */ @@ -45311,7 +46255,7 @@ export interface operations { }; }; }; - get_object_ga4gh_drs_v1_objects__object_id__get: { + get_run_ga4gh_wes_v1_runs__run_id__get: { parameters: { query?: never; header?: { @@ -45319,8 +46263,7 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The ID of the group */ - object_id: string; + run_id: string; }; cookie?: never; }; @@ -45332,7 +46275,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DrsObject"]; + "application/json": components["schemas"]["RunLog"]; }; }; /** @description Request Error */ @@ -45355,7 +46298,7 @@ export interface operations { }; }; }; - get_object_ga4gh_drs_v1_objects__object_id__post: { + cancel_run_ga4gh_wes_v1_runs__run_id__cancel_post: { parameters: { query?: never; header?: { @@ -45363,8 +46306,7 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The ID of the group */ - object_id: string; + run_id: string; }; cookie?: never; }; @@ -45376,7 +46318,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DrsObject"]; + "application/json": components["schemas"]["RunId"]; }; }; /** @description Request Error */ @@ -45399,7 +46341,7 @@ export interface operations { }; }; }; - get_access_url_ga4gh_drs_v1_objects__object_id__access__access_id__get: { + get_run_status_ga4gh_wes_v1_runs__run_id__status_get: { parameters: { query?: never; header?: { @@ -45407,10 +46349,7 @@ export interface operations { "run-as"?: string | null; }; path: { - /** @description The ID of the group */ - object_id: string; - /** @description The access ID of the access method for objects, unused in Galaxy. */ - access_id: string; + run_id: string; }; cookie?: never; }; @@ -45422,7 +46361,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": unknown; + "application/json": components["schemas"]["RunStatus"]; }; }; /** @description Request Error */ @@ -45445,7 +46384,55 @@ export interface operations { }; }; }; - get_access_url_ga4gh_drs_v1_objects__object_id__access__access_id__post: { + get_run_tasks_ga4gh_wes_v1_runs__run_id__tasks_get: { + parameters: { + query?: { + /** @description Number of results per page */ + page_size?: number; + /** @description Token for pagination */ + page_token?: string; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. 
Only admins and designated users can make API calls on behalf of other users. */
+                "run-as"?: string | null;
+            };
+            path: {
+                run_id: string;
+            };
+            cookie?: never;
+        };
+        requestBody?: never;
+        responses: {
+            /** @description Successful Response */
+            200: {
+                headers: {
+                    [name: string]: unknown;
+                };
+                content: {
+                    "application/json": components["schemas"]["TaskListResponse"];
+                };
+            };
+            /** @description Request Error */
+            "4XX": {
+                headers: {
+                    [name: string]: unknown;
+                };
+                content: {
+                    "application/json": components["schemas"]["MessageExceptionModel"];
+                };
+            };
+            /** @description Server Error */
+            "5XX": {
+                headers: {
+                    [name: string]: unknown;
+                };
+                content: {
+                    "application/json": components["schemas"]["MessageExceptionModel"];
+                };
+            };
+        };
+    };
+    get_run_task_ga4gh_wes_v1_runs__run_id__tasks__task_id__get: {
         parameters: {
             query?: never;
             header?: {
@@ -45453,10 +46440,9 @@
                 "run-as"?: string | null;
             };
             path: {
-                /** @description The ID of the group */
-                object_id: string;
-                /** @description The access ID of the access method for objects, unused in Galaxy. */
-                access_id: string;
+                run_id: string;
+                /** @description Task identifier: step order_index, or order_index.job_index for collection mapping jobs */
+                task_id: string;
             };
             cookie?: never;
         };
@@ -45468,7 +46454,7 @@
                     [name: string]: unknown;
                 };
                 content: {
-                    "application/json": unknown;
+                    "application/json": components["schemas"]["TaskLog"];
                 };
             };
             /** @description Request Error */
@@ -45491,10 +46477,13 @@
         };
     };
-    service_info_ga4gh_drs_v1_service_info_get: {
+    service_info_ga4gh_wes_v1_service_info_get: {
         parameters: {
             query?: never;
-            header?: never;
+            header?: {
+                /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */
+                "run-as"?: string | null;
+            };
             path?: never;
             cookie?: never;
         };
         requestBody?: never;
@@ -45506,7 +46495,7 @@
                     [name: string]: unknown;
                 };
                 content: {
-                    "application/json": components["schemas"]["Service"];
+                    "application/json": components["schemas"]["ServiceInfo"];
                 };
             };
             /** @description Request Error */
diff --git a/doc/source/admin/ga4gh.md b/doc/source/admin/ga4gh.md
new file mode 100644
index 000000000000..faed513cc971
--- /dev/null
+++ b/doc/source/admin/ga4gh.md
@@ -0,0 +1,189 @@
+---
+myst:
+  substitutions:
+    GA4GH_DRS: GA4GH Data Repository Service (DRS)
+    GA4GH_WES: GA4GH Workflow Execution Service (WES)
+---
+
+# GA4GH API Support
+
+Galaxy consumes many of the APIs from the [GA4GH standards](https://www.ga4gh.org/), and a
+Galaxy server itself currently implements two of these standards.
+
+## Overview
+
+The GA4GH standards define common APIs for accessing datasets and executing workflows across different bioinformatics platforms. Galaxy's implementation allows external tools and services to:
+
+- **DRS (Data Repository Service)**: Access Galaxy datasets via standardized data retrieval APIs
+- **WES (Workflow Execution Service)**: Submit and monitor Galaxy workflow executions via standardized workflow APIs
+
+## DRS - Data Repository Service
+
+The {{ GA4GH_DRS }} enables standardized access to datasets stored in Galaxy.
+
+For detailed API specifications, see the [GA4GH DRS specification](https://ga4gh.github.io/data-repository-service/).
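+
+As a quick sketch (the object ID below is hypothetical; for Galaxy, DRS object IDs are
+encoded dataset identifiers), a single object's metadata can be fetched with:
+
+```bash
+curl -s http://localhost:8080/ga4gh/drs/v1/objects/f2db41e1fa331b3e | jq .
+```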
+
+### Configuration
+
+DRS service information is configured via the following Galaxy settings in `galaxy.yml`:
+
+```yaml
+galaxy:
+  # Organization name shown in DRS service-info responses
+  organization_name: "My Organization"
+
+  # Organization website URL shown in DRS service-info responses
+  organization_url: "https://example.com"
+
+  # GA4GH service ID (reverse domain format)
+  # If not set, defaults to reversed hostname (e.g., com.example for example.com)
+  ga4gh_service_id: "org.example.myservice"
+
+  # Environment tag for service (e.g., "test", "staging", "production")
+  ga4gh_service_environment: "production"
+```
+
+### Verifying DRS Configuration
+
+To verify DRS is properly configured, query the service-info endpoint:
+
+```bash
+curl -s http://localhost:8080/ga4gh/drs/v1/service-info | jq .
+```
+
+You should see output like:
+
+```json
+{
+  "id": "org.example.myservice.drs",
+  "name": "Galaxy DRS API",
+  "description": "Serves Galaxy datasets according to the GA4GH DRS specification",
+  "organization": {
+    "name": "My Organization",
+    "url": "https://example.com"
+  },
+  "type": {
+    "group": "org.ga4gh",
+    "artifact": "drs",
+    "version": "1.2.0"
+  },
+  "version": "26.0",
+  "environment": "production"
+}
+```
+
+Verify that:
+- `organization.name` and `organization.url` match your configured values
+- `environment` is set appropriately for your deployment
+- `id` reflects your `ga4gh_service_id` setting (or the derived default if not configured)
+
+## WES - Workflow Execution Service
+
+The {{ GA4GH_WES }} enables external systems to submit and monitor Galaxy workflow executions.
+
+For detailed API specifications, see the [GA4GH WES specification](https://ga4gh.github.io/workflow-execution-service-schemas/).
+
+### Workflow Types
+
+WES supports two Galaxy workflow formats:
+
+- **gx_workflow_ga**: Native Galaxy workflow format (JSON, `.ga` files)
+- **gx_workflow_format2**: Galaxy's YAML-based Format 2 (CWL-style) workflow format
+
+### Configuration
+
+WES service information is configured via the same Galaxy settings as DRS in `galaxy.yml`:
+
+```yaml
+galaxy:
+  # Organization name shown in WES service-info responses
+  organization_name: "My Organization"
+
+  # Organization website URL shown in WES service-info responses
+  organization_url: "https://example.com"
+
+  # GA4GH service ID (reverse domain format)
+  # If not set, defaults to reversed hostname
+  ga4gh_service_id: "org.example.myservice"
+
+  # Environment tag for service (e.g., "test", "staging", "production")
+  ga4gh_service_environment: "production"
+```
+
+### Verifying WES Configuration
+
+To verify WES is properly configured, query the service-info endpoint:
+
+```bash
+curl -s http://localhost:8080/ga4gh/wes/v1/service-info | jq .
+```
+
+You should see output like:
+
+```json
+{
+  "id": "org.example.myservice.wes",
+  "name": "Galaxy WES API",
+  "description": "Executes Galaxy workflows according to the GA4GH WES specification",
+  "organization": {
+    "name": "My Organization",
+    "url": "https://example.com"
+  },
+  "type": {
+    "group": "org.ga4gh",
+    "artifact": "wes",
+    "version": "1.0.0"
+  },
+  "version": "26.0",
+  "environment": "production"
+}
+```
+
+Verify that:
+- `organization.name` and `organization.url` match your configured values
+- `environment` is set appropriately for your deployment
+- `id` reflects your `ga4gh_service_id` setting (or the derived default if not configured)
+
+## Configuration Reference
+
+All GA4GH configuration is optional and falls back to sensible defaults based on your Galaxy deployment.
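+
+For example (hypothetical hostname), a Galaxy server reachable at `https://galaxy.example.org`
+with none of these settings configured would report, in part:
+
+```json
+{
+  "id": "org.example.galaxy.drs",
+  "organization": {
+    "name": "org.example.galaxy",
+    "url": "https://galaxy.example.org"
+  }
+}
+```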
+ +### Settings + +| Setting | Default | Purpose | +|---------|---------|---------| +| `organization_name` | Reversed hostname | Organization name in service responses | +| `organization_url` | Scheme + hostname from request | Organization website URL | +| `ga4gh_service_id` | Reversed hostname | Service ID in reverse domain format (e.g., `org.example`) | +| `ga4gh_service_environment` | (none) | Environment classifier (e.g., "test", "staging", "production") | + +### Complete Configuration Example + +```yaml +# galaxy.yml - Complete GA4GH configuration +galaxy: + # For DRS and WES service-info responses + organization_name: "Example Bioinformatics Institute" + organization_url: "https://example.com" + + # Service identifier (reverse domain format) + ga4gh_service_id: "com.example.galaxy" + + # Environment classifier + ga4gh_service_environment: "production" +``` + +### Default Behavior + +If GA4GH settings are not explicitly configured: + +- `organization_name` and `organization_url` are derived from the request URL +- `ga4gh_service_id` is auto-generated by reversing the hostname + - For `galaxy.example.com`, this becomes `com.example.galaxy` +- `ga4gh_service_environment` is omitted from responses + +## Related Documentation + +- [Galaxy Workflow Guide](../learn/workflow.rst) +- [API Authentication](./authentication.md) +- [GA4GH Organization](https://www.ga4gh.org/) diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst index 491a492d53e5..da7af3703ac0 100644 --- a/doc/source/admin/index.rst +++ b/doc/source/admin/index.rst @@ -28,5 +28,6 @@ Galaxy Deployment & Administration db_migration reports useful_scripts + ga4gh options migrating_to_gunicorn diff --git a/lib/galaxy/managers/workflows.py b/lib/galaxy/managers/workflows.py index b4622ff09ef8..e96fb54a3cc9 100644 --- a/lib/galaxy/managers/workflows.py +++ b/lib/galaxy/managers/workflows.py @@ -649,7 +649,6 @@ def normalize_workflow_format(self, trans, as_dict): ) except yaml.scanner.ScannerError as e: raise exceptions.MalformedContents(str(e)) - return RawWorkflowDescription(as_dict, workflow_path) def build_workflow_from_raw_description( diff --git a/lib/galaxy/model/keyset_token_pagination.py b/lib/galaxy/model/keyset_token_pagination.py new file mode 100644 index 000000000000..29dcadebfcbf --- /dev/null +++ b/lib/galaxy/model/keyset_token_pagination.py @@ -0,0 +1,116 @@ +"""Keyset-based pagination support for cursor-stable pagination.""" + +import base64 +import json +from dataclasses import dataclass +from typing import ( + Optional, + Protocol, + Type, + TypeVar, +) + +from galaxy import exceptions + + +class KeysetToken(Protocol): + """Protocol for keyset tokens that can be encoded/decoded. + + Implementations must provide: + - to_values(): Convert token to normalized list of values for encoding + - from_values(): Reconstruct token from decoded values (classmethod) + """ + + def to_values(self) -> list: + """Convert token to normalized list of values for encoding. + + Returns: + List of values to be JSON-encoded + """ + ... + + @classmethod + def from_values(cls, values: list) -> "KeysetToken": + """Reconstruct token from decoded values. + + Args: + values: List of values from JSON decoding + + Returns: + Token instance + """ + ... + + +@dataclass +class SingleKeysetToken: + """Single ID column keyset token. + + Used for pagination on a single numeric ID column (e.g., database IDs). 
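+
+    A round-trip sketch (illustrative, not a doctest):
+
+        token = SingleKeysetToken(last_id=42)
+        encoded = KeysetPagination().encode_token(token)
+        decoded = KeysetPagination().decode_token(encoded, SingleKeysetToken)
+        assert decoded is not None and decoded.last_id == 42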
+ """ + + last_id: int + + def to_values(self) -> list: + """Convert to normalized values.""" + return [self.last_id] + + @classmethod + def from_values(cls, values: list) -> "SingleKeysetToken": + """Reconstruct from decoded values.""" + if len(values) < 1: + raise ValueError("SingleKeysetToken requires at least 1 value") + return cls(last_id=values[0]) + + +T = TypeVar("T", bound=KeysetToken) + + +class KeysetPagination: + """Keyset pagination encoder/decoder using Protocol. + + Encodes tokens to opaque base64 strings, works with any KeysetToken + implementation via Protocol duck typing. + """ + + def encode_token(self, token: KeysetToken) -> str: + """Encode keyset token to opaque base64 string. + + Works with any KeysetToken implementation via Protocol. + + Args: + token: Token implementing KeysetToken protocol + + Returns: + Base64-encoded token string + """ + values = token.to_values() + payload = json.dumps(values) + return base64.b64encode(payload.encode()).decode() + + def decode_token( + self, + encoded: Optional[str], + token_class: Type[T], + ) -> Optional[T]: + """Decode token using provided token class. + + Args: + encoded: Base64-encoded token string + token_class: Token class with from_values() classmethod + + Returns: + Decoded token instance or None if encoded is None + + Raises: + MessageException: If token is invalid + """ + if not encoded: + return None + + try: + payload = base64.b64decode(encoded.encode()).decode() + values = json.loads(payload) + return token_class.from_values(values) + except (ValueError, TypeError, json.JSONDecodeError) as e: + raise exceptions.MessageException(f"Invalid page_token: {str(e)}") diff --git a/lib/galaxy/schema/wes/__init__.py b/lib/galaxy/schema/wes/__init__.py new file mode 100644 index 000000000000..56a53d9a3f92 --- /dev/null +++ b/lib/galaxy/schema/wes/__init__.py @@ -0,0 +1,283 @@ +# generated by datamodel-codegen: +# filename: https://raw.githubusercontent.com/ga4gh/workflow-execution-service-schemas/develop/openapi/workflow_execution_service.openapi.yaml +# timestamp: 2025-11-18T17:23:28+00:00 + +from __future__ import annotations + +from enum import Enum +from typing import ( + Any, + Dict, + List, + Optional, + Union, +) + +from pydantic import ( + AnyUrl, + AwareDatetime, + BaseModel, + Field, +) + + +class Organization(BaseModel): + name: str = Field( + ..., description="Name of the organization responsible for the service", examples=["My organization"] + ) + url: AnyUrl = Field( + ..., description="URL of the website of the organization (RFC 3986 format)", examples=["https://example.com"] + ) + + +class ServiceType(BaseModel): + group: str = Field( + ..., + description="Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant with official GA4GH specifications. For services with custom APIs not standardized by GA4GH, or implementations diverging from official GA4GH specifications, use a different namespace (e.g. your organization's reverse domain name).", + examples=["org.ga4gh"], + ) + artifact: str = Field( + ..., + description="Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned as part of standards approval process. Custom artifacts are supported.", + examples=["beacon"], + ) + version: str = Field( + ..., + description="Version of the API or specification. 
GA4GH specifications use semantic versioning.", + examples=["1.0.0"], + ) + + +class RunId(BaseModel): + run_id: Optional[str] = Field(None, description="workflow run ID") + + +class State(Enum): + UNKNOWN = "UNKNOWN" + QUEUED = "QUEUED" + INITIALIZING = "INITIALIZING" + RUNNING = "RUNNING" + PAUSED = "PAUSED" + COMPLETE = "COMPLETE" + EXECUTOR_ERROR = "EXECUTOR_ERROR" + SYSTEM_ERROR = "SYSTEM_ERROR" + CANCELED = "CANCELED" + CANCELING = "CANCELING" + PREEMPTED = "PREEMPTED" + + +class RunStatus(BaseModel): + run_id: str + state: Optional[State] = None + + +class RunSummary(RunStatus): + start_time: Optional[str] = Field( + None, description='When the run started executing, in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ"' + ) + end_time: Optional[str] = Field( + None, + description='When the run stopped executing (completed, failed, or cancelled), in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ"', + ) + tags: Dict[str, str] = Field(..., description="Arbitrary key/value tags added by the client during run creation") + + +class RunRequest(BaseModel): + workflow_params: Optional[Dict[str, Any]] = Field( + None, + description="REQUIRED\nThe workflow run parameterizations (JSON encoded), including input and output file locations", + ) + workflow_type: str = Field( + ..., + description='REQUIRED\nThe workflow descriptor type, must be "CWL" or "WDL" currently (or another alternative supported by this WES instance)', + ) + workflow_type_version: str = Field( + ..., description="REQUIRED\nThe workflow descriptor type version, must be one supported by this WES instance" + ) + tags: Optional[Dict[str, str]] = None + workflow_engine_parameters: Optional[Dict[str, str]] = None + workflow_engine: Optional[str] = Field( + None, + description="The workflow engine, must be one supported by this WES instance. Required if workflow_engine_version is provided.", + ) + workflow_engine_version: Optional[str] = Field( + None, + description="The workflow engine version, must be one supported by this WES instance. If workflow_engine is provided, but workflow_engine_version is not, servers can make no assumptions with regard to the engine version the WES instance uses to process the request if that WES instance supports multiple versions of the requested engine.", + ) + workflow_url: str = Field( + ..., + description="REQUIRED\nThe workflow CWL or WDL document. When `workflow_attachments` is used to attach files, the `workflow_url` may be a relative path to one of the attachments.", + ) + + +class Log(BaseModel): + name: Optional[str] = Field(None, description="The task or workflow name") + cmd: Optional[List[str]] = Field(None, description="The command line that was executed") + start_time: Optional[str] = Field( + None, description='When the command started executing, in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ"' + ) + end_time: Optional[str] = Field( + None, + description='When the command stopped executing (completed, failed, or cancelled), in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ"', + ) + stdout: Optional[str] = Field( + None, + description="A URL to retrieve standard output logs of the workflow run or task. This URL may change between status requests, or may not be available until the task or workflow has finished execution. Should be available using the same credentials used to access the WES endpoint.", + ) + stderr: Optional[str] = Field( + None, + description="A URL to retrieve standard error logs of the workflow run or task. 
This URL may change between status requests, or may not be available until the task or workflow has finished execution. Should be available using the same credentials used to access the WES endpoint.", + ) + exit_code: Optional[int] = Field(None, description="Exit code of the program") + system_logs: Optional[List[str]] = Field( + None, + description="System logs are any logs the system decides are relevant,\nwhich are not tied directly to a workflow.\nContent is implementation specific: format, size, etc.\n\nSystem logs may be collected here to provide convenient access.\n\nFor example, the system may include an error message that caused\na SYSTEM_ERROR state (e.g. disk is full), etc.", + ) + + +class DefaultWorkflowEngineParameter(BaseModel): + name: Optional[str] = Field(None, description="The name of the parameter") + type: Optional[str] = Field(None, description="Describes the type of the parameter, e.g. float.") + default_value: Optional[str] = Field( + None, description='The stringified version of the default parameter. e.g. "2.45".' + ) + + +class WorkflowTypeVersion(BaseModel): + workflow_type_version: Optional[List[str]] = Field( + None, description="an array of one or more acceptable types for the `workflow_type`" + ) + + +class TaskLog(Log): + id: str = Field(..., description="A unique identifier which may be used to reference the task") + system_logs: Optional[List[str]] = Field( + None, + description="System logs are any logs the system decides are relevant,\nwhich are not tied directly to a task.\nContent is implementation specific: format, size, etc.\n\nSystem logs may be collected here to provide convenient access.\n\nFor example, the system may include the name of the host\nwhere the task is executing, an error message that caused\na SYSTEM_ERROR state (e.g. disk is full), etc.", + ) + tes_uri: Optional[str] = Field( + None, + description="An optional URL pointing to an extended task definition defined by a [TES api](https://github.com/ga4gh/task-execution-schemas)", + ) + name: str = Field(..., description="The task or workflow name") + + +class WorkflowEngineVersion(BaseModel): + workflow_engine_version: Optional[List[str]] = Field( + None, description="An array of one or more acceptable engines versions for the `workflow_engine`" + ) + + +class RunListResponse(BaseModel): + runs: Optional[List[Union[RunStatus, RunSummary]]] = Field( + None, + description="A list of workflow runs that the service has executed or is executing. The list is filtered to only include runs that the caller has permission to see.", + ) + next_page_token: Optional[str] = Field( + None, + description="A token which may be supplied as `page_token` in workflow run list request to get the next page of results. An empty string indicates there are no more items to return.", + ) + + +class ErrorResponse(BaseModel): + msg: Optional[str] = Field(None, description="A detailed error message.") + status_code: Optional[int] = Field( + None, description="The integer representing the HTTP status code (e.g. 200, 404)." + ) + + +class Service(BaseModel): + id: str = Field( + ..., + description="Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry.", + examples=["org.ga4gh.myservice"], + ) + name: str = Field(..., description="Name of this service. 
Should be human readable.", examples=["My project"]) + type: ServiceType + description: Optional[str] = Field( + None, + description="Description of the service. Should be human readable and provide information about the service.", + examples=["This service provides..."], + ) + organization: Organization = Field(..., description="Organization providing the service") + contactUrl: Optional[AnyUrl] = Field( + None, + description="URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format).", + examples=["mailto:support@example.com"], + ) + documentationUrl: Optional[AnyUrl] = Field( + None, + description="URL of the documentation of this service (RFC 3986 format). This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication.", + examples=["https://docs.myservice.example.com"], + ) + createdAt: Optional[AwareDatetime] = Field( + None, + description="Timestamp describing when the service was first deployed and available (RFC 3339 format)", + examples=["2019-06-04T12:58:19Z"], + ) + updatedAt: Optional[AwareDatetime] = Field( + None, + description="Timestamp describing when the service was last updated (RFC 3339 format)", + examples=["2019-06-04T12:58:19Z"], + ) + environment: Optional[str] = Field( + None, + description="Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced.", + examples=["test"], + ) + version: str = Field( + ..., + description="Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated.", + examples=["1.0.0"], + ) + + +class RunLog(BaseModel): + run_id: Optional[str] = Field(None, description="workflow run ID") + request: Optional[RunRequest] = None + state: Optional[State] = None + run_log: Optional[Log] = None + task_logs_url: Optional[str] = Field( + None, + description="A reference to the complete url which may be used to obtain a paginated list of task logs for this workflow", + ) + task_logs: Optional[List[Union[Log, TaskLog]]] = Field( + None, + description="The logs, and other key info like timing and exit code, for each step in the workflow run. This field is deprecated and the `task_logs_url` should be used to retrieve a paginated list of steps from the workflow run. This field will be removed in the next major version of the specification (2.0.0)", + ) + outputs: Optional[Dict[str, Any]] = Field(None, description="The outputs from the workflow run.") + + +class TaskListResponse(BaseModel): + task_logs: Optional[List[TaskLog]] = Field( + None, description="The logs, and other key info like timing and exit code, for each step in the workflow run." + ) + next_page_token: Optional[str] = Field( + None, + description="A token which may be supplied as `page_token` in workflow run task list request to get the next page of results. 
An empty string indicates there are no more items to return.", + ) + + +class ServiceInfo(Service): + workflow_type_versions: Dict[str, WorkflowTypeVersion] + supported_wes_versions: List[str] = Field( + ..., description="The version(s) of the WES schema supported by this service" + ) + supported_filesystem_protocols: List[str] = Field( + ..., + description="The filesystem protocols supported by this service, currently these may include common protocols using the terms 'http', 'https', 'sftp', 's3', 'gs', 'file', or 'synapse', but others are possible and the terms beyond these core protocols are currently not fixed. This section reports those protocols (either common or not) supported by this WES service.", + ) + workflow_engine_versions: Dict[str, WorkflowEngineVersion] + default_workflow_engine_parameters: List[DefaultWorkflowEngineParameter] = Field( + ..., + description="Each workflow engine can present additional parameters that can be sent to the workflow engine. This message will list the default values, and their types for each workflow engine.", + ) + system_state_counts: Dict[str, int] = Field( + ..., + description="The system statistics, key is the statistic, value is the count of runs in that state. See the State enum for the possible keys.", + ) + auth_instructions_url: str = Field( + ..., + description="A web page URL with human-readable instructions on how to get an authorization token for use with a specific WES endpoint.", + ) + tags: Dict[str, str] diff --git a/lib/galaxy/schema/wes/gen.sh b/lib/galaxy/schema/wes/gen.sh new file mode 100755 index 000000000000..25bd30469ec2 --- /dev/null +++ b/lib/galaxy/schema/wes/gen.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# must be run from a virtualenv with... +# https://github.com/koxudaxi/datamodel-code-generator + +# Use the installed datamodel-codegen +CODEGEN="datamodel-codegen" + +# Base URL for WES OpenAPI spec +WES_SPEC_URL="https://raw.githubusercontent.com/ga4gh/workflow-execution-service-schemas/develop/openapi/workflow_execution_service.openapi.yaml" + +# Generate models from full OpenAPI spec +$CODEGEN --input-file-type openapi --output-model-type pydantic_v2.BaseModel --url "$WES_SPEC_URL" --output "__init__.py" diff --git a/lib/galaxy/webapps/galaxy/api/__init__.py b/lib/galaxy/webapps/galaxy/api/__init__.py index 96a8f3df2834..853f38926c62 100644 --- a/lib/galaxy/webapps/galaxy/api/__init__.py +++ b/lib/galaxy/webapps/galaxy/api/__init__.py @@ -50,7 +50,10 @@ Mapper, request_config, ) -from starlette.datastructures import Headers +from starlette.datastructures import ( + Headers, + URL, +) from starlette.routing import ( Match, NoMatchFound, @@ -253,6 +256,10 @@ def url_path(self) -> str: url = urljoin(url, root_path) return url + @property + def url(self) -> URL: + return self.__request.url + @property def host(self) -> str: return self.__request.base_url.netloc diff --git a/lib/galaxy/webapps/galaxy/api/drs.py b/lib/galaxy/webapps/galaxy/api/drs.py index 99d535cf9153..f5dfa15e68d8 100644 --- a/lib/galaxy/webapps/galaxy/api/drs.py +++ b/lib/galaxy/webapps/galaxy/api/drs.py @@ -13,14 +13,9 @@ from galaxy.config import GalaxyAppConfiguration from galaxy.exceptions import ObjectNotFound from galaxy.managers.context import ProvidesHistoryContext -from galaxy.schema.drs import ( - DrsObject, - Organization, - Service, - ServiceType, -) -from galaxy.version import VERSION +from galaxy.schema.drs import DrsObject from galaxy.webapps.galaxy.services.datasets import DatasetsService +from galaxy.webapps.galaxy.services.ga4gh import 
build_service_info from . import ( depends, DependsOnTrans, @@ -44,36 +39,14 @@ class DrsApi: config: GalaxyAppConfiguration = depends(GalaxyAppConfiguration) @router.get("/ga4gh/drs/v1/service-info", public=True) - def service_info(self, request: Request) -> Service: - components = request.url.components - hostname = components.hostname - assert hostname - default_organization_id = ".".join(reversed(hostname.split("."))) - config = self.config - organization_id = config.ga4gh_service_id or default_organization_id - organization_name = config.organization_name or organization_id - organization_url = config.organization_url or f"{components.scheme}://{components.netloc}" - - organization = Organization( - url=organization_url, - name=organization_name, - ) - service_type = ServiceType( - group="org.ga4gh", + def service_info(self, request: Request): + return build_service_info( + config=self.config, + request_url=str(request.url), artifact="drs", - version="1.2.0", - ) - extra_kwds = {} - if environment := config.ga4gh_service_environment: - extra_kwds["environment"] = environment - return Service( - id=organization_id + ".drs", - name=DRS_SERVICE_NAME, - description=DRS_SERVICE_DESCRIPTION, - organization=organization, - type=service_type, - version=VERSION, - **extra_kwds, + service_name=DRS_SERVICE_NAME, + service_description=DRS_SERVICE_DESCRIPTION, + artifact_version="1.2.0", ) @router.get("/ga4gh/drs/v1/objects/{object_id}", public=True) diff --git a/lib/galaxy/webapps/galaxy/api/jobs.py b/lib/galaxy/webapps/galaxy/api/jobs.py index 4885b712068e..0c8e866fa762 100644 --- a/lib/galaxy/webapps/galaxy/api/jobs.py +++ b/lib/galaxy/webapps/galaxy/api/jobs.py @@ -22,6 +22,7 @@ Path, Query, ) +from fastapi.responses import Response from pydantic import Field from galaxy import exceptions @@ -620,6 +621,36 @@ def show( else: return EncodedJobDetails(**self.service.show(trans, job_id, bool(full))) + @router.get( + "/api/jobs/{job_id}/stdout", + name="get_job_stdout", + summary="Return stdout from job execution", + ) + def stdout( + self, + job_id: JobIdPathParam, + trans: ProvidesUserContext = DependsOnTrans, + ) -> Response: + """Return job stdout as plain text.""" + job = self.service.get_job(trans=trans, job_id=job_id) + stdout_content = job.stdout or "" + return Response(content=stdout_content, media_type="text/plain") + + @router.get( + "/api/jobs/{job_id}/stderr", + name="get_job_stderr", + summary="Return stderr from job execution", + ) + def stderr( + self, + job_id: JobIdPathParam, + trans: ProvidesUserContext = DependsOnTrans, + ) -> Response: + """Return job stderr as plain text.""" + job = self.service.get_job(trans=trans, job_id=job_id) + stderr_content = job.stderr or "" + return Response(content=stderr_content, media_type="text/plain") + @router.delete( "/api/jobs/{job_id}", name="cancel_job", diff --git a/lib/galaxy/webapps/galaxy/api/wes.py b/lib/galaxy/webapps/galaxy/api/wes.py new file mode 100644 index 000000000000..cb9280d86110 --- /dev/null +++ b/lib/galaxy/webapps/galaxy/api/wes.py @@ -0,0 +1,162 @@ +"""GA4GH WES (Workflow Execution Service) API endpoints.""" + +import logging +from typing import ( + Annotated, + Optional, +) + +from fastapi import ( + File, + Form, + Path, + Query, + Request, + UploadFile, +) + +from galaxy.managers.context import ProvidesUserContext +from galaxy.schema.fields import DecodedDatabaseIdField +from galaxy.schema.wes import ( + RunId, + RunListResponse, + RunLog, + RunStatus, + ServiceInfo, + TaskListResponse, + TaskLog, +) +from 
galaxy.webapps.galaxy.services.wes import WesService +from galaxy.work.context import SessionRequestContext +from . import ( + depends, + DependsOnTrans, + Router, +) + +log = logging.getLogger(__name__) +router = Router(tags=["wes"]) + +RunIdParam = Annotated[ + DecodedDatabaseIdField, + Path( + title="Workflow Invocation ID", + ), +] +TaskIdParam = Annotated[ + str, + Path( + title="Task ID", + description="Task identifier: step order_index, or order_index.job_index for collection mapping jobs", + ), +] +PageTokenParam: str = Query(None, title="Page Token", description="Token for pagination") +PageSizeParam: int = Query(10, title="Page Size", description="Number of results per page", ge=1, le=100) + +WES_SERVICE_NAME = "Galaxy WES API" +WES_SERVICE_DESCRIPTION = "Executes Galaxy workflows according to the GA4GH WES specification" + + +@router.cbv +class WesApi: + service: WesService = depends(WesService) + + @router.get("/ga4gh/wes/v1/service-info", public=True) + def service_info(self, request: Request, trans: ProvidesUserContext = DependsOnTrans) -> ServiceInfo: + """Get WES service information.""" + return self.service.service_info(trans, str(request.url)) + + @router.post("/ga4gh/wes/v1/runs") + def submit_run( + self, + trans: ProvidesUserContext = DependsOnTrans, + workflow_params: Optional[str] = Form(None), + workflow_type: str = Form(...), + workflow_type_version: str = Form(...), + workflow_url: Optional[str] = Form(None), + workflow_engine_parameters: Optional[str] = Form(None), + workflow_engine: Optional[str] = Form(None), + workflow_engine_version: Optional[str] = Form(None), + tags: Optional[str] = Form(None), + workflow_attachment: Optional[UploadFile] = File(None), + ) -> RunId: + """Submit a new workflow run. + + Accepts multipart/form-data with workflow and parameters. 
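+
+        The supported ``workflow_type`` values are ``gx_workflow_ga`` and
+        ``gx_workflow_format2`` (see ``service-info``), and ``workflow_url`` may use
+        the ``gxworkflow://`` scheme to reference a workflow already stored in Galaxy.
+        An illustrative request (hypothetical server URL and workflow ID)::
+
+            curl -X POST https://galaxy.example.org/ga4gh/wes/v1/runs \
+                -H "x-api-key: $GALAXY_API_KEY" \
+                -F workflow_type=gx_workflow_ga \
+                -F workflow_type_version=1.0.0 \
+                -F workflow_url=gxworkflow://f2db41e1fa331b3e \
+                -F workflow_params='{}'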
+ """ + return self.service.submit_run( + trans=trans, + workflow_params=workflow_params, + workflow_type=workflow_type, + workflow_type_version=workflow_type_version, + workflow_url=workflow_url, + workflow_engine_parameters=workflow_engine_parameters, + workflow_engine=workflow_engine, + workflow_engine_version=workflow_engine_version, + tags=tags, + workflow_attachment=workflow_attachment, + ) + + @router.get("/ga4gh/wes/v1/runs") + def list_runs( + self, + trans: ProvidesUserContext = DependsOnTrans, + page_size: int = PageSizeParam, + page_token: str = PageTokenParam, + ) -> RunListResponse: + """List workflow runs.""" + return self.service.list_runs(trans, page_size, page_token) + + @router.get("/ga4gh/wes/v1/runs/{run_id}") + def get_run( + self, + run_id: RunIdParam, + trans: SessionRequestContext = DependsOnTrans, + ) -> RunLog: + """Get workflow run details.""" + assert isinstance(run_id, int) + return self.service.get_run(trans, run_id) + + @router.get("/ga4gh/wes/v1/runs/{run_id}/status") + def get_run_status( + self, + run_id: RunIdParam, + trans: ProvidesUserContext = DependsOnTrans, + ) -> RunStatus: + """Get workflow run status.""" + return self.service.get_run_status(trans, run_id) + + @router.post("/ga4gh/wes/v1/runs/{run_id}/cancel") + def cancel_run( + self, + run_id: RunIdParam, + trans: ProvidesUserContext = DependsOnTrans, + ) -> RunId: + """Cancel a workflow run.""" + return self.service.cancel_run(trans, run_id) + + @router.get("/ga4gh/wes/v1/runs/{run_id}/tasks") + def get_run_tasks( + self, + run_id: RunIdParam, + trans: ProvidesUserContext = DependsOnTrans, + page_size: int = PageSizeParam, + page_token: str = PageTokenParam, + ) -> TaskListResponse: + """Get paginated list of tasks for a workflow run.""" + assert isinstance(run_id, int) + return self.service.get_run_tasks(trans, run_id, page_size, page_token) + + @router.get("/ga4gh/wes/v1/runs/{run_id}/tasks/{task_id}") + def get_run_task( + self, + run_id: RunIdParam, + task_id: TaskIdParam, + trans: ProvidesUserContext = DependsOnTrans, + ) -> TaskLog: + """Get details for a specific task. + + Task ID format: order_index or order_index.job_index for collection mapping jobs. + """ + assert isinstance(run_id, int) + return self.service.get_run_task(trans, run_id, task_id) diff --git a/lib/galaxy/webapps/galaxy/services/ga4gh.py b/lib/galaxy/webapps/galaxy/services/ga4gh.py new file mode 100644 index 000000000000..062fb903623f --- /dev/null +++ b/lib/galaxy/webapps/galaxy/services/ga4gh.py @@ -0,0 +1,77 @@ +"""Shared utilities for GA4GH service implementations.""" + +from urllib.parse import urlparse + +from galaxy.config import GalaxyAppConfiguration +from galaxy.schema.drs import ( + Organization, + Service, + ServiceType, +) +from galaxy.version import VERSION + + +def build_service_info( + config: GalaxyAppConfiguration, + request_url: str, + artifact: str, + service_name: str, + service_description: str, + artifact_version: str = "1.0.0", +) -> Service: + """Build a GA4GH Service object with Galaxy organization info. + + This utility handles the common pattern of building GA4GH service-info + responses for services like DRS and WES. 
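+
+    For example (hypothetical values): given ``request_url="https://galaxy.example.com/ga4gh/drs/v1/service-info"``,
+    ``artifact="drs"``, and no ``ga4gh_service_id`` configured, the hostname is reversed
+    to ``com.example.galaxy`` and the resulting service id is ``com.example.galaxy.drs``.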
+ + Args: + config: Galaxy application configuration + request_url: The request URL (used to extract hostname) + artifact: The GA4GH artifact name (e.g., "drs", "wes") + service_name: Human-readable name for the service + service_description: Description of the service + artifact_version: Version of the artifact specification (default "1.0.0") + + Returns: + Service object with organization and type information + """ + # Extract hostname from request URL + parsed_url = urlparse(request_url) + hostname = parsed_url.hostname or "localhost" + scheme = parsed_url.scheme or "https" + netloc = parsed_url.netloc + + # Build organization ID from reversed domain name + default_organization_id = ".".join(reversed(hostname.split("."))) + organization_id = config.ga4gh_service_id or default_organization_id + organization_name = config.organization_name or organization_id + organization_url = config.organization_url or f"{scheme}://{netloc}" + + # Create Organization object + organization = Organization( + name=organization_name, + url=organization_url, + ) + + # Create ServiceType object + service_type = ServiceType( + group="org.ga4gh", + artifact=artifact, + version=artifact_version, + ) + + # Build extra kwargs from config + extra_kwds = {} + if environment := config.ga4gh_service_environment: + extra_kwds["environment"] = environment + + # Create and return Service object + return Service( + id=f"{organization_id}.{artifact}", + name=service_name, + description=service_description, + organization=organization, + type=service_type, + version=VERSION, + **extra_kwds, + ) diff --git a/lib/galaxy/webapps/galaxy/services/wes.py b/lib/galaxy/webapps/galaxy/services/wes.py new file mode 100644 index 000000000000..b663a68c26e6 --- /dev/null +++ b/lib/galaxy/webapps/galaxy/services/wes.py @@ -0,0 +1,1174 @@ +"""GA4GH WES (Workflow Execution Service) implementation for Galaxy.""" + +import base64 +import json +import logging +from dataclasses import dataclass +from typing import ( + Any, + List, + Optional, +) +from urllib.parse import ( + parse_qs, + urlparse, +) + +from fastapi import UploadFile +from sqlalchemy import ( + literal, + select, + tuple_, + union_all, +) +from sqlalchemy.orm import joinedload + +from galaxy import exceptions +from galaxy.config import GalaxyAppConfiguration +from galaxy.files.uris import stream_url_to_str +from galaxy.managers.context import ProvidesUserContext +from galaxy.managers.workflows import ( + RawWorkflowDescription, + WorkflowContentsManager, + WorkflowCreateOptions, +) +from galaxy.model import ( + History, + ImplicitCollectionJobs, + ImplicitCollectionJobsJobAssociation, + Job, + WorkflowInvocation, + WorkflowInvocationStep, +) +from galaxy.model.keyset_token_pagination import ( + KeysetPagination, + SingleKeysetToken, +) +from galaxy.schema.wes import ( + DefaultWorkflowEngineParameter, + RunId, + RunListResponse, + RunLog, + RunRequest, + RunStatus, + RunSummary, + ServiceInfo, + State, + TaskListResponse, + TaskLog, + WorkflowEngineVersion, + WorkflowTypeVersion, +) +from galaxy.schema.workflows import InvokeWorkflowPayload +from galaxy.security.idencoding import IdEncodingHelper +from galaxy.webapps.galaxy.services.base import ServiceBase +from galaxy.webapps.galaxy.services.ga4gh import build_service_info +from galaxy.webapps.galaxy.services.workflows import WorkflowsService +from galaxy.work.context import SessionRequestContext + +log = logging.getLogger(__name__) + +# Map Galaxy workflow invocation states to WES states +GALAXY_TO_WES_STATE = { + "new": 
State.QUEUED,
+    "ready": State.INITIALIZING,
+    "scheduled": State.RUNNING,
+    "failed": State.EXECUTOR_ERROR,
+    "cancelled": State.CANCELED,
+    "cancelling": State.CANCELING,
+}
+
+WES_TO_GALAXY_STATE = {v: k for k, v in GALAXY_TO_WES_STATE.items()}
+
+
+@dataclass
+class TaskKeysetToken:
+    """Composite keyset token for task pagination (step_order, job_index).
+
+    Used to identify position in task list for cursor-based pagination.
+    """
+
+    step_order: int
+    job_index: int
+
+    def to_values(self) -> list:
+        """Convert token to normalized list of values for encoding."""
+        return [self.step_order, self.job_index]
+
+    @classmethod
+    def from_values(cls, values: list) -> "TaskKeysetToken":
+        """Reconstruct token from decoded values."""
+        if len(values) < 2:
+            raise ValueError("TaskKeysetToken requires at least 2 values")
+        return cls(step_order=values[0], job_index=values[1])
+
+
+def _parse_gxworkflow_uri(workflow_url: str) -> tuple[str, bool]:
+    """Parse a gxworkflow:// URI to extract workflow ID and instance flag.
+
+    Format: gxworkflow://<encoded_workflow_id>[?instance=<true|false>]
+    - encoded_workflow_id: The encoded Galaxy workflow ID
+    - instance: Optional parameter (defaults to False)
+      - False: Load the StoredWorkflow (workflow definition)
+      - True: Load the Workflow (active workflow instance)
+
+    Args:
+        workflow_url: The gxworkflow:// URI
+
+    Returns:
+        Tuple of (encoded_workflow_id, instance_flag)
+
+    Raises:
+        exceptions.MessageException: If URI format is invalid
+    """
+    try:
+        parsed = urlparse(workflow_url)
+
+        # Verify scheme is gxworkflow
+        if parsed.scheme != "gxworkflow":
+            raise ValueError("Invalid scheme, must be gxworkflow://")
+
+        # Extract workflow ID (netloc + path)
+        workflow_id = parsed.netloc + parsed.path if parsed.path else parsed.netloc
+
+        if not workflow_id:
+            raise ValueError("Missing workflow ID in gxworkflow:// URI")
+
+        # Parse query parameters for instance flag
+        instance = False
+        if parsed.query:
+            params = parse_qs(parsed.query)
+            if "instance" in params:
+                instance_str = params["instance"][0].lower()
+                if instance_str not in ("true", "false"):
+                    raise ValueError("instance parameter must be 'true' or 'false'")
+                instance = instance_str == "true"
+
+        return workflow_id, instance
+
+    except ValueError as e:
+        raise exceptions.MessageException(f"Invalid gxworkflow:// URI: {str(e)}")
+    except Exception as e:
+        raise exceptions.MessageException(f"Error parsing gxworkflow:// URI: {str(e)}")
+
+
+def _load_workflow_content(
+    trans: ProvidesUserContext,
+    workflow_attachment: Optional[UploadFile],
+    workflow_url: Optional[str],
+) -> dict[str, Any]:
+    """Load workflow content from attachment or URL.
+
+    Handles three input methods:
+    1. workflow_attachment: Uploaded file
+    2. workflow_url with gxworkflow:// scheme: Load from Galaxy database
+    3. workflow_url with other schemes: Fetch from URL (http, base64, etc.)
+
+    Returns the workflow as a dictionary for normalization.
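+
+    For example (illustrative ID), ``workflow_url="gxworkflow://f2db41e1fa331b3e"`` is
+    not fetched here; it is returned as the marker ``{"workflow_uri": ...}`` so the
+    service layer can resolve it against the Galaxy database, while attached Format2
+    YAML that fails JSON parsing is wrapped as ``{"yaml_content": ...}`` for later
+    normalization.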
+ + Args: + trans: Galaxy transaction/context + workflow_attachment: Uploaded workflow file + workflow_url: URL to fetch workflow from + + Returns: + Dictionary representation of the workflow + + Raises: + exceptions.MessageException: If workflow cannot be loaded + """ + workflow_content = None + + # Load from attachment file + if workflow_attachment: + try: + # Read the uploaded file content + content = workflow_attachment.file.read() + workflow_content = content.decode("utf-8") + except UnicodeDecodeError: + raise exceptions.MessageException("Workflow attachment must be UTF-8 encoded text") + except Exception as e: + raise exceptions.MessageException(f"Error reading workflow attachment: {str(e)}") + + # Load from URL + elif workflow_url: + # Handle gxworkflow:// scheme (Galaxy database reference) + if workflow_url.startswith("gxworkflow://"): + # This will be handled by the WesService that has access to workflows_manager + # Return a special marker dict that includes the URI for later processing + return {"workflow_uri": workflow_url} + + # Handle other URL schemes (http, https, base64, file, etc.) + try: + # Fetch workflow content from URL + # validate_uri_access is called by stream_url_to_str for security + workflow_content = stream_url_to_str(workflow_url, trans.app.file_sources) + except exceptions.AuthenticationRequired as e: + raise e + except Exception as e: + raise exceptions.MessageException(f"Error fetching workflow from URL: {str(e)}") + + if workflow_content is None: + raise exceptions.RequestParameterMissingException("Either workflow_url or workflow_attachment must be provided") + + assert isinstance(workflow_content, str) + + # Parse as JSON or YAML + try: + return json.loads(workflow_content) + except json.JSONDecodeError: + # Try as YAML-like dict string - Galaxy's normalize_workflow_format + # will handle YAML parsing + return {"yaml_content": workflow_content} + + +def _determine_workflow_type(workflow_dict: dict[str, Any]) -> str: + """Determine Galaxy workflow type from workflow dictionary. + + For WES, we support: + - gx_workflow_ga: Native Galaxy workflow format + - gx_workflow_format2: Format2 (CWL-style) Galaxy workflows + + Args: + workflow_dict: The parsed workflow dictionary + + Returns: + The Galaxy workflow type identifier + + Raises: + exceptions.MessageException: If workflow type cannot be determined + """ + # Check for Format2 indicators + if "class" in workflow_dict and workflow_dict["class"] == "GalaxyWorkflow": + return "gx_workflow_format2" + + if "yaml_content" in workflow_dict: + return "gx_workflow_format2" + + # Check for native Galaxy workflow format + if "steps" in workflow_dict or "workflow" in workflow_dict: + return "gx_workflow_ga" + + raise exceptions.MessageException( + "Cannot determine workflow type from RunRequest. Supported types: gx_workflow_ga, gx_workflow_format2" + ) + + +def _normalize_run_request( + trans: ProvidesUserContext, + workflow_contents_manager: WorkflowContentsManager, + workflow_dict: dict[str, Any], +) -> RawWorkflowDescription: + """Normalize workflow content for Galaxy consumption. + + Converts WES workflow format to Galaxy's internal format. 
+ + Args: + trans: Galaxy transaction/context + workflow_contents_manager: Galaxy workflow contents manager + workflow_dict: The parsed workflow dictionary + + Returns: + RawWorkflowDescription ready for workflow creation + + Raises: + exceptions.MessageException: If normalization fails + """ + return workflow_contents_manager.normalize_workflow_format(trans, workflow_dict) + + +def _get_or_create_history( + trans: ProvidesUserContext, + engine_params: dict[str, Any], +) -> History: + """Get or create a history for the WES run. + + If history_id is provided in engine_params, use existing history. + Otherwise, create new history with name from history_name or auto-generated. + + Args: + trans: Galaxy transaction/context + engine_params: Workflow engine parameters from RunRequest + + Returns: + History object for the workflow invocation + + Raises: + exceptions.ObjectNotFound: If specified history_id does not exist + exceptions.AuthenticationRequired: If user cannot access history + """ + # Check if specific history requested + if "history_id" in engine_params: + history_id = engine_params["history_id"] + # Decode the ID if it's encoded + try: + decoded_id = trans.security.decode_id(history_id) + except Exception: + # If decode fails, assume it's already decoded + decoded_id = history_id + + history = trans.sa_session.query(History).filter_by(id=decoded_id, user_id=trans.user.id).one_or_none() + if not history: + raise exceptions.ObjectNotFound(f"History {history_id} not found or not accessible") + return history + + # Create new history + history = History(user=trans.user, name=_generate_history_name(engine_params)) + trans.sa_session.add(history) + trans.sa_session.flush() + # Postgres tests in CI fail without this commit. + trans.sa_session.commit() + return history + + +def _generate_history_name(engine_params: dict[str, Any]) -> str: + """Generate a name for auto-created history. + + Uses history_name from engine_params if provided, otherwise generates one. + + Args: + engine_params: Workflow engine parameters + + Returns: + History name string + """ + if "history_name" in engine_params and engine_params["history_name"]: + return engine_params["history_name"] + + return "WES Run" + + +class WesService(ServiceBase): + """Service for handling GA4GH WES API requests.""" + + _security: IdEncodingHelper + + def __init__( + self, + workflows_service: WorkflowsService, + config: GalaxyAppConfiguration, + security: IdEncodingHelper, + ): + self._workflows_service = workflows_service + self._config = config + self._security = security + self._keyset_pagination = KeysetPagination() + + def service_info(self, trans: ProvidesUserContext, request_url: str) -> ServiceInfo: + """Return WES service information. 
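+ + Example response fragment (illustrative, heavily trimmed): + {"supported_wes_versions": ["1.0.0"], + "workflow_type_versions": {"gx_workflow_ga": {"workflow_type_version": ["1.0.0"]}, + "gx_workflow_format2": {"workflow_type_version": ["1.0.0"]}}}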
+ + Args: + trans: Galaxy transaction/context + request_url: The request URL used to build service endpoint URLs + + Returns: + ServiceInfo object with Galaxy WES capabilities + """ + # Build base service info using shared utility + base_service = build_service_info( + config=self._config, + request_url=request_url, + artifact="wes", + service_name="Galaxy WES API", + service_description="Executes Galaxy workflows according to the GA4GH WES specification", + artifact_version="1.0.0", + ) + # TODO: + auth_instructions_url = "TODO" + + # Import Organization and ServiceType from WES schema + from galaxy.schema.wes import ( + Organization as WESOrganization, + ServiceType as WESServiceType, + ) + + # Convert DRS Service objects to WES schema objects + organization = WESOrganization( + name=base_service.organization.name, + url=base_service.organization.url, + ) + service_type = WESServiceType( + group=base_service.type.group, + artifact=base_service.type.artifact, + version=base_service.type.version, + ) + + # Create ServiceInfo with WES-specific fields + return ServiceInfo( + id=base_service.id, + name=base_service.name, + type=service_type, + description=base_service.description, + organization=organization, + contactUrl=base_service.contactUrl, + documentationUrl=base_service.documentationUrl, + createdAt=base_service.createdAt, + updatedAt=base_service.updatedAt, + environment=base_service.environment, + version=base_service.version, + workflow_type_versions={ + "gx_workflow_ga": WorkflowTypeVersion(workflow_type_version=["1.0.0"]), + "gx_workflow_format2": WorkflowTypeVersion(workflow_type_version=["1.0.0"]), + }, + supported_wes_versions=["1.0.0"], + supported_filesystem_protocols=["http", "https", "file", "s3", "gs"], + workflow_engine_versions={ + "galaxy": WorkflowEngineVersion(workflow_engine_version=["1.0.0"]), + }, + default_workflow_engine_parameters=[ + DefaultWorkflowEngineParameter( + name="history_name", + type="string", + default_value="", + ), + DefaultWorkflowEngineParameter( + name="history_id", + type="string", + default_value="", + ), + DefaultWorkflowEngineParameter( + name="preferred_object_store_id", + type="string", + default_value="", + ), + DefaultWorkflowEngineParameter( + name="use_cached_job", + type="boolean", + default_value="false", + ), + ], + system_state_counts=dict.fromkeys([s.value for s in State], 0), + auth_instructions_url=auth_instructions_url, + tags={}, + ) + + def submit_run( + self, + trans: ProvidesUserContext, + workflow_params: Optional[str] = None, + workflow_type: Optional[str] = None, + workflow_type_version: Optional[str] = None, + workflow_url: Optional[str] = None, + workflow_engine_parameters: Optional[str] = None, + workflow_engine: Optional[str] = None, + workflow_engine_version: Optional[str] = None, + tags: Optional[str] = None, + workflow_attachment: Optional[UploadFile] = None, + ) -> RunId: + """Submit a new workflow run. 
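+ + A minimal client sketch (illustrative; the host, API key, and encoded IDs are placeholder assumptions): + + import json + import requests + + # Submit a run as multipart/form-data, referencing an existing Galaxy workflow + response = requests.post( + "https://galaxy.example.org/ga4gh/wes/v1/runs", + headers={"x-api-key": "<api_key>"}, + data={ + "workflow_type": "gx_workflow_format2", + "workflow_type_version": "v1", + "workflow_url": "gxworkflow://<encoded_workflow_id>", + "workflow_params": json.dumps({"input1": {"src": "hda", "id": "<encoded_dataset_id>"}}), + }, + ) + run_id = response.json()["run_id"]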
+ + Args: + trans: Galaxy transaction/context + workflow_params: JSON string of workflow parameters + workflow_type: Type of workflow (gx_workflow_ga, gx_workflow_format2) + workflow_type_version: Version of the workflow type + workflow_url: URL to fetch workflow from + workflow_engine_parameters: JSON string of engine parameters + workflow_engine: Workflow engine name + workflow_engine_version: Version of workflow engine + tags: JSON string of tags + workflow_attachment: Uploaded workflow file + + Returns: + RunId containing the created run ID + """ + if trans.anonymous: + raise exceptions.AuthenticationRequired("You need to be logged in to submit workflows.") + + trans.check_user_activation() + + # Step 1: Load workflow content from URL or attachment + workflow_dict = _load_workflow_content(trans, workflow_attachment, workflow_url) + + # Check if this is a gxworkflow:// URI reference + if "workflow_uri" in workflow_dict: + # Load workflow from Galaxy database using gxworkflow:// URI + workflow_uri = workflow_dict["workflow_uri"] + encoded_workflow_id, instance = _parse_gxworkflow_uri(workflow_uri) + + # Load the workflow from the database + # by_stored_id=not instance means: + # - False (instance=False) -> load StoredWorkflow (by_stored_id=True) + # - True (instance=True) -> load Workflow (by_stored_id=False) + try: + stored_workflow = self._workflows_service._workflows_manager.get_stored_workflow( + trans, encoded_workflow_id, by_stored_id=not instance + ) + except Exception as e: + raise exceptions.ObjectNotFound( + f"Workflow '{encoded_workflow_id}' not found or not accessible: {str(e)}" + ) + + # Validate user has access to this workflow + if stored_workflow.user_id != trans.user.id and not trans.user_is_admin: + raise exceptions.ItemAccessibilityException("You do not have access to this workflow") + + # Use the existing workflow directly - no need to create a new one + # Skip to step 5 (engine parameters and history) + else: + # Step 2: Determine/validate workflow type + detected_type = _determine_workflow_type(workflow_dict) + if workflow_type and detected_type != workflow_type: + raise exceptions.RequestParameterInvalidException( + f"Requested workflow type '{workflow_type}' does not match detected type '{detected_type}'" + ) + workflow_type = detected_type + + # Step 3: Normalize workflow for Galaxy + raw_workflow_description = _normalize_run_request( + trans, self._workflows_service._workflow_contents_manager, workflow_dict + ) + + # Step 4: Create workflow in Galaxy + workflow_create_options = WorkflowCreateOptions( + archive_source="wes_api", + fill_defaults=True, + ) + created_workflow = self._workflows_service._workflow_contents_manager.build_workflow_from_raw_description( + trans, + raw_workflow_description, + workflow_create_options, + source="WES API", + ) + stored_workflow = created_workflow.stored_workflow + + # Step 5: Parse engine parameters and create/select history + engine_params = {} + if workflow_engine_parameters: + try: + engine_params = json.loads(workflow_engine_parameters) + except json.JSONDecodeError: + raise exceptions.MessageException("Invalid JSON in workflow_engine_parameters") + + history = _get_or_create_history(trans, engine_params) + + # Step 6: Parse workflow parameters + invoke_params = { + "history_id": trans.security.encode_id(history.id), + "inputs_by": "name", + } + + if workflow_params: + try: + params = json.loads(workflow_params) + invoke_params["inputs"] = params + except json.JSONDecodeError: + raise exceptions.MessageException("Invalid 
JSON in workflow_params") + + # Step 7: Apply workflow engine parameters + if "preferred_object_store_id" in engine_params: + invoke_params["preferred_object_store_id"] = engine_params["preferred_object_store_id"] + if "use_cached_job" in engine_params: + # Engine parameters arrive stringified per the WES spec, so compare against "true" + invoke_params["use_cached_job"] = engine_params["use_cached_job"].lower() == "true" + + # Step 8: Invoke workflow + invoke_payload = InvokeWorkflowPayload(**invoke_params) + workflow_invocation_response = self._workflows_service.invoke_workflow( + trans, + trans.security.encode_id(stored_workflow.id), + invoke_payload, + ) + + # The workflows service may return a batch (list) response; WES supports only single invocations + if isinstance(workflow_invocation_response, list): + raise exceptions.RequestParameterInvalidException("Batch workflow invocation not supported in WES API") + + invocation = workflow_invocation_response.root + encoded_invocation_id = invocation.id + return self._invocation_id_to_run_id(encoded_invocation_id) + + def list_runs( + self, + trans: ProvidesUserContext, + page_size: int = 10, + page_token: Optional[str] = None, + ) -> RunListResponse: + """List workflow runs for the user with keyset pagination. + + Uses keyset pagination (cursor-based) for stable results even when + invocations are added/deleted between requests. + + Args: + trans: Galaxy transaction/context + page_size: Number of runs per page (default 10, max 100) + page_token: Keyset token encoding last seen invocation ID + + Returns: + RunListResponse with paginated list of runs and next_page_token if more results exist + """ + # Decode keyset token to get last seen ID + token = self._keyset_pagination.decode_token(page_token, token_class=SingleKeysetToken) + last_id = token.last_id if token else None + + # Build query with keyset filtering + query = trans.sa_session.query(WorkflowInvocation).join(History).where(History.user_id == trans.user.id) + + # Apply keyset filter if we have a cursor + if last_id is not None: + query = query.filter(WorkflowInvocation.id < last_id) + + # Order by ID desc and fetch page + 1 to detect more results + invocations = query.order_by(WorkflowInvocation.id.desc()).limit(page_size + 1).all() + + runs = [] + has_more = len(invocations) > page_size + for invocation in invocations[:page_size]: + run_summary = self._invocation_to_run_summary(invocation) + runs.append(run_summary) + + # Generate next_page_token from last item's ID + next_page_token = None + if has_more and invocations: + last_invocation = invocations[page_size - 1] + token = SingleKeysetToken(last_id=last_invocation.id) + next_page_token = self._keyset_pagination.encode_token(token) + + return RunListResponse(runs=runs, next_page_token=next_page_token) + + def get_run( + self, + trans: SessionRequestContext, + run_id: int, + ) -> RunLog: + """Get full details of a workflow run. + + Args: + trans: Galaxy transaction/context + run_id: The WES run ID (Galaxy invocation ID) + + Returns: + RunLog with complete run details and DRS URIs for outputs + """ + invocation = self._get_invocation(trans, run_id) + return self._invocation_to_run_log(trans, invocation) + + def get_run_status( + self, + trans: ProvidesUserContext, + run_id: int, + ) -> RunStatus: + """Get abbreviated status of a workflow run.
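+ + Example response (illustrative run ID): {"run_id": "f2db41e1fa331b3e", "state": "RUNNING"}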
+ + Args: + trans: Galaxy transaction/context + run_id: The WES run ID (Galaxy invocation ID) + + Returns: + RunStatus with run ID and current state + """ + invocation = self._get_invocation(trans, run_id) + + return RunStatus( + run_id=self._security.encode_id(invocation.id), + state=GALAXY_TO_WES_STATE.get(invocation.state or "", State.UNKNOWN), + ) + + def cancel_run( + self, + trans: ProvidesUserContext, + run_id: int, + ) -> RunId: + """Cancel a running workflow. + + Args: + trans: Galaxy transaction/context + run_id: The WES run ID (Galaxy invocation ID) + + Returns: + RunId of the cancelled run + """ + invocation = self._get_invocation(trans, run_id) + + # Request cancellation through Galaxy's workflows manager + cancelled_invocation = self._workflows_service._workflows_manager.request_invocation_cancellation( + trans, invocation.id + ) + return self._invocation_to_run_id(cancelled_invocation) + + def _build_task_rows_query(self, invocation_id: int): + """Build UNION query for all task rows in an invocation. + + Returns a SQLAlchemy selectable that produces columns: + - step_id: int + - step_order: int + - task_type: 'single', 'collection', or 'no_job' + - job_id: Optional[int] + - job_index: int + """ + # Subquery 1: Single job steps + single_jobs = select( + WorkflowInvocationStep.id.label("step_id"), + WorkflowInvocationStep.order_index.label("step_order"), + literal("single").label("task_type"), + WorkflowInvocationStep.job_id.label("job_id"), + literal(0).label("job_index"), + ).where( + WorkflowInvocationStep.workflow_invocation_id == invocation_id, + WorkflowInvocationStep.job_id.isnot(None), + ) + + # Subquery 2: Collection mapping job steps (expanded per job) + collection_jobs = ( + select( + WorkflowInvocationStep.id.label("step_id"), + WorkflowInvocationStep.order_index.label("step_order"), + literal("collection").label("task_type"), + ImplicitCollectionJobsJobAssociation.job_id.label("job_id"), + ImplicitCollectionJobsJobAssociation.order_index.label("job_index"), + ) + .join( + ImplicitCollectionJobs, WorkflowInvocationStep.implicit_collection_jobs_id == ImplicitCollectionJobs.id + ) + .join( + ImplicitCollectionJobsJobAssociation, + ImplicitCollectionJobs.id == ImplicitCollectionJobsJobAssociation.implicit_collection_jobs_id, + ) + .where( + WorkflowInvocationStep.workflow_invocation_id == invocation_id, + WorkflowInvocationStep.implicit_collection_jobs_id.isnot(None), + ) + ) + + # Subquery 3: No-job steps + no_jobs = select( + WorkflowInvocationStep.id.label("step_id"), + WorkflowInvocationStep.order_index.label("step_order"), + literal("no_job").label("task_type"), + literal(None).label("job_id"), + literal(0).label("job_index"), + ).where( + WorkflowInvocationStep.workflow_invocation_id == invocation_id, + WorkflowInvocationStep.job_id.is_(None), + WorkflowInvocationStep.implicit_collection_jobs_id.is_(None), + ) + + return union_all(single_jobs, collection_jobs, no_jobs) + + def _get_paginated_task_rows( + self, + trans: ProvidesUserContext, + invocation_id: int, + last_token: Optional[TaskKeysetToken], + limit: int, + ) -> List[dict]: + """Fetch paginated task rows using composite keyset pagination. + + Uses (step_order, job_index) as composite keyset for cursor-based pagination.
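+ + Example (illustrative): with task rows ordered (0, 0), (1, 0), (1, 1), (1, 2), a cursor + token of (step_order=1, job_index=0) makes the next page start at (1, 1).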
+ + Returns list of dicts with keys: step_id, step_order, task_type, job_id, job_index + """ + # Build UNION subquery + task_rows_subquery = self._build_task_rows_query(invocation_id).subquery() + + # Apply ordering and pagination + stmt = select( + task_rows_subquery.c.step_id, + task_rows_subquery.c.step_order, + task_rows_subquery.c.task_type, + task_rows_subquery.c.job_id, + task_rows_subquery.c.job_index, + ).order_by( + task_rows_subquery.c.step_order, + task_rows_subquery.c.job_index, + ) + + # Apply composite keyset filter if we have a cursor + if last_token is not None: + stmt = stmt.where( + tuple_( + task_rows_subquery.c.step_order, + task_rows_subquery.c.job_index, + ) + > tuple_(last_token.step_order, last_token.job_index) + ) + + stmt = stmt.limit(limit + 1) # Fetch one extra to detect more results + + result = trans.sa_session.execute(stmt) + return [dict(row._mapping) for row in result] + + def _load_task_objects( + self, + trans: ProvidesUserContext, + task_rows: List[dict], + ) -> tuple[dict, dict]: + """Batch load Step and Job objects for task rows. + + Returns: + - steps_by_id: {step_id: WorkflowInvocationStep} + - jobs_by_id: {job_id: Job} + """ + if not task_rows: + return {}, {} + + # Extract unique IDs + step_ids = {row["step_id"] for row in task_rows} + job_ids = {row["job_id"] for row in task_rows if row["job_id"] is not None} + + # Batch load steps with workflow_step relationship + steps = ( + trans.sa_session.query(WorkflowInvocationStep) + .options(joinedload(WorkflowInvocationStep.workflow_step)) + .filter(WorkflowInvocationStep.id.in_(step_ids)) + .all() + ) + steps_by_id = {step.id: step for step in steps} + + # Batch load jobs + jobs_by_id = {} + if job_ids: + jobs = trans.sa_session.query(Job).filter(Job.id.in_(job_ids)).all() + jobs_by_id = {job.id: job for job in jobs} + + return steps_by_id, jobs_by_id + + def _task_row_to_task_log( + self, + task_row: dict, + steps_by_id: dict, + jobs_by_id: dict, + ) -> TaskLog: + """Convert a task row dict to a TaskLog object. + + Task ID format: order_index or order_index.job_index for collection mapping jobs. + """ + step_id = task_row["step_id"] + step_order = task_row["step_order"] + job_id = task_row["job_id"] + job_index = task_row["job_index"] + task_type = task_row["task_type"] + + step = steps_by_id[step_id] + workflow_step = step.workflow_step + + # Generate task ID using step order_index + if task_type == "collection": + task_id = f"{step_order}.{job_index}" + else: + task_id = str(step_order) + + # Get step name + step_name = workflow_step.label or workflow_step.tool_id or f"step_{step.order_index}" + + # Build TaskLog with or without job details + if job_id is not None: + job = jobs_by_id[job_id] + return TaskLog( + id=task_id, + name=step_name, + start_time=job.create_time.isoformat() if job.create_time else None, + end_time=job.update_time.isoformat() if job.update_time else None, + stdout=f"/api/jobs/{self._security.encode_id(job.id)}/stdout", + stderr=f"/api/jobs/{self._security.encode_id(job.id)}/stderr", + exit_code=job.exit_code, + ) + else: + # No job - use step-level timing + return TaskLog( + id=task_id, + name=step_name, + start_time=step.create_time.isoformat() if step.create_time else None, + end_time=step.update_time.isoformat() if step.update_time else None, + ) + + def get_run_tasks( + self, + trans: ProvidesUserContext, + run_id: int, + page_size: int = 10, + page_token: Optional[str] = None, + ) -> TaskListResponse: + """Get paginated list of tasks for a workflow run. 
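+ + Example (illustrative): a run with one input step and a three-way collection mapping step + exposes task ids "0", "1.0", "1.1", "1.2"; with page_size=2 the first page returns "0" and + "1.0" plus a next_page_token encoding (step_order=1, job_index=0).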
+ + Uses composite keyset pagination via UNION query to avoid loading + all steps/jobs into memory and for cursor-based stability. + + Args: + trans: Galaxy transaction/context + run_id: The WES run ID (Galaxy invocation ID) + page_size: Number of tasks per page (default 10, max 100) + page_token: Token for pagination (composite keyset: step_order, job_index) + + Returns: + TaskListResponse with paginated tasks + """ + invocation = self._get_invocation(trans, run_id) + + # Decode composite keyset token + token = self._keyset_pagination.decode_token(page_token, token_class=TaskKeysetToken) + + # Fetch paginated task rows (+1 to detect more results) + task_rows = self._get_paginated_task_rows( + trans, + invocation.id, + token, + page_size, + ) + + # Check if more results exist + has_more = len(task_rows) > page_size + if has_more: + task_rows = task_rows[:page_size] + + # Batch load all needed objects + steps_by_id, jobs_by_id = self._load_task_objects(trans, task_rows) + + # Convert to TaskLog objects + task_logs = [self._task_row_to_task_log(row, steps_by_id, jobs_by_id) for row in task_rows] + + # Generate next page token + next_page_token = None + if has_more and task_rows: + last_row = task_rows[-1] + token = TaskKeysetToken(step_order=last_row["step_order"], job_index=last_row["job_index"]) + next_page_token = self._keyset_pagination.encode_token(token) + + return TaskListResponse( + task_logs=task_logs if task_logs else None, + next_page_token=next_page_token, + ) + + def get_run_task( + self, + trans: ProvidesUserContext, + run_id: int, + task_id: str, + ) -> TaskLog: + """Get details for a specific task by direct lookup. + + Parses task_id to extract step order_index and optional job_index, + then directly queries for that specific step/job. 
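+ + Example (illustrative): task_id "2" addresses the single job of the step at order_index 2, + while task_id "1.0" addresses the first job of a collection mapping step at order_index 1.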
+ + Args: + trans: Galaxy transaction/context + run_id: The WES run ID (Galaxy invocation ID) + task_id: Task ID - "{order_index}" or "{order_index}.{job_index}" for collection mapping jobs + + Returns: + TaskLog object for the specified task + + Raises: + exceptions.ObjectNotFound: If task not found + """ + invocation = self._get_invocation(trans, run_id) + + # Parse task_id: either "{order_index}" or "{order_index}.{job_index}" + parts = task_id.split(".") + try: + step_order = int(parts[0]) + job_index = int(parts[1]) if len(parts) > 1 else None + except (ValueError, IndexError): + raise exceptions.ObjectNotFound(f"Invalid task_id format: {task_id}") + + # Fetch the specific step by order_index + step = ( + trans.sa_session.query(WorkflowInvocationStep) + .options(joinedload(WorkflowInvocationStep.workflow_step)) + .filter( + WorkflowInvocationStep.order_index == step_order, + WorkflowInvocationStep.workflow_invocation_id == invocation.id, + ) + .one_or_none() + ) + + if not step: + raise exceptions.ObjectNotFound(f"Task {task_id} not found in run {run_id}") + + # Get step name + workflow_step = step.workflow_step + step_name = workflow_step.label or workflow_step.tool_id or f"step_{step.order_index}" + + # Handle different step types + if step.job_id: + # Single job step + if job_index is not None: + raise exceptions.ObjectNotFound(f"Task {task_id} specifies job_index but step has single job") + + job = trans.sa_session.query(Job).filter(Job.id == step.job_id).one() + + return TaskLog( + id=str(step_order), + name=step_name, + start_time=job.create_time.isoformat() if job.create_time else None, + end_time=job.update_time.isoformat() if job.update_time else None, + stdout=f"/api/jobs/{self._security.encode_id(job.id)}/stdout", + stderr=f"/api/jobs/{self._security.encode_id(job.id)}/stderr", + exit_code=job.exit_code, + ) + + elif step.implicit_collection_jobs_id: + # Collection mapping job step + if job_index is None: + raise exceptions.ObjectNotFound(f"Task {task_id} missing job_index for collection mapping job step") + + # Fetch specific job from collection by order_index + job_assoc = ( + trans.sa_session.query(ImplicitCollectionJobsJobAssociation) + .filter( + ImplicitCollectionJobsJobAssociation.implicit_collection_jobs_id + == step.implicit_collection_jobs_id, + ImplicitCollectionJobsJobAssociation.order_index == job_index, + ) + .one_or_none() + ) + + if not job_assoc: + raise exceptions.ObjectNotFound(f"Task {task_id} job not found at index {job_index}") + + job = trans.sa_session.query(Job).filter(Job.id == job_assoc.job_id).one() + + return TaskLog( + id=task_id, + name=step_name, + start_time=job.create_time.isoformat() if job.create_time else None, + end_time=job.update_time.isoformat() if job.update_time else None, + stdout=f"/api/jobs/{self._security.encode_id(job.id)}/stdout", + stderr=f"/api/jobs/{self._security.encode_id(job.id)}/stderr", + exit_code=job.exit_code, + ) + + else: + # No job step + if job_index is not None: + raise exceptions.ObjectNotFound(f"Task {task_id} specifies job_index but step has no jobs") + + return TaskLog( + id=str(step_order), + name=step_name, + start_time=step.create_time.isoformat() if step.create_time else None, + end_time=step.update_time.isoformat() if step.update_time else None, + ) + + def _invocation_to_run_id(self, invocation: WorkflowInvocation) -> RunId: + """Convert a WorkflowInvocation to a WES RunId.""" + return self._invocation_id_to_run_id(self._security.encode_id(invocation.id)) + + def _invocation_id_to_run_id(self, 
invocation_id: str) -> RunId: + """Convert a WorkflowInvocation ID to a WES RunId.""" + return RunId(run_id=invocation_id) + + def _get_invocation( + self, + trans: ProvidesUserContext, + run_id: int, + ) -> WorkflowInvocation: + """Get a workflow invocation by ID. + + Args: + trans: Galaxy transaction/context + run_id: The invocation ID (unencoded) + + Returns: + WorkflowInvocation object + + Raises: + exceptions.ObjectNotFound: If invocation not found + exceptions.AuthenticationRequired: If user cannot access invocation + """ + invocation = trans.sa_session.query(WorkflowInvocation).filter_by(id=run_id).one_or_none() + + if not invocation: + raise exceptions.ObjectNotFound(f"Invocation {run_id} not found") + + if invocation.history.user_id != trans.user.id: + raise exceptions.AuthenticationRequired(f"Invocation {run_id} not accessible") + + return invocation + + def _invocation_to_run_summary(self, invocation: WorkflowInvocation) -> RunSummary: + """Convert a Galaxy WorkflowInvocation to a WES RunSummary. + + Args: + invocation: Galaxy WorkflowInvocation object + + Returns: + WES RunSummary object + """ + return RunSummary( + run_id=self._security.encode_id(invocation.id), + state=GALAXY_TO_WES_STATE.get(invocation.state or "", State.UNKNOWN), + start_time=invocation.create_time.isoformat() if invocation.create_time else None, + # update_time is the closest available approximation of an end time + end_time=invocation.update_time.isoformat() if invocation.update_time else None, + tags={}, + ) + + def _build_drs_uri(self, trans: SessionRequestContext, dataset_id: int) -> str: + """Build a DRS URI for a dataset. + + Args: + trans: Galaxy transaction/context + dataset_id: Database ID of the dataset + + Returns: + DRS URI string in format drs://drs.{hostname}/hda-{encoded_id} + """ + drs_object_id = f"hda-{trans.security.encode_id(dataset_id, kind='drs')}" + request_url = trans.request.url + drs_uri = f"drs://drs.{request_url.components.netloc}/{drs_object_id}" + return drs_uri + + def _invocation_to_run_log( + self, + trans: SessionRequestContext, + invocation: WorkflowInvocation, + original_request: Optional[RunRequest] = None, + ) -> RunLog: + """Convert a Galaxy WorkflowInvocation to a WES RunLog.
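+ + Example outputs entry (illustrative, with assumed encoded IDs): + {"wf_output_1": {"src": "hda", "id": "f2db41e1fa331b3e", + "drs_uri": "drs://drs.galaxy.example.org/hda-<drs-encoded-id>"}}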
+ + Args: + trans: Galaxy transaction/context + invocation: Galaxy WorkflowInvocation object + original_request: Original RunRequest if available + + Returns: + WES RunLog object with outputs and DRS URIs + """ + # Get invocation outputs formatted as dict + invocation_dict = invocation.to_dict(view="element") + outputs_dict = {} + + # Add dataset outputs with DRS URIs + if "outputs" in invocation_dict: + for output_name, output_info in invocation_dict["outputs"].items(): + if isinstance(output_info, dict) and output_info.get("src") == "hda": + output_id = output_info.get("id") + if output_id: + output_id_encoded = trans.security.encode_id(output_id) + drs_uri = self._build_drs_uri(trans, output_id) + output_info["drs_uri"] = drs_uri + output_info["id"] = output_id_encoded + outputs_dict[output_name] = output_info + + # Add collection outputs with DRS URIs + if "output_collections" in invocation_dict: + for output_name, output_info in invocation_dict["output_collections"].items(): + if isinstance(output_info, dict) and output_info.get("src") == "hdca": + output_id = output_info.get("id") + if output_id: + output_id_encoded = trans.security.encode_id(output_id) + output_info["id"] = output_id_encoded + outputs_dict[output_name] = output_info + + # Add value outputs as-is + if "output_values" in invocation_dict: + outputs_dict.update(invocation_dict["output_values"]) + + # Build task logs URL + encoded_run_id = self._security.encode_id(invocation.id) + task_logs_url = f"/ga4gh/wes/v1/runs/{encoded_run_id}/tasks" + + # This object has fields related to the workflow run but most of them are geared + # toward CLI environments - I don't think they make a lot of sense in terms of Galaxy. + run_log = None + + # Build the RunLog + # Note: task_logs field is deprecated per WES spec - use task_logs_url instead + return RunLog( + run_id=encoded_run_id, + request=original_request, # We don't save this so we often can't recover this and that is probably fine. 
- John + state=GALAXY_TO_WES_STATE.get(invocation.state or "", State.UNKNOWN), + run_log=run_log, + task_logs=None, # Deprecated field - use task_logs_url + task_logs_url=task_logs_url, + outputs=outputs_dict if outputs_dict else None, + ) diff --git a/lib/galaxy/work/context.py b/lib/galaxy/work/context.py index 67aae7c9f2fb..b6b5483aca44 100644 --- a/lib/galaxy/work/context.py +++ b/lib/galaxy/work/context.py @@ -5,6 +5,7 @@ TYPE_CHECKING, ) +from starlette.datastructures import URL from typing_extensions import Literal from galaxy.managers.context import ProvidesHistoryContext @@ -106,6 +107,11 @@ def is_secure(self) -> bool: def get_cookie(self, name): """Return cookie.""" + @property + @abc.abstractmethod + def url(self) -> URL: + """Full URL of the request.""" + class GalaxyAbstractResponse: """Abstract interface to provide access to some response utilities.""" diff --git a/lib/galaxy_test/api/test_wes.py b/lib/galaxy_test/api/test_wes.py new file mode 100644 index 000000000000..2d5d3f5ab08a --- /dev/null +++ b/lib/galaxy_test/api/test_wes.py @@ -0,0 +1,932 @@ +"""Tests for GA4GH Workflow Execution Service (WES) API endpoints.""" + +import base64 +import io +import json +from typing import ( + Any, + Optional, +) +from urllib.parse import urljoin +from uuid import uuid4 + +import pytest +import requests + +from galaxy_test.base.api_asserts import assert_status_code_is +from galaxy_test.base.workflow_fixtures import ( + WORKFLOW_NESTED_OUTPUT, + WORKFLOW_WITH_MAPPED_OUTPUT_COLLECTION, +) +from .test_workflows import BaseWorkflowsApiTestCase + +WORKFLOW_SIMPLE = """ +class: GalaxyWorkflow +name: Simple Workflow +inputs: + input1: data +outputs: + wf_output_1: + outputSource: first_cat/out_file1 +steps: + first_cat: + tool_id: cat1 + in: + input1: input1 +""" + + +def workflow_types_parametrize(func): + """Decorator to parametrize test with both Galaxy workflow formats.""" + return pytest.mark.parametrize("workflow_type", ["gx_workflow_ga", "gx_workflow_format2"])(func) + + +class TestWesApi(BaseWorkflowsApiTestCase): + """Test GA4GH Workflow Execution Service (WES) API endpoints.""" + + def test_wes_service_info(self): + """Test GET /ga4gh/wes/v1/service-info returns expected service info.""" + response = self._wes_get("ga4gh/wes/v1/service-info", authenticated=False) + self._assert_status_code_is(response, 200) + service_info = response.json() + + # Validate structure + assert "id" in service_info + assert "name" in service_info + assert "type" in service_info + assert "version" in service_info + + # Validate WES-specific fields + assert "supported_wes_versions" in service_info + assert "supported_filesystem_protocols" in service_info + assert "workflow_type_versions" in service_info + assert "default_workflow_engine_parameters" in service_info + assert "auth_instructions_url" in service_info + + # Validate supported workflow types + wf_types = service_info["workflow_type_versions"] + assert len(wf_types) > 0 + type_names = set(wf_types.keys()) + assert "gx_workflow_ga" in type_names + assert "gx_workflow_format2" in type_names + + # Validate default engine parameters + params = service_info["default_workflow_engine_parameters"] + assert len(params) > 0 + param_names = {p["name"] for p in params} + assert "history_name" in param_names + assert "history_id" in param_names + assert "preferred_object_store_id" in param_names + assert "use_cached_job" in param_names + + def test_wes_submit_run_with_workflow_url(self): + """Test POST /ga4gh/wes/v1/runs with workflow_url using base64:// 
URI.""" + # Create base64:// URI for workflow content + with self.dataset_populator.test_history() as history_id: + workflow_b64 = base64.b64encode(WORKFLOW_SIMPLE.encode("utf-8")).decode("utf-8") + workflow_url = f"base64://{workflow_b64}" + + # Submit run with workflow_url + data = { + "workflow_type": "gx_workflow_format2", + "workflow_type_version": "v1", + "workflow_params": json.dumps({"input1": self._get_test_dataset_id(history_id)}), + "workflow_url": workflow_url, + } + + response = self._wes_post("ga4gh/wes/v1/runs", data=data) + self._assert_status_code_is(response, 200) + result = response.json() + + # Validate response + assert "run_id" in result + assert result["run_id"] is not None + + def test_wes_submit_run_with_workflow_attachment(self): + """Test POST /ga4gh/wes/v1/runs with workflow_attachment.""" + with self.dataset_populator.test_history() as history_id: + response = self._submit_wes_workflow( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_ga", + input1=self._get_test_dataset_id(history_id), + ) + self._assert_status_code_is(response, 200) + result = response.json() + + # Validate response + assert "run_id" in result + assert result["run_id"] is not None + + def test_wes_submit_run_with_engine_parameters(self): + """Test workflow_engine_parameters handling (history_name, history_id, etc.).""" + # Test with custom history_name + custom_history_name = f"WES Test History {uuid4()}" + with self.dataset_populator.test_history() as history_id: + response = self._submit_wes_workflow( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_format2", + engine_parameters={"history_name": custom_history_name}, + input1=self._get_test_dataset_id(history_id), + ) + self._assert_status_code_is(response, 200) + result = response.json() + run_id = result["run_id"] + + # Get run details and verify history was created with custom name + run_details = self._get_run_details(run_id) + assert run_details is not None + + def test_wes_submit_run_format2_workflow(self): + """Test workflow submission with Format2 (CWL-style) workflow.""" + format2_workflow = """class: GalaxyWorkflow +inputs: + input1: data +outputs: + output1: + outputSource: first_cat/out_file1 +steps: + first_cat: + tool_id: cat1 + in: + input1: input1 +""" + + with self.dataset_populator.test_history() as history_id: + dataset_id = self._get_test_dataset_id(history_id) + response = self._submit_wes_workflow( + workflow_content=format2_workflow, + workflow_type="gx_workflow_format2", + input1=dataset_id, + ) + self._assert_status_code_is(response, 200) + result = response.json() + assert "run_id" in result + + def test_wes_list_runs(self): + """Test GET /ga4gh/wes/v1/runs returns paginated list of runs.""" + with self.dataset_populator.test_history() as history_id: + dataset_id = self._get_test_dataset_id(history_id) + + # Submit multiple runs + run_ids = [] + for _ in range(3): + response = self._submit_wes_workflow(input1=dataset_id) + self._assert_status_code_is(response, 200) + run_ids.append(response.json()["run_id"]) + + # List runs + response = self._wes_get("ga4gh/wes/v1/runs", params={"page_size": 10}) + self._assert_status_code_is(response, 200) + result = response.json() + + # Validate structure + assert "runs" in result + assert isinstance(result["runs"], list) + assert len(result["runs"]) >= 1 + + # Validate run summaries + for run_summary in result["runs"]: + assert "run_id" in run_summary + assert "state" in run_summary + assert "start_time" in run_summary or 
run_summary["start_time"] is None + + def test_wes_list_runs_pagination(self): + """Test pagination with next_page_token follows correctly.""" + + with self.dataset_populator.test_history() as history_id: + dataset_id = self._get_test_dataset_id(history_id) + + # Submit 3 runs and collect their IDs + submitted_ids = [] + for _ in range(3): + response = self._submit_wes_workflow( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_format2", + input1=dataset_id, + ) + self._assert_status_code_is(response, 200) + submitted_ids.append(response.json()["run_id"]) + + # Test page_size=1: should get 3 pages + all_ids_page1 = [] + response = self._wes_get("ga4gh/wes/v1/runs", params={"page_size": 1}) + self._assert_status_code_is(response, 200) + result = response.json() + assert len(result["runs"]) == 1 + all_ids_page1.append(result["runs"][0]["run_id"]) + assert result["next_page_token"] is not None + + # Follow to page 2 + response = self._wes_get( + "ga4gh/wes/v1/runs", params={"page_size": 1, "page_token": result["next_page_token"]} + ) + self._assert_status_code_is(response, 200) + result = response.json() + assert len(result["runs"]) == 1 + all_ids_page1.append(result["runs"][0]["run_id"]) + assert result["next_page_token"] is not None + + # Follow to page 3 + response = self._wes_get( + "ga4gh/wes/v1/runs", params={"page_size": 1, "page_token": result["next_page_token"]} + ) + self._assert_status_code_is(response, 200) + result = response.json() + assert len(result["runs"]) == 1 + all_ids_page1.append(result["runs"][0]["run_id"]) + + # Verify no duplicates and all submitted runs found + assert len(all_ids_page1) == len(set(all_ids_page1)), "Duplicate run_ids in pagination" + for run_id in submitted_ids: + assert run_id in all_ids_page1, f"Submitted run {run_id} not found in paginated results" + + # Test page_size=2: should get 2 pages (2 + 1) + all_ids_page2 = [] + response = self._wes_get("ga4gh/wes/v1/runs", params={"page_size": 2}) + self._assert_status_code_is(response, 200) + result = response.json() + assert len(result["runs"]) == 2 + all_ids_page2.extend([r["run_id"] for r in result["runs"]]) + assert result["next_page_token"] is not None + + # Follow to page 2 (should have 1 item) + response = self._wes_get( + "ga4gh/wes/v1/runs", params={"page_size": 2, "page_token": result["next_page_token"]} + ) + self._assert_status_code_is(response, 200) + result = response.json() + assert len(result["runs"]) >= 1 + all_ids_page2.extend([r["run_id"] for r in result["runs"]]) + + # Verify no duplicates + assert len(set(all_ids_page2)) == len(all_ids_page2), "Duplicate run_ids in pagination" + for run_id in submitted_ids: + assert run_id in all_ids_page2, f"Submitted run {run_id} not found in paginated results" + + def test_wes_get_run(self): + """Test GET /ga4gh/wes/v1/runs/{run_id} returns full run details.""" + with self.dataset_populator.test_history() as history_id: + invocation_id = self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_ga", + input1=self._get_test_dataset_id(history_id), + ) + + # Get run details via WES API + response = self._wes_get(f"ga4gh/wes/v1/runs/{invocation_id}") + self._assert_status_code_is(response, 200) + run_log = response.json() + + # Validate RunLog structure + assert "run_id" in run_log + assert run_log["run_id"] == invocation_id + assert "state" in run_log + assert "request" in run_log + assert "outputs" in run_log + + def test_wes_get_run_outputs_drs_uris(self): + """Test that 
outputs in GET /ga4gh/wes/v1/runs/{run_id} have DRS URIs.""" + with self.dataset_populator.test_history() as history_id: + invocation_id = self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_ga", + input1=self._get_test_dataset_id(history_id), + ) + + # Get run details + response = self._wes_get(f"ga4gh/wes/v1/runs/{invocation_id}") + self._assert_status_code_is(response, 200) + run_log = response.json() + + # Validate outputs have DRS URIs + outputs = run_log["outputs"] + if outputs: + for _, output_value in outputs.items(): + # Output values should be DRS URIs for datasets/collections + if isinstance(output_value, str) and output_value.startswith("drs://"): + assert "/datasets/" in output_value or "/collections/" in output_value + + def test_wes_get_run_status(self): + """Test GET /ga4gh/wes/v1/runs/{run_id}/status returns abbreviated status.""" + with self.dataset_populator.test_history() as history_id: + invocation_id = self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_format2", + input1=self._get_test_dataset_id(history_id), + ) + + # Get run status + status = self._get_run_status_validated(invocation_id) + + # Validate RunStatus structure + assert "run_id" in status + assert status["run_id"] == invocation_id + assert "state" in status + + # State should be a valid WES state + valid_states = [ + "QUEUED", + "INITIALIZING", + "RUNNING", + "PAUSED", + "COMPLETE", + "EXECUTOR_ERROR", + "SYSTEM_ERROR", + "CANCELED", + "CANCELING", + ] + assert status["state"] in valid_states + + def test_wes_cancel_run(self): + """Test POST /ga4gh/wes/v1/runs/{run_id}/cancel cancels workflow.""" + with self.dataset_populator.test_history() as history_id: + invocation_id = self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_format2", + input1=self._get_test_dataset_id(history_id), + ) + + # Cancel run + response = self._wes_post(f"ga4gh/wes/v1/runs/{invocation_id}/cancel") + self._assert_status_code_is(response, 200) + result = response.json() + + # Validate response + assert "run_id" in result + assert result["run_id"] == invocation_id + + def test_wes_cancel_run_completed_workflow(self): + """Test cancelling a completed workflow returns appropriate state.""" + with self.dataset_populator.test_history() as history_id: + invocation_id = self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_format2", + input1=self._get_test_dataset_id(history_id), + history_id=history_id, + ) + + self.workflow_populator.wait_for_invocation_and_jobs(history_id, None, invocation_id, assert_ok=True) + + # Verify workflow is complete + invocation = self.workflow_populator.get_invocation(invocation_id) + assert invocation["state"] == "scheduled" + + # Try to cancel (should succeed but workflow already complete) + response = self._wes_post(f"ga4gh/wes/v1/runs/{invocation_id}/cancel") + self._assert_status_code_is(response, 200) + + def test_wes_state_mapping(self): + """Test state mapping between Galaxy invocation states and WES states.""" + with self.dataset_populator.test_history() as history_id: + # Run workflow + invocation_id = self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_format2", + input1=self._get_test_dataset_id(history_id), + history_id=history_id, + ) + + # Get status via WES API + status = 
self._get_run_status_validated(invocation_id) + + # Verify state is properly mapped + # After waiting for completion, should be COMPLETE or similar + assert status["state"] in [ + "QUEUED", + "INITIALIZING", + "RUNNING", + "PAUSED", + "COMPLETE", + "EXECUTOR_ERROR", + "SYSTEM_ERROR", + "CANCELED", + "CANCELING", + ] + + def test_wes_error_handling_missing_workflow_type(self): + """Test error handling when workflow_type is missing.""" + with self.dataset_populator.test_history() as history_id: + dataset_id = self._get_test_dataset_id(history_id) + + data = { + "workflow_type_version": "v1", + "workflow_params": json.dumps({"input1": dataset_id}), + } + + response = self._wes_post("ga4gh/wes/v1/runs", data=data) + # Should fail due to missing required field + assert response.status_code in [400, 422] + + def test_wes_error_handling_nonexistent_run(self): + """Test error handling for nonexistent run ID.""" + fake_run_id = str(uuid4()) + + response = self._wes_get(f"ga4gh/wes/v1/runs/{fake_run_id}") + assert response.status_code in [404, 400] + + @workflow_types_parametrize + def test_wes_workflow_with_inputs_and_outputs(self, workflow_type: str): + """Test workflow submission and retrieval with complex inputs/outputs.""" + workflow_yaml = """class: GalaxyWorkflow +inputs: + input1: data + input2: data +outputs: + output1: + outputSource: cat_tool/out_file1 +steps: + cat_tool: + tool_id: cat + in: + input1: input1 + input2: input2 +""" + with self.dataset_populator.test_history() as history_id: + # Get test datasets + dataset1 = self.dataset_populator.new_dataset(history_id, content="input1 content") + dataset2 = self.dataset_populator.new_dataset(history_id, content="input2 content") + + # Run workflow with two inputs + inputs_dict = { + "input1": self._ds_entry(dataset1), + "input2": self._ds_entry(dataset2), + } + + response = self._submit_wes_workflow( + workflow_content=workflow_yaml, + workflow_type=workflow_type, + history_id=history_id, + **inputs_dict, + ) + + self._assert_status_code_is(response, 200) + result = response.json() + assert "run_id" in result + invocation_id = result["run_id"] + + self.workflow_populator.wait_for_invocation_and_jobs(history_id, None, invocation_id, assert_ok=True) + + # Get run details + response = self._wes_get(f"ga4gh/wes/v1/runs/{invocation_id}") + self._assert_status_code_is(response, 200) + run_log = response.json() + + # Validate outputs + assert "outputs" in run_log + assert isinstance(run_log["outputs"], dict) + + def test_wes_cannot_run_against_other_users_history(self): + """Test that WES prevents submitting workflows to other users' histories.""" + # Create workflow with current user + with self.dataset_populator.test_history() as my_history_id: + dataset_id = self._get_test_dataset_id(my_history_id) + + # Create history with different user + with self._different_user(): + other_history_id = self.dataset_populator.new_history() + + # Try to submit workflow using the other user's history via WES + # This should fail with 404 (history not found or not accessible) + response = self._submit_wes_workflow( + workflow_content=WORKFLOW_SIMPLE, + workflow_type="gx_workflow_ga", + history_id=other_history_id, + input1=dataset_id, + ) + self._assert_status_code_is(response, 404) + + def test_wes_submit_run_with_gxworkflow_uri(self): + """Test submitting workflow using gxworkflow:// URI scheme.""" + with self.dataset_populator.test_history() as history_id: + dataset_id = self._get_test_dataset_id(history_id) + + # First, create and upload a workflow to 
get its ID + workflow_id = self._upload_yaml_workflow(WORKFLOW_SIMPLE) + + # Construct gxworkflow:// URI (without instance parameter, defaults to false) + workflow_uri = f"gxworkflow://{workflow_id}" + + # Submit workflow using the gxworkflow:// URI + data = { + "workflow_type": "gx_workflow_ga", + "workflow_type_version": "v1", + "workflow_params": json.dumps({"input1": dataset_id}), + "workflow_url": workflow_uri, + } + + response = self._wes_post("ga4gh/wes/v1/runs", data=data) + self._assert_status_code_is(response, 200) + result = response.json() + + # Validate response + assert "run_id" in result + assert result["run_id"] is not None + + # Verify we can retrieve the run + run_id = result["run_id"] + response = self._wes_get(f"ga4gh/wes/v1/runs/{run_id}") + self._assert_status_code_is(response, 200) + + def test_wes_submit_run_with_gxworkflow_uri_with_instance_param(self): + """Test gxworkflow:// URI with instance=true parameter.""" + with self.dataset_populator.test_history() as history_id: + dataset_id = self._get_test_dataset_id(history_id) + + # Upload a workflow to get its ID + workflow_id = self._upload_yaml_workflow(WORKFLOW_SIMPLE) + latest_instance_id = self._latest_instance_id(workflow_id, history_id) + + # Construct gxworkflow:// URI with instance=true + workflow_uri = f"gxworkflow://{latest_instance_id}?instance=true" + + # Submit workflow using the gxworkflow:// URI + data = { + "workflow_type": "gx_workflow_ga", + "workflow_type_version": "v1", + "workflow_params": json.dumps({"input1": dataset_id}), + "workflow_url": workflow_uri, + } + + response = self._wes_post("ga4gh/wes/v1/runs", data=data) + self._assert_status_code_is(response, 200) + run_id = response.json()["run_id"] + + # Validate response + assert run_id is not None + + def test_wes_job_stdout_endpoint(self): + """Test /api/jobs/{job_id}/stdout endpoint returns job stdout.""" + with self.dataset_populator.test_history() as history_id: + self._run_simple_workflow_in_history(history_id) + + # Get job IDs from the job database + jobs = self.dataset_populator.history_jobs(history_id) + assert len(jobs) > 0 + + job_id = jobs[0]["id"] + + # Test stdout endpoint + stdout_content = self._get_job_output_content(job_id, "stdout") + assert stdout_content is not None + + def test_wes_job_stderr_endpoint(self): + """Test /api/jobs/{job_id}/stderr endpoint returns job stderr.""" + with self.dataset_populator.test_history() as history_id: + self._run_simple_workflow_in_history(history_id) + + # Get job IDs + jobs = self.dataset_populator.history_jobs(history_id) + assert len(jobs) > 0 + + job_id = jobs[0]["id"] + + # Test stderr endpoint + stderr_content = self._get_job_output_content(job_id, "stderr") + assert stderr_content is not None + + def test_wes_get_run_task_logs_in_run_log(self): + """Test that RunLog has task_logs_url (task_logs deprecated per WES spec).""" + with self.dataset_populator.test_history() as history_id: + invocation_id, dataset_id = self._run_simple_workflow_in_history(history_id) + + # Get run details + run_log = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{invocation_id}") + + # Verify task_logs_url is present (task_logs field is deprecated) + assert "task_logs_url" in run_log + assert run_log["task_logs_url"] is not None + assert "/tasks" in run_log["task_logs_url"] + + # Verify task_logs is None per WES spec deprecation + assert "task_logs" in run_log + assert run_log["task_logs"] is None + + def test_wes_get_run_tasks_list(self): + """Test /runs/{run_id}/tasks endpoint returns list of 
tasks.""" + with self.dataset_populator.test_history() as history_id: + invocation_id, _ = self._run_simple_workflow_in_history(history_id) + + # Get tasks via endpoint + task_list_response = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{invocation_id}/tasks") + + # Verify structure + assert "task_logs" in task_list_response + assert "next_page_token" in task_list_response + assert task_list_response["task_logs"] is not None + + # Verify task list + task_logs = task_list_response["task_logs"] + assert isinstance(task_logs, list) + assert len(task_logs) > 0 + + def test_wes_get_run_task_detail(self): + """Test /runs/{run_id}/tasks/{task_id} endpoint returns task details.""" + with self.dataset_populator.test_history() as history_id: + invocation_id, _ = self._run_simple_workflow_in_history(history_id) + + # Get task ID + task_id = self._get_task_list_and_extract_first_id(invocation_id) + + # Get specific task + task = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{invocation_id}/tasks/{task_id}") + + # Verify task details + assert task["id"] == task_id + assert "name" in task + + def test_wes_get_run_task_not_found(self): + """Test 404 when requesting nonexistent task.""" + with self.dataset_populator.test_history() as history_id: + invocation_id, _ = self._run_simple_workflow_in_history(history_id) + + # Request nonexistent task + response = self._wes_get(f"ga4gh/wes/v1/runs/{invocation_id}/tasks/999999") + assert response.status_code == 404 + + @workflow_types_parametrize + def test_wes_run_workflow_with_subworkflow_and_validate_output_content(self, workflow_type: str): + """Test running subworkflow via WES and validating output content using workflow outputs.""" + with self.dataset_populator.test_history() as history_id: + dataset = self.dataset_populator.new_dataset_from_test_data( + history_id, self.test_data_resolver, "1.bed", "bed" + ) + + # Submit workflow via WES + invocation_id = self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_NESTED_OUTPUT, + workflow_type=workflow_type, + history_id=history_id, + outer_input=self._ds_entry(dataset), + ) + + # Wait for completion + self.workflow_populator.wait_for_invocation_and_jobs(history_id, None, invocation_id, assert_ok=True) + + # Get run details from WES API + run_log = self._get_run_details(invocation_id) + + # Check that outputs are present + assert "outputs" in run_log + outputs = run_log["outputs"] + assert isinstance(outputs, dict) + + assert len(outputs) == 2 + assert "nested_output" in outputs + assert "outer_output" in outputs + + outer_output = outputs["outer_output"] + nested_output = outputs["nested_output"] + + assert outer_output + assert nested_output + + content = self.dataset_populator.get_history_dataset_content(history_id, dataset=outer_output) + assert ( + content + == "chrX\t152691446\t152691471\tCCDS14735.1_cds_0_0_chrX_152691447_f\t0\t+\nchrX\t152691446\t152691471\tCCDS14735.1_cds_0_0_chrX_152691447_f\t0\t+\n" + ) + + @workflow_types_parametrize + def test_wes_run_workflow_with_mapped_output_and_validate_output_content(self, workflow_type: str): + """Test running workflows with mapping via WES and validating output content using workflow outputs.""" + + with self.dataset_populator.test_history() as history_id: + fetch_response = self.dataset_collection_populator.create_list_in_history(history_id, wait=True) + hdca1 = self.dataset_collection_populator.wait_for_fetched_collection(fetch_response) + + # Submit workflow via WES + invocation_id = 
self._submit_wes_workflow_and_get_invocation_id( + workflow_content=WORKFLOW_WITH_MAPPED_OUTPUT_COLLECTION, + workflow_type=workflow_type, + history_id=history_id, + input1=self._ds_entry(hdca1), + ) + assert invocation_id is not None + + # Wait for completion + self.workflow_populator.wait_for_invocation_and_jobs(history_id, None, invocation_id, assert_ok=True) + + # Get tasks via endpoint + task_list_response = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{invocation_id}/tasks") + task_logs = task_list_response["task_logs"] + + # Expect 4 tasks: input step (0) + 3 collection mapping jobs (1.0, 1.1, 1.2) + assert len(task_logs) == 4 + task_ids = {t["id"] for t in task_logs} + assert task_ids == {"0", "1.0", "1.1", "1.2"} + + # Test getting details for simple task ID (input step) + input_task = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{invocation_id}/tasks/0") + assert input_task["id"] == "0" + assert "name" in input_task + + # Test getting details for collection mapping job task IDs + for job_index in range(3): + task_id = f"1.{job_index}" + task = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{invocation_id}/tasks/{task_id}") + assert task["id"] == task_id + assert "name" in task + assert "stdout" in task + assert "stderr" in task + + # Get run details from WES API + run_log = self._get_run_details(invocation_id) + + # Check that outputs are present + assert "outputs" in run_log + outputs = run_log["outputs"] + assert isinstance(outputs, dict) + assert "wf_output_1" in outputs + + wf_output_1 = outputs["wf_output_1"] + assert isinstance(wf_output_1, dict) + assert wf_output_1["src"] == "hdca" + dataset_collection = self.dataset_populator.get_history_collection_details( + history_id, content_id=wf_output_1["id"] + ) + assert dataset_collection + + def _url_join(self, suffix: str) -> str: + """Join a suffix with the base URL (not /api/ prefixed for GA4GH endpoints).""" + return urljoin(self.url, suffix) + + def _get_test_dataset_id(self, history_id: str) -> dict[str, str]: + """Get a test dataset ID for workflow inputs.""" + dataset = self.dataset_populator.new_dataset(history_id, content="test data") + return self._ds_entry(dataset) + + def _wes_post(self, endpoint: str, authenticated: bool = True, **kwargs: Any) -> requests.Response: + """Make POST request to WES API endpoint. + + Args: + endpoint: API endpoint path + authenticated: If True, includes API key header (default: True) + **kwargs: Additional arguments to pass to requests.post() + """ + api_url = self._url_join(endpoint) + headers = kwargs.pop("headers", {}) + if authenticated: + headers["x-api-key"] = self.galaxy_interactor.api_key + return requests.post(api_url, headers=headers, **kwargs) + + def _wes_get(self, endpoint: str, authenticated: bool = True, **kwargs: Any) -> requests.Response: + """Make GET request to WES API endpoint. 
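+ + Example (as used in the tests above): + self._wes_get("ga4gh/wes/v1/service-info", authenticated=False)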
+ + Args: + endpoint: API endpoint path + authenticated: If True, includes API key header (default: True) + **kwargs: Additional arguments to pass to requests.get() + """ + api_url = self._url_join(endpoint) + headers = kwargs.pop("headers", {}) + if authenticated: + headers["x-api-key"] = self.galaxy_interactor.api_key + return requests.get(api_url, headers=headers, **kwargs) + + def _submit_wes_workflow( + self, + workflow_content: Optional[str] = None, + workflow_type: str = "gx_workflow_ga", + workflow_type_version: str = "v1", + engine_parameters: Optional[dict[str, Any]] = None, + history_id: Optional[str] = None, + **workflow_inputs: Any, + ) -> requests.Response: + """Helper to submit WES workflow with standard setup. + + Args: + workflow_content: YAML workflow definition (defaults to WORKFLOW_SIMPLE) + workflow_type: Workflow type (e.g., "gx_workflow_ga") + workflow_type_version: Workflow type version (default "v1") + engine_parameters: Optional dict of workflow engine parameters + **workflow_inputs: Keyword args become workflow parameters (e.g., input1=dataset_id) + + Returns: + Response object from submission + """ + if workflow_content is None: + workflow_content = WORKFLOW_SIMPLE + + # Build workflow params from inputs + params = {k: v for k, v in workflow_inputs.items() if v is not None} + + data: dict[str, str] = { + "workflow_type": workflow_type, + "workflow_type_version": workflow_type_version, + "workflow_params": json.dumps(params), + } + + if history_id: + if engine_parameters is None: + engine_parameters = {} + engine_parameters["history_id"] = history_id + if engine_parameters: + data["workflow_engine_parameters"] = json.dumps(engine_parameters) + + # For GA format, upload/download to normalize; for format2, pass directly + if workflow_type == "gx_workflow_ga": + workflow_id = self._upload_yaml_workflow(workflow_content) + workflow_dict = self.workflow_populator.download_workflow(workflow_id) + attachment_content = json.dumps(workflow_dict).encode("utf-8") + else: + # For format2 and other non-GA types, encode directly without conversion + attachment_content = workflow_content.encode("utf-8") + + files: dict[str, io.BytesIO] = {"workflow_attachment": io.BytesIO(attachment_content)} + return self._wes_post("ga4gh/wes/v1/runs", files=files, data=data) + + def _submit_wes_workflow_and_get_invocation_id( + self, + workflow_content: Optional[str] = None, + workflow_type: str = "gx_workflow_ga", + workflow_type_version: str = "v1", + engine_parameters: Optional[dict[str, Any]] = None, + history_id: Optional[str] = None, + **workflow_inputs: Any, + ) -> str: + response = self._submit_wes_workflow( + workflow_content=workflow_content, + workflow_type=workflow_type, + workflow_type_version=workflow_type_version, + engine_parameters=engine_parameters, + history_id=history_id, + **workflow_inputs, + ) + self._assert_status_code_is(response, 200) + invocation = response.json() + assert "run_id" in invocation + return invocation["run_id"] + + def _get_run_details(self, run_id: str) -> dict[str, Any]: + """Helper to get run details from WES API.""" + response = self._wes_get(f"ga4gh/wes/v1/runs/{run_id}") + assert_status_code_is(response, 200) + return response.json() + + def _wes_get_and_validate(self, endpoint: str) -> dict[str, Any]: + """Helper: GET WES endpoint, validate 200 response, return JSON. 
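+ + Example (as used in the tests above): + run_log = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{invocation_id}")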
+    def _wes_get_and_validate(self, endpoint: str) -> dict[str, Any]:
+        """Helper: GET WES endpoint, validate 200 response, return JSON.
+
+        Args:
+            endpoint: WES API endpoint path
+
+        Returns:
+            Parsed JSON response
+
+        Raises:
+            AssertionError: If response status is not 200
+        """
+        response = self._wes_get(endpoint)
+        self._assert_status_code_is(response, 200)
+        return response.json()
+
+    def _run_simple_workflow_in_history(self, history_id: str) -> tuple[str, dict[str, str]]:
+        """Helper: Submit WORKFLOW_SIMPLE and wait for completion in given history.
+
+        Args:
+            history_id: History ID to run workflow in
+
+        Returns:
+            Tuple of (invocation_id, dataset_entry)
+        """
+        dataset_id = self._get_test_dataset_id(history_id)
+        invocation_id = self._submit_wes_workflow_and_get_invocation_id(
+            workflow_content=WORKFLOW_SIMPLE,
+            workflow_type="gx_workflow_ga",
+            history_id=history_id,
+            input1=dataset_id,
+        )
+        self.workflow_populator.wait_for_invocation_and_jobs(history_id, None, invocation_id, assert_ok=True)
+        return invocation_id, dataset_id
+
+    def _get_run_status_validated(self, run_id: str) -> dict[str, Any]:
+        """GET run status, validate 200 response, return status dict."""
+        response = self._wes_get(f"ga4gh/wes/v1/runs/{run_id}/status")
+        self._assert_status_code_is(response, 200)
+        return response.json()
+
+    def _get_task_list_and_extract_first_id(self, run_id: str) -> str:
+        """GET task list, validate 200, extract and return first task ID."""
+        task_list_response = self._wes_get_and_validate(f"ga4gh/wes/v1/runs/{run_id}/tasks")
+        task_list = task_list_response["task_logs"]
+        assert len(task_list) > 0, "No tasks found in task list"
+        return task_list[0]["id"]
+
+    def _get_job_output_content(self, job_id: str, output_type: str = "stdout") -> str:
+        """GET job stdout/stderr, validate 200 response, return content.
+
+        Args:
+            job_id: Job ID
+            output_type: "stdout" or "stderr" (default: "stdout")
+        """
+        api_key = self.galaxy_interactor.api_key
+        headers = {}
+        if api_key:
+            headers["x-api-key"] = api_key
+
+        endpoint = f"api/jobs/{job_id}/{output_type}"
+        response = requests.get(
+            urljoin(self.url, endpoint),
+            headers=headers,
+        )
+        self._assert_status_code_is(response, 200)
+        return response.text
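Unlike the JSON WES routes, the job stdout/stderr endpoints that back `_get_job_output_content` return plain text. A standalone sketch, with the base URL and credentials as placeholders:

    import requests

    def fetch_job_output(base_url: str, api_key: str, job_id: str, stream: str = "stdout") -> str:
        """Fetch /api/jobs/{job_id}/stdout or /api/jobs/{job_id}/stderr as plain text."""
        assert stream in ("stdout", "stderr")
        response = requests.get(
            f"{base_url}api/jobs/{job_id}/{stream}",
            headers={"x-api-key": api_key},
        )
        response.raise_for_status()
        return response.text  # plain text body, not JSON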
diff --git a/lib/galaxy_test/api/test_workflows.py b/lib/galaxy_test/api/test_workflows.py
index fd93c0ca6061..0089f483abcd 100644
--- a/lib/galaxy_test/api/test_workflows.py
+++ b/lib/galaxy_test/api/test_workflows.py
@@ -248,6 +248,17 @@ def _assert_is_runtime_input(self, tool_state_value):
         assert "__class__" in tool_state_value
         assert tool_state_value["__class__"] == "RuntimeValue"
 
+    def _show_workflow(self, workflow_id):
+        show_response = self._get(f"workflows/{workflow_id}")
+        self._assert_status_code_is(show_response, 200)
+        return show_response.json()
+
+    def _latest_instance_id(self, workflow_id: str, history_id: Optional[str] = None) -> str:
+        # Get latest version, to get latest instance id and confirm the name has changed
+        latest_download = self._download_workflow(workflow_id, style="run", history_id=history_id)
+        latest_instance_id = latest_download["workflow_id"]
+        return latest_instance_id
+
 
 class ChangeDatatypeTests:
     dataset_populator: DatasetPopulator
@@ -8544,11 +8555,6 @@ def __import_workflow(self, workflow_id, deprecated_route=False):
         )
         return self._post(route, import_data)
 
-    def _show_workflow(self, workflow_id):
-        show_response = self._get(f"workflows/{workflow_id}")
-        self._assert_status_code_is(show_response, 200)
-        return show_response.json()
-
     def _assert_looks_like_instance_workflow_representation(self, workflow):
         self._assert_has_keys(workflow, "url", "owner", "inputs", "annotation", "steps")
         for step in workflow["steps"].values():
diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py
index bf98277d6f7a..5cd67985ca9f 100644
--- a/lib/galaxy_test/base/populators.py
+++ b/lib/galaxy_test/base/populators.py
@@ -515,8 +515,11 @@ def new_dataset_request(
         return run_response
 
     def new_bam_dataset(self, history_id: str, test_data_resolver):
+        return self.new_dataset_from_test_data(history_id, test_data_resolver, "1.bam", "bam")
+
+    def new_dataset_from_test_data(self, history_id: str, test_data_resolver, filename: str, file_type: str):
         return self.new_dataset(
-            history_id, content=open(test_data_resolver.get_filename("1.bam"), "rb"), file_type="bam", wait=True
+            history_id, content=open(test_data_resolver.get_filename(filename), "rb"), file_type=file_type, wait=True
         )
 
     def new_directory_dataset(
@@ -2649,7 +2652,7 @@ def get_invocation_jobs(self, invocation_id: str) -> list[dict[str, Any]]:
         return jobs
 
     def wait_for_invocation_and_jobs(
-        self, history_id: str, workflow_id: str, invocation_id: str, assert_ok: bool = True
+        self, history_id: str, workflow_id: Optional[str], invocation_id: str, assert_ok: bool = True
     ) -> None:
         state = self.wait_for_invocation(workflow_id, invocation_id, assert_ok=assert_ok)
         if assert_ok:
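The populators change above generalizes `new_bam_dataset` into `new_dataset_from_test_data`. A usage sketch inside a test case; the non-BAM filename is hypothetical and assumed to exist in the test data:

    # Existing call sites keep working unchanged:
    bam = self.dataset_populator.new_bam_dataset(history_id, test_data_resolver)

    # ...while arbitrary test-data files can now be loaded with one call:
    tabular = self.dataset_populator.new_dataset_from_test_data(
        history_id, test_data_resolver, "1.tabular", "tabular"
    )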
diff --git a/test/unit/model/test_keyset_token_pagination.py b/test/unit/model/test_keyset_token_pagination.py
new file mode 100644
index 000000000000..60f8ddf11ff8
--- /dev/null
+++ b/test/unit/model/test_keyset_token_pagination.py
@@ -0,0 +1,92 @@
+"""Tests for keyset token pagination."""
+
+import pytest
+
+from galaxy import exceptions
+from galaxy.model.keyset_token_pagination import (
+    KeysetPagination,
+    SingleKeysetToken,
+)
+
+
+@pytest.fixture
+def pagination():
+    """Provide KeysetPagination instance."""
+    return KeysetPagination()
+
+
+class TestSingleKeysetToken:
+    """Test SingleKeysetToken implementation."""
+
+    def test_to_values(self):
+        """Test converting token to values."""
+        token = SingleKeysetToken(last_id=42)
+        assert token.to_values() == [42]
+
+    def test_from_values(self):
+        """Test reconstructing token from values."""
+        token = SingleKeysetToken.from_values([42])
+        assert token.last_id == 42
+
+    def test_from_values_multiple(self):
+        """Test from_values uses first value only."""
+        token = SingleKeysetToken.from_values([42, 100, 200])
+        assert token.last_id == 42
+
+    def test_from_values_empty_raises(self):
+        """Test from_values raises on empty values."""
+        with pytest.raises(ValueError, match="requires at least 1 value"):
+            SingleKeysetToken.from_values([])
+
+
+class TestKeysetPagination:
+    """Test KeysetPagination encoder/decoder."""
+
+    def test_encode_decode_roundtrip(self, pagination):
+        """Test encoding and decoding roundtrip."""
+        original = SingleKeysetToken(last_id=123)
+        encoded = pagination.encode_token(original)
+        decoded = pagination.decode_token(encoded, token_class=SingleKeysetToken)
+        assert decoded is not None
+        assert decoded.last_id == 123
+
+    def test_decode_none_token_returns_none(self, pagination):
+        """Test decoding None returns None."""
+        result = pagination.decode_token(None, token_class=SingleKeysetToken)
+        assert result is None
+
+    def test_decode_empty_string_returns_none(self, pagination):
+        """Test decoding empty string returns None."""
+        result = pagination.decode_token("", token_class=SingleKeysetToken)
+        assert result is None
+
+    def test_decode_invalid_token_raises(self, pagination):
+        """Test decoding invalid token raises MessageException."""
+        with pytest.raises(exceptions.MessageException, match="Invalid page_token"):
+            pagination.decode_token("invalid_token_!@#", token_class=SingleKeysetToken)
+
+    def test_encode_multiple_values(self, pagination):
+        """Test encoding handles multiple values."""
+        # Create custom token class with multiple values
+        from dataclasses import dataclass
+
+        @dataclass
+        class MultiValueToken:
+            val1: int
+            val2: int
+            val3: int
+
+            def to_values(self):
+                return [self.val1, self.val2, self.val3]
+
+            @classmethod
+            def from_values(cls, values):
+                return cls(val1=values[0], val2=values[1], val3=values[2])
+
+        original = MultiValueToken(val1=1, val2=2, val3=3)
+        encoded = pagination.encode_token(original)
+        decoded = pagination.decode_token(encoded, token_class=MultiValueToken)
+        assert decoded is not None
+        assert decoded.val1 == 1
+        assert decoded.val2 == 2
+        assert decoded.val3 == 3
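The tests above pin down the token contract (to_values/from_values, None or "" decoding to None, MessageException on garbage) without showing the implementation. One plausible shape consistent with them, sketched under the assumption of URL-safe base64 over colon-joined values; this is a guess at, not a copy of, galaxy.model.keyset_token_pagination:

    import base64
    import binascii
    from dataclasses import dataclass

    from galaxy.exceptions import MessageException


    @dataclass
    class SingleKeysetToken:
        last_id: int

        def to_values(self) -> list:
            return [self.last_id]

        @classmethod
        def from_values(cls, values: list) -> "SingleKeysetToken":
            if not values:
                raise ValueError("SingleKeysetToken requires at least 1 value")
            return cls(last_id=values[0])  # extra values are ignored, per the tests


    class KeysetPagination:
        def encode_token(self, token) -> str:
            # Serialize the token's values and make the result URL-safe.
            raw = ":".join(str(v) for v in token.to_values()).encode("utf-8")
            return base64.urlsafe_b64encode(raw).decode("ascii")

        def decode_token(self, encoded, token_class):
            if not encoded:
                return None  # None or "" means "start from the first page"
            try:
                raw = base64.urlsafe_b64decode(encoded.encode("ascii")).decode("utf-8")
                values = [int(v) for v in raw.split(":")]
            except (binascii.Error, UnicodeDecodeError, ValueError):
                # Matches the error the tests expect for malformed tokens.
                raise MessageException("Invalid page_token")
            return token_class.from_values(values)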