diff --git a/backend/.sqlx/query-00d1874fcbd5a7b50e730c2a1c6c9b31e857d5b805dd226e3c1c17825b81b3d3.json b/backend/.sqlx/query-00d1874fcbd5a7b50e730c2a1c6c9b31e857d5b805dd226e3c1c17825b81b3d3.json new file mode 100644 index 0000000000000..aac0401853f05 --- /dev/null +++ b/backend/.sqlx/query-00d1874fcbd5a7b50e730c2a1c6c9b31e857d5b805dd226e3c1c17825b81b3d3.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_completed_job WHERE created_at <= now() - ($1::bigint::text || ' s')::interval AND started_at + ((duration_ms/1000 + $1::bigint) || ' s')::interval <= now() RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "00d1874fcbd5a7b50e730c2a1c6c9b31e857d5b805dd226e3c1c17825b81b3d3" +} diff --git a/backend/.sqlx/query-011241a9df1c5c3962844f85d898527f2440dd512f592d2265753a684395736b.json b/backend/.sqlx/query-011241a9df1c5c3962844f85d898527f2440dd512f592d2265753a684395736b.json new file mode 100644 index 0000000000000..96f730a114731 --- /dev/null +++ b/backend/.sqlx/query-011241a9df1c5c3962844f85d898527f2440dd512f592d2265753a684395736b.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT leaf_jobs->$1::text AS \"leaf_jobs: Json>\", parent_job\n FROM v2_queue\n WHERE COALESCE((SELECT root_job FROM v2_queue WHERE id = $2), $2) = id AND workspace_id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "leaf_jobs: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "parent_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + null, + true + ] + }, + "hash": "011241a9df1c5c3962844f85d898527f2440dd512f592d2265753a684395736b" +} diff --git a/backend/.sqlx/query-016bf078cdea0aae4a05ae7e004fad573d5c7cbdca975edc34f36890c824c44b.json b/backend/.sqlx/query-016bf078cdea0aae4a05ae7e004fad573d5c7cbdca975edc34f36890c824c44b.json new file mode 100644 index 0000000000000..28a9e33022432 --- /dev/null +++ b/backend/.sqlx/query-016bf078cdea0aae4a05ae7e004fad573d5c7cbdca975edc34f36890c824c44b.json @@ -0,0 +1,71 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n email AS \"email!\",\n created_by AS \"created_by!\",\n parent_job, permissioned_as AS \"permissioned_as!\",\n script_path, schedule_path, flow_step_id, root_job,\n scheduled_for AS \"scheduled_for!: chrono::DateTime\"\n FROM queue WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "email!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "parent_job", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "permissioned_as!", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "schedule_path", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "flow_step_id", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "root_job", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "scheduled_for!: chrono::DateTime", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "016bf078cdea0aae4a05ae7e004fad573d5c7cbdca975edc34f36890c824c44b" +} diff --git 
a/backend/.sqlx/query-036c84bb9ce72748956bc9c18fbe276444fab025a281dc4784596b0e31c1cb9d.json b/backend/.sqlx/query-036c84bb9ce72748956bc9c18fbe276444fab025a281dc4784596b0e31c1cb9d.json new file mode 100644 index 0000000000000..dedd3fd2725ea --- /dev/null +++ b/backend/.sqlx/query-036c84bb9ce72748956bc9c18fbe276444fab025a281dc4784596b0e31c1cb9d.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists ix_job_workspace_id_created_at_new_9 ON v2_job (workspace_id, created_at DESC) where kind in ('dependencies', 'flowdependencies', 'appdependencies') AND parent_job IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "036c84bb9ce72748956bc9c18fbe276444fab025a281dc4784596b0e31c1cb9d" +} diff --git a/backend/.sqlx/query-03e79a6b6dad733971c6b729dcf5858c315bd3489366334f624e8a857e7a2520.json b/backend/.sqlx/query-03e79a6b6dad733971c6b729dcf5858c315bd3489366334f624e8a857e7a2520.json new file mode 100644 index 0000000000000..6e355dfa66778 --- /dev/null +++ b/backend/.sqlx/query-03e79a6b6dad733971c6b729dcf5858c315bd3489366334f624e8a857e7a2520.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET last_ping = now() WHERE id = $1 AND workspace_id = $2 AND canceled = false", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "03e79a6b6dad733971c6b729dcf5858c315bd3489366334f624e8a857e7a2520" +} diff --git a/backend/.sqlx/query-0544992727a484837d5dd4366ae52417a36c01702e9b56169919ef74ee8c3445.json b/backend/.sqlx/query-0544992727a484837d5dd4366ae52417a36c01702e9b56169919ef74ee8c3445.json new file mode 100644 index 0000000000000..7beefb8af7ba5 --- /dev/null +++ b/backend/.sqlx/query-0544992727a484837d5dd4366ae52417a36c01702e9b56169919ef74ee8c3445.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM v2_queue WHERE id = ANY($1) AND schedule_path IS NULL AND ($2::text[] IS NULL OR tag = ANY($2))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "UuidArray", + "TextArray" + ] + }, + "nullable": [ + true + ] + }, + "hash": "0544992727a484837d5dd4366ae52417a36c01702e9b56169919ef74ee8c3445" +} diff --git a/backend/.sqlx/query-0616f6e1320626fc7e343b352f216d38ef746ef0d004dd939cc76010a80476c8.json b/backend/.sqlx/query-0616f6e1320626fc7e343b352f216d38ef746ef0d004dd939cc76010a80476c8.json new file mode 100644 index 0000000000000..2db85130eb93d --- /dev/null +++ b/backend/.sqlx/query-0616f6e1320626fc7e343b352f216d38ef746ef0d004dd939cc76010a80476c8.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2), ARRAY['step'], $3)\n WHERE id = $4", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "0616f6e1320626fc7e343b352f216d38ef746ef0d004dd939cc76010a80476c8" +} diff --git a/backend/.sqlx/query-068679795613c4fbfa77e4c96cfe310d1c4399fcbda69b93dd7bf15a93ce39ff.json b/backend/.sqlx/query-068679795613c4fbfa77e4c96cfe310d1c4399fcbda69b93dd7bf15a93ce39ff.json new file mode 100644 index 0000000000000..d118aca234199 --- /dev/null +++ b/backend/.sqlx/query-068679795613c4fbfa77e4c96cfe310d1c4399fcbda69b93dd7bf15a93ce39ff.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH runtime AS (\n SELECT 
GREATEST($8, r.memory_peak) AS memory_peak, flow_status\n FROM v2_job_runtime r\n LEFT JOIN v2_job_flow_runtime f USING (id)\n WHERE r.id = $1\n ), queued AS (\n DELETE FROM v2_job_queue q\n WHERE q.id = $1\n RETURNING\n q.id, q.workspace_id, q.started_at, q.worker,\n COALESCE($5, q.canceled_by) AS canceled_by,\n COALESCE($6, q.canceled_reason) AS canceled_reason\n ) INSERT INTO v2_job_completed (\n id, workspace_id, started_at, worker, memory_peak, flow_status, result,\n canceled_by, canceled_reason,\n duration_ms,\n status\n ) SELECT\n id, workspace_id, started_at, worker, memory_peak, flow_status, $7,\n canceled_by, canceled_reason,\n COALESCE($2::bigint, CASE\n WHEN started_at IS NULL THEN 0\n ELSE (EXTRACT('epoch' FROM NOW()) - EXTRACT('epoch' FROM started_at)) * 1000\n END) AS duration_ms,\n CASE\n WHEN $4::BOOLEAN THEN 'skipped'::job_status\n WHEN canceled_by IS NOT NULL THEN 'canceled'::job_status\n WHEN $3::BOOLEAN THEN 'success'::job_status\n ELSE 'failure'::job_status\n END AS status\n FROM queued, runtime\n ON CONFLICT (id) DO UPDATE SET status = EXCLUDED.status, result = EXCLUDED.result\n RETURNING status = 'canceled' AS \"canceled!\", duration_ms", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "duration_ms", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int8", + "Bool", + "Bool", + "Varchar", + "Text", + "Jsonb", + "Int4" + ] + }, + "nullable": [ + null, + false + ] + }, + "hash": "068679795613c4fbfa77e4c96cfe310d1c4399fcbda69b93dd7bf15a93ce39ff" +} diff --git a/backend/.sqlx/query-079ee8e7d2c66ce04a47ac23a6265df69f64f2023c102d0b55ced6d94d06b9f8.json b/backend/.sqlx/query-079ee8e7d2c66ce04a47ac23a6265df69f64f2023c102d0b55ced6d94d06b9f8.json new file mode 100644 index 0000000000000..9f135c0f5a4ad --- /dev/null +++ b/backend/.sqlx/query-079ee8e7d2c66ce04a47ac23a6265df69f64f2023c102d0b55ced6d94d06b9f8.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result AS \"result!: Json>\" FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result!: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "079ee8e7d2c66ce04a47ac23a6265df69f64f2023c102d0b55ced6d94d06b9f8" +} diff --git a/backend/.sqlx/query-07a7f1da7ee77324a73eb5b3743e4a801e0c446c55fc9fd8bc75e36d58073bee.json b/backend/.sqlx/query-07a7f1da7ee77324a73eb5b3743e4a801e0c446c55fc9fd8bc75e36d58073bee.json new file mode 100644 index 0000000000000..15f9729ee19ee --- /dev/null +++ b/backend/.sqlx/query-07a7f1da7ee77324a73eb5b3743e4a801e0c446c55fc9fd8bc75e36d58073bee.json @@ -0,0 +1,48 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n substr(concat(coalesce(completed_job.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs,\n mem_peak,\n CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\",\n job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset,\n created_by AS \"created_by!\"\n FROM completed_job\n LEFT JOIN job_logs ON job_logs.job_id = completed_job.id \n WHERE completed_job.workspace_id = $2 AND completed_job.id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "mem_peak", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "flow_status: sqlx::types::Json>", + "type_info": 
"Jsonb" + }, + { + "ordinal": 3, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Uuid" + ] + }, + "nullable": [ + null, + true, + null, + null, + true + ] + }, + "hash": "07a7f1da7ee77324a73eb5b3743e4a801e0c446c55fc9fd8bc75e36d58073bee" +} diff --git a/backend/.sqlx/query-0db2d456de66648bc62764515f54ba8b1715d75012aa5ca7a834d627bd674ac2.json b/backend/.sqlx/query-0db2d456de66648bc62764515f54ba8b1715d75012aa5ca7a834d627bd674ac2.json new file mode 100644 index 0000000000000..fb88a53013938 --- /dev/null +++ b/backend/.sqlx/query-0db2d456de66648bc62764515f54ba8b1715d75012aa5ca7a834d627bd674ac2.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\", result AS \"result: Json>\"\n FROM v2_completed_job WHERE id = ANY($1) AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "UuidArray", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "0db2d456de66648bc62764515f54ba8b1715d75012aa5ca7a834d627bd674ac2" +} diff --git a/backend/.sqlx/query-0dea444bfabee4cc67d100870786ed2a1f91f6cc5fb0a3cd55d1bb908068bf3c.json b/backend/.sqlx/query-0dea444bfabee4cc67d100870786ed2a1f91f6cc5fb0a3cd55d1bb908068bf3c.json new file mode 100644 index 0000000000000..4eb1f04712965 --- /dev/null +++ b/backend/.sqlx/query-0dea444bfabee4cc67d100870786ed2a1f91f6cc5fb0a3cd55d1bb908068bf3c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_queue WHERE running = true AND email = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "0dea444bfabee4cc67d100870786ed2a1f91f6cc5fb0a3cd55d1bb908068bf3c" +} diff --git a/backend/.sqlx/query-0df84fc35f2780ceb7c473b0165ebab93a4bc1bcab166aae68244ab1f3d4df9f.json b/backend/.sqlx/query-0df84fc35f2780ceb7c473b0165ebab93a4bc1bcab166aae68244ab1f3d4df9f.json new file mode 100644 index 0000000000000..e54bd61306227 --- /dev/null +++ b/backend/.sqlx/query-0df84fc35f2780ceb7c473b0165ebab93a4bc1bcab166aae68244ab1f3d4df9f.json @@ -0,0 +1,103 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO completed_job AS cj\n ( workspace_id\n , id\n , parent_job\n , created_by\n , created_at\n , started_at\n , duration_ms\n , success\n , script_hash\n , script_path\n , args\n , result\n , raw_code\n , raw_lock\n , canceled\n , canceled_by\n , canceled_reason\n , job_kind\n , schedule_path\n , permissioned_as\n , flow_status\n , raw_flow\n , is_flow_step\n , is_skipped\n , language\n , email\n , visible_to_owner\n , mem_peak\n , tag\n , priority\n )\n VALUES ($1, $2, $3, $4, $5, COALESCE($6, now()), COALESCE($30::bigint, (EXTRACT('epoch' FROM (now())) - EXTRACT('epoch' FROM (COALESCE($6, now()))))*1000), $7, $8, $9,$10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29)\n ON CONFLICT (id) DO UPDATE SET success = $7, result = $11 RETURNING duration_ms AS \"duration_ms!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "duration_ms!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Uuid", + "Uuid", + "Varchar", + "Timestamptz", + "Timestamptz", + "Bool", + "Int8", + "Varchar", + "Jsonb", + "Jsonb", + 
"Text", + "Text", + "Bool", + "Varchar", + "Text", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + "Varchar", + "Varchar", + "Jsonb", + "Jsonb", + "Bool", + "Bool", + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp" + ] + } + } + }, + "Varchar", + "Bool", + "Int4", + "Varchar", + "Int2", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "0df84fc35f2780ceb7c473b0165ebab93a4bc1bcab166aae68244ab1f3d4df9f" +} diff --git a/backend/.sqlx/query-0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718.json b/backend/.sqlx/query-0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718.json new file mode 100644 index 0000000000000..19fe39406796e --- /dev/null +++ b/backend/.sqlx/query-0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET workspace_id = $1 WHERE workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718" +} diff --git a/backend/.sqlx/query-0ef638eb62cb8b285cb20855679486b78eae82901a0128b9c9c837c9e9e91212.json b/backend/.sqlx/query-0ef638eb62cb8b285cb20855679486b78eae82901a0128b9c9c837c9e9e91212.json new file mode 100644 index 0000000000000..85523ee5c3736 --- /dev/null +++ b/backend/.sqlx/query-0ef638eb62cb8b285cb20855679486b78eae82901a0128b9c9c837c9e9e91212.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n success AS \"success!\",\n result AS \"result: Json>\",\n started_at AS \"started_at!\"FROM completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4\n ORDER BY created_at DESC\n LIMIT $5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "started_at!", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Uuid", + "Int8" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "0ef638eb62cb8b285cb20855679486b78eae82901a0128b9c9c837c9e9e91212" +} diff --git a/backend/.sqlx/query-111ee00fa6661fd4d3bb1d2e567fdaa1cbd3fe93b590b97c549ff2ad0016da1a.json b/backend/.sqlx/query-111ee00fa6661fd4d3bb1d2e567fdaa1cbd3fe93b590b97c549ff2ad0016da1a.json new file mode 100644 index 0000000000000..bc35c5b2f72f2 --- /dev/null +++ b/backend/.sqlx/query-111ee00fa6661fd4d3bb1d2e567fdaa1cbd3fe93b590b97c549ff2ad0016da1a.json @@ -0,0 +1,66 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT kind AS \"kind: JobKind\", runnable_id, runnable_path,\n flow_status AS \"flow_status: Json>\"\n FROM v2_job JOIN v2_job_completed USING (id)\n WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "kind: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + 
"flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "runnable_id", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "runnable_path", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "flow_status: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + true, + true, + true + ] + }, + "hash": "111ee00fa6661fd4d3bb1d2e567fdaa1cbd3fe93b590b97c549ff2ad0016da1a" +} diff --git a/backend/.sqlx/query-119469ebfe8572c78ed3ee5ab5b1a6a1cb1b0f31e357b5370f9bb7eab1e20a7b.json b/backend/.sqlx/query-119469ebfe8572c78ed3ee5ab5b1a6a1cb1b0f31e357b5370f9bb7eab1e20a7b.json new file mode 100644 index 0000000000000..bb701df30d8c3 --- /dev/null +++ b/backend/.sqlx/query-119469ebfe8572c78ed3ee5ab5b1a6a1cb1b0f31e357b5370f9bb7eab1e20a7b.json @@ -0,0 +1,27 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH uuid_table as (\n select gen_random_uuid() as uuid from generate_series(1, $6)\n )\n INSERT INTO job\n (id, workspace_id, raw_code, raw_lock, raw_flow, tag)\n (SELECT uuid, $1, $2, $3, $4, $5 FROM uuid_table)\n RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text", + "Jsonb", + "Varchar", + "Int4" + ] + }, + "nullable": [ + true + ] + }, + "hash": "119469ebfe8572c78ed3ee5ab5b1a6a1cb1b0f31e357b5370f9bb7eab1e20a7b" +} diff --git a/backend/.sqlx/query-11d59fb24aeb40f82e6fd11b697f26e14a0ae955fabeecc4a936b95937bf04d1.json b/backend/.sqlx/query-11d59fb24aeb40f82e6fd11b697f26e14a0ae955fabeecc4a936b95937bf04d1.json new file mode 100644 index 0000000000000..3375abd07a1d9 --- /dev/null +++ b/backend/.sqlx/query-11d59fb24aeb40f82e6fd11b697f26e14a0ae955fabeecc4a936b95937bf04d1.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['modules', $4::INTEGER::TEXT, 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['modules', $4::INTEGER::TEXT, 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "11d59fb24aeb40f82e6fd11b697f26e14a0ae955fabeecc4a936b95937bf04d1" +} diff --git a/backend/.sqlx/query-12443d540d4bbfb9f9f913d94b40c912317b0d95589ecb827aea27eb0357291b.json b/backend/.sqlx/query-12443d540d4bbfb9f9f913d94b40c912317b0d95589ecb827aea27eb0357291b.json new file mode 100644 index 0000000000000..990d57d51edfd --- /dev/null +++ b/backend/.sqlx/query-12443d540d4bbfb9f9f913d94b40c912317b0d95589ecb827aea27eb0357291b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "12443d540d4bbfb9f9f913d94b40c912317b0d95589ecb827aea27eb0357291b" +} diff --git a/backend/.sqlx/query-14540eef4594d9282cee3df4f92a7ed2e67243e5c1522850045b2da42fa914bc.json b/backend/.sqlx/query-14540eef4594d9282cee3df4f92a7ed2e67243e5c1522850045b2da42fa914bc.json new file mode 100644 index 0000000000000..b10496550f261 --- 
/dev/null +++ b/backend/.sqlx/query-14540eef4594d9282cee3df4f92a7ed2e67243e5c1522850045b2da42fa914bc.json @@ -0,0 +1,91 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n queue.job_kind AS \"job_kind!: JobKind\",\n queue.script_hash AS \"script_hash: ScriptHash\",\n queue.raw_flow AS \"raw_flow: sqlx::types::Json>\",\n completed_job.parent_job AS \"parent_job: Uuid\",\n completed_job.created_at AS \"created_at!: chrono::NaiveDateTime\",\n completed_job.created_by AS \"created_by!\",\n queue.script_path,\n queue.args AS \"args: sqlx::types::Json>\"\n FROM queue\n JOIN completed_job ON completed_job.parent_job = queue.id\n WHERE completed_job.id = $1 AND completed_job.workspace_id = $2\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "raw_flow: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "parent_job: Uuid", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "created_at!: chrono::NaiveDateTime", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "14540eef4594d9282cee3df4f92a7ed2e67243e5c1522850045b2da42fa914bc" +} diff --git a/backend/.sqlx/query-15557c0acea71cee03f42516553fb4f5709e0e1a02a0187e88fa5d9e94ffb91a.json b/backend/.sqlx/query-15557c0acea71cee03f42516553fb4f5709e0e1a02a0187e88fa5d9e94ffb91a.json new file mode 100644 index 0000000000000..9ba2f7ec7a966 --- /dev/null +++ b/backend/.sqlx/query-15557c0acea71cee03f42516553fb4f5709e0e1a02a0187e88fa5d9e94ffb91a.json @@ -0,0 +1,103 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO queue\n (workspace_id, id, running, parent_job, created_by, permissioned_as, scheduled_for, \n script_hash, script_path, raw_code, raw_lock, args, job_kind, schedule_path, raw_flow, flow_status, is_flow_step, language, started_at, same_worker, pre_run_error, email, visible_to_owner, root_job, tag, concurrent_limit, concurrency_time_window_s, timeout, flow_step_id, cache_ttl, priority, last_ping)\n VALUES ($1, $2, $3, $4, $5, $6, COALESCE($7, now()), $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, CASE WHEN $3 THEN now() END, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, NULL) RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Uuid", + "Bool", + "Uuid", + "Varchar", + "Varchar", + "Timestamptz", + "Int8", + "Varchar", + "Text", + "Text", + "Jsonb", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", 
+ "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + "Varchar", + "Jsonb", + "Jsonb", + "Bool", + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp" + ] + } + } + }, + "Bool", + "Text", + "Varchar", + "Bool", + "Uuid", + "Varchar", + "Int4", + "Int4", + "Int4", + "Varchar", + "Int4", + "Int2" + ] + }, + "nullable": [ + true + ] + }, + "hash": "15557c0acea71cee03f42516553fb4f5709e0e1a02a0187e88fa5d9e94ffb91a" +} diff --git a/backend/.sqlx/query-15697f3b63f88b9cfa33ab0aa64b441961aad80bf9fd0125bcf55a729e556d1e.json b/backend/.sqlx/query-15697f3b63f88b9cfa33ab0aa64b441961aad80bf9fd0125bcf55a729e556d1e.json new file mode 100644 index 0000000000000..4199d677a746a --- /dev/null +++ b/backend/.sqlx/query-15697f3b63f88b9cfa33ab0aa64b441961aad80bf9fd0125bcf55a729e556d1e.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE\n FROM parallel_monitor_lock\n WHERE last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval \n RETURNING parent_flow_id, job_id, last_ping, (SELECT workspace_id FROM queue q\n WHERE q.id = parent_flow_id AND q.running = true AND q.canceled = false) AS workspace_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_flow_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "job_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "last_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "workspace_id", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + true, + null + ] + }, + "hash": "15697f3b63f88b9cfa33ab0aa64b441961aad80bf9fd0125bcf55a729e556d1e" +} diff --git a/backend/.sqlx/query-1639452901868c2ea759fe751fd89ee2b5e9ebfc003fec10ebbc4298da58b805.json b/backend/.sqlx/query-1639452901868c2ea759fe751fd89ee2b5e9ebfc003fec10ebbc4298da58b805.json new file mode 100644 index 0000000000000..c9cfcd54c66e9 --- /dev/null +++ b/backend/.sqlx/query-1639452901868c2ea759fe751fd89ee2b5e9ebfc003fec10ebbc4298da58b805.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", CONCAT(coalesce(v2_queue.logs, ''), coalesce(job_logs.logs, '')) as logs, coalesce(job_logs.log_offset, 0) as log_offset, job_logs.log_file_index\n FROM v2_queue \n LEFT JOIN job_logs ON job_logs.job_id = v2_queue.id \n WHERE v2_queue.id = $1 AND v2_queue.workspace_id = $2 AND ($3::text[] IS NULL OR v2_queue.tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "log_file_index", + "type_info": "TextArray" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + null, + null, + true + ] + }, + "hash": "1639452901868c2ea759fe751fd89ee2b5e9ebfc003fec10ebbc4298da58b805" +} diff --git a/backend/.sqlx/query-16a8daddd13818addfb4d0a2e9c24583dca58ae6ee4775930e6c1fbe85fd885b.json b/backend/.sqlx/query-16a8daddd13818addfb4d0a2e9c24583dca58ae6ee4775930e6c1fbe85fd885b.json new file mode 100644 index 0000000000000..42816cb37f9c2 --- /dev/null +++ 
b/backend/.sqlx/query-16a8daddd13818addfb4d0a2e9c24583dca58ae6ee4775930e6c1fbe85fd885b.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE\n FROM parallel_monitor_lock\n WHERE last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval \n RETURNING parent_flow_id, job_id, last_ping, (SELECT workspace_id FROM v2_queue q\n WHERE q.id = parent_flow_id AND q.running = true AND q.canceled = false) AS workspace_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_flow_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "job_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "last_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "workspace_id", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + true, + null + ] + }, + "hash": "16a8daddd13818addfb4d0a2e9c24583dca58ae6ee4775930e6c1fbe85fd885b" +} diff --git a/backend/.sqlx/query-16b46734348a373520cf04bd9730757c933fd5e16d7f61d4e3df228908d67750.json b/backend/.sqlx/query-16b46734348a373520cf04bd9730757c933fd5e16d7f61d4e3df228908d67750.json new file mode 100644 index 0000000000000..317a8ebc5d83e --- /dev/null +++ b/backend/.sqlx/query-16b46734348a373520cf04bd9730757c933fd5e16d7f61d4e3df228908d67750.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_completed_job\n SET logs = '##DELETED##', args = '{}'::jsonb, result = '{}'::jsonb\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "16b46734348a373520cf04bd9730757c933fd5e16d7f61d4e3df228908d67750" +} diff --git a/backend/.sqlx/query-171edbdccc746e7c375b127738e4d71723acc9b39c86f644f8589b14c1b026cd.json b/backend/.sqlx/query-171edbdccc746e7c375b127738e4d71723acc9b39c86f644f8589b14c1b026cd.json new file mode 100644 index 0000000000000..11c6b64dcd80f --- /dev/null +++ b/backend/.sqlx/query-171edbdccc746e7c375b127738e4d71723acc9b39c86f644f8589b14c1b026cd.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET last_ping = null\n WHERE id = $1 AND last_ping = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Timestamptz" + ] + }, + "nullable": [] + }, + "hash": "171edbdccc746e7c375b127738e4d71723acc9b39c86f644f8589b14c1b026cd" +} diff --git a/backend/.sqlx/query-188389a7c54ff87247b9a875752c56060ef60ff1a9e77cd2315940ca0a49253d.json b/backend/.sqlx/query-188389a7c54ff87247b9a875752c56060ef60ff1a9e77cd2315940ca0a49253d.json new file mode 100644 index 0000000000000..fab4f1b1011b8 --- /dev/null +++ b/backend/.sqlx/query-188389a7c54ff87247b9a875752c56060ef60ff1a9e77cd2315940ca0a49253d.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = flow_status - 'approval_conditions'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "188389a7c54ff87247b9a875752c56060ef60ff1a9e77cd2315940ca0a49253d" +} diff --git a/backend/.sqlx/query-19792ad1f11164931e7c859a4e83a3a335cbb9e0610d9dee44994fd4dd8abc5c.json b/backend/.sqlx/query-19792ad1f11164931e7c859a4e83a3a335cbb9e0610d9dee44994fd4dd8abc5c.json new file mode 100644 index 0000000000000..64a6f73ee0c51 --- /dev/null +++ b/backend/.sqlx/query-19792ad1f11164931e7c859a4e83a3a335cbb9e0610d9dee44994fd4dd8abc5c.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET suspend = $1, suspend_until = now() + 
interval '14 day', running = true\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "19792ad1f11164931e7c859a4e83a3a335cbb9e0610d9dee44994fd4dd8abc5c" +} diff --git a/backend/.sqlx/query-19ffbc1d775bcef3ed787875aa6017a4b0501ea94ac284e9245c566d3d097925.json b/backend/.sqlx/query-19ffbc1d775bcef3ed787875aa6017a4b0501ea94ac284e9245c566d3d097925.json new file mode 100644 index 0000000000000..57d5a3f676fbe --- /dev/null +++ b/backend/.sqlx/query-19ffbc1d775bcef3ed787875aa6017a4b0501ea94ac284e9245c566d3d097925.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT script_path FROM v2_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "19ffbc1d775bcef3ed787875aa6017a4b0501ea94ac284e9245c566d3d097925" +} diff --git a/backend/.sqlx/query-1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868.json b/backend/.sqlx/query-1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868.json new file mode 100644 index 0000000000000..1d83c36fd8480 --- /dev/null +++ b/backend/.sqlx/query-1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job_queue WHERE workspace_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [] + }, + "hash": "1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868" +} diff --git a/backend/.sqlx/query-1c790df0b801378743ac040c5599219817e66ec2aee939853af8df64048e813d.json b/backend/.sqlx/query-1c790df0b801378743ac040c5599219817e66ec2aee939853af8df64048e813d.json new file mode 100644 index 0000000000000..7b27e980208e0 --- /dev/null +++ b/backend/.sqlx/query-1c790df0b801378743ac040c5599219817e66ec2aee939853af8df64048e813d.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4) WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "1c790df0b801378743ac040c5599219817e66ec2aee939853af8df64048e813d" +} diff --git a/backend/.sqlx/query-1d1098cc9367502faa1483627bf534472a6cae70d7964ba019aa2121c3929234.json b/backend/.sqlx/query-1d1098cc9367502faa1483627bf534472a6cae70d7964ba019aa2121c3929234.json new file mode 100644 index 0000000000000..f649c600959b4 --- /dev/null +++ b/backend/.sqlx/query-1d1098cc9367502faa1483627bf534472a6cae70d7964ba019aa2121c3929234.json @@ -0,0 +1,67 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n success AS \"success!\"\n FROM completed_job\n WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp" + ] + } + } + } + }, + 
{ + "ordinal": 2, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "1d1098cc9367502faa1483627bf534472a6cae70d7964ba019aa2121c3929234" +} diff --git a/backend/.sqlx/query-20d9a1b3a6631f97836e7b8d96cdec706ba1cc2d5d432e397633a1f79e67589a.json b/backend/.sqlx/query-20d9a1b3a6631f97836e7b8d96cdec706ba1cc2d5d432e397633a1f79e67589a.json new file mode 100644 index 0000000000000..34609792bf885 --- /dev/null +++ b/backend/.sqlx/query-20d9a1b3a6631f97836e7b8d96cdec706ba1cc2d5d432e397633a1f79e67589a.json @@ -0,0 +1,68 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n created_by AS \"created_by!\"\n FROM completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "20d9a1b3a6631f97836e7b8d96cdec706ba1cc2d5d432e397633a1f79e67589a" +} diff --git a/backend/.sqlx/query-2135a4034fb85e04d707051b134b176073160cb4e6e27435520c23dd4974345a.json b/backend/.sqlx/query-2135a4034fb85e04d707051b134b176073160cb4e6e27435520c23dd4974345a.json new file mode 100644 index 0000000000000..19ef49f51b09e --- /dev/null +++ b/backend/.sqlx/query-2135a4034fb85e04d707051b134b176073160cb4e6e27435520c23dd4974345a.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET flow_status = jsonb_set(jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], COALESCE(flow_status->$1, '{}'::jsonb)), array[$1, 'started_at'], to_jsonb(now()::text)) WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "2135a4034fb85e04d707051b134b176073160cb4e6e27435520c23dd4974345a" +} diff --git a/backend/.sqlx/query-227511aeb98cf02e49eba2acda16f7f9288dc6b349c2c46945e36fdd46401c6c.json b/backend/.sqlx/query-227511aeb98cf02e49eba2acda16f7f9288dc6b349c2c46945e36fdd46401c6c.json new file mode 100644 index 0000000000000..c9007a235605f --- /dev/null +++ b/backend/.sqlx/query-227511aeb98cf02e49eba2acda16f7f9288dc6b349c2c46945e36fdd46401c6c.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\"\n FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status!: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + 
] + }, + "nullable": [ + true, + true + ] + }, + "hash": "227511aeb98cf02e49eba2acda16f7f9288dc6b349c2c46945e36fdd46401c6c" +} diff --git a/backend/.sqlx/query-231ece5db2fbf26d6fbe298b16f844552e5dd432f35e247241f660cb3c2f8c96.json b/backend/.sqlx/query-231ece5db2fbf26d6fbe298b16f844552e5dd432f35e247241f660cb3c2f8c96.json new file mode 100644 index 0000000000000..e8704d13ab138 --- /dev/null +++ b/backend/.sqlx/query-231ece5db2fbf26d6fbe298b16f844552e5dd432f35e247241f660cb3c2f8c96.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET running = false, started_at = null\n WHERE last_ping < now() - ($1 || ' seconds')::interval\n AND running = true AND job_kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow') AND same_worker = false RETURNING id AS \"id!\", workspace_id AS \"workspace_id!\", last_ping", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "workspace_id!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "last_ping", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "231ece5db2fbf26d6fbe298b16f844552e5dd432f35e247241f660cb3c2f8c96" +} diff --git a/backend/.sqlx/query-28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json b/backend/.sqlx/query-28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json new file mode 100644 index 0000000000000..b39b9ef58aec1 --- /dev/null +++ b/backend/.sqlx/query-28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT null FROM v2_job_queue WHERE id = $1 FOR UPDATE", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2" +} diff --git a/backend/.sqlx/query-2a0b59e2770b27a1f2a8baddc67dba29216a1aad733171c25cc4aae5b3c84d54.json b/backend/.sqlx/query-2a0b59e2770b27a1f2a8baddc67dba29216a1aad733171c25cc4aae5b3c84d54.json new file mode 100644 index 0000000000000..8ff5e29db1eef --- /dev/null +++ b/backend/.sqlx/query-2a0b59e2770b27a1f2a8baddc67dba29216a1aad733171c25cc4aae5b3c84d54.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue SET\n flow_status = JSONB_SET(flow_status, ARRAY['modules', flow_status->>'step'::text], $1),\n suspend = $2,\n suspend_until = now() + $3\n WHERE id = $4", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Int4", + "Interval", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "2a0b59e2770b27a1f2a8baddc67dba29216a1aad733171c25cc4aae5b3c84d54" +} diff --git a/backend/.sqlx/query-2c47c39cb7463ab3e15e062f2747a43e15f0be227265f579eccc6e525939a648.json b/backend/.sqlx/query-2c47c39cb7463ab3e15e062f2747a43e15f0be227265f579eccc6e525939a648.json new file mode 100644 index 0000000000000..e75bac0371a16 --- /dev/null +++ b/backend/.sqlx/query-2c47c39cb7463ab3e15e062f2747a43e15f0be227265f579eccc6e525939a648.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n id AS \"id!\", workspace_id AS \"workspace_id!\", parent_job, is_flow_step,\n flow_status AS \"flow_status: Box\", last_ping, same_worker\n FROM v2_queue\n WHERE running = true AND suspend = 0 AND suspend_until IS null AND scheduled_for <= now()\n AND (job_kind = 'flow' OR job_kind = 
'flowpreview' OR job_kind = 'flownode')\n AND last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval\n AND canceled = false\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "workspace_id!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "parent_job", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "is_flow_step", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "flow_status: Box", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "last_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "same_worker", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "2c47c39cb7463ab3e15e062f2747a43e15f0be227265f579eccc6e525939a648" +} diff --git a/backend/.sqlx/query-2cef109784efc04999e4537e0d1d3fb3221e04f3a7c1abe91dd763f366d06618.json b/backend/.sqlx/query-2cef109784efc04999e4537e0d1d3fb3221e04f3a7c1abe91dd763f366d06618.json new file mode 100644 index 0000000000000..3b7793dd05316 --- /dev/null +++ b/backend/.sqlx/query-2cef109784efc04999e4537e0d1d3fb3221e04f3a7c1abe91dd763f366d06618.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT success AS \"success!\" FROM completed_job WHERE id = ANY($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [ + true + ] + }, + "hash": "2cef109784efc04999e4537e0d1d3fb3221e04f3a7c1abe91dd763f366d06618" +} diff --git a/backend/.sqlx/query-2d6b581a9e5df008b2a0223eae6794065e6e6bf70415d331ccea6038271a7fec.json b/backend/.sqlx/query-2d6b581a9e5df008b2a0223eae6794065e6e6bf70415d331ccea6038271a7fec.json new file mode 100644 index 0000000000000..b8c75b4ccecae --- /dev/null +++ b/backend/.sqlx/query-2d6b581a9e5df008b2a0223eae6794065e6e6bf70415d331ccea6038271a7fec.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n flow_status->>'step' = '0' \n AND (\n jsonb_array_length(flow_status->'modules') = 0 \n OR flow_status->'modules'->0->>'type' = 'WaitingForPriorSteps' \n OR (\n flow_status->'modules'->0->>'type' = 'Failure' \n AND flow_status->'modules'->0->>'job' = $1\n )\n )\n FROM v2_completed_job WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "2d6b581a9e5df008b2a0223eae6794065e6e6bf70415d331ccea6038271a7fec" +} diff --git a/backend/.sqlx/query-30606c485f46e737d2d0acc9deb44c43b408f3f86da47141c973af1b9f549c0a.json b/backend/.sqlx/query-30606c485f46e737d2d0acc9deb44c43b408f3f86da47141c973af1b9f549c0a.json new file mode 100644 index 0000000000000..e6e750510baad --- /dev/null +++ b/backend/.sqlx/query-30606c485f46e737d2d0acc9deb44c43b408f3f86da47141c973af1b9f549c0a.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET leaf_jobs = JSONB_SET(coalesce(leaf_jobs, '{}'::jsonb), ARRAY[$1::TEXT], $2)\n WHERE COALESCE((SELECT root_job FROM v2_queue WHERE id = $3), $3) = id", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "30606c485f46e737d2d0acc9deb44c43b408f3f86da47141c973af1b9f549c0a" +} diff --git 
a/backend/.sqlx/query-32662887109a0619b860551140a6f1d178813a682dfef6c21983077eb0848fdb.json b/backend/.sqlx/query-32662887109a0619b860551140a6f1d178813a682dfef6c21983077eb0848fdb.json new file mode 100644 index 0000000000000..849ca88eccff1 --- /dev/null +++ b/backend/.sqlx/query-32662887109a0619b860551140a6f1d178813a682dfef6c21983077eb0848fdb.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', flow_status->>'step', 'progress'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "32662887109a0619b860551140a6f1d178813a682dfef6c21983077eb0848fdb" +} diff --git a/backend/.sqlx/query-32fdc66931dcf34f6ef5cdf3fd335d9f990eaa3dbb396290477159012e86af14.json b/backend/.sqlx/query-32fdc66931dcf34f6ef5cdf3fd335d9f990eaa3dbb396290477159012e86af14.json new file mode 100644 index 0000000000000..4767ba9a23bde --- /dev/null +++ b/backend/.sqlx/query-32fdc66931dcf34f6ef5cdf3fd335d9f990eaa3dbb396290477159012e86af14.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n id AS \"id!\", workspace_id AS \"workspace_id!\", parent_job, is_flow_step,\n flow_status AS \"flow_status: Box\", last_ping, same_worker\n FROM queue\n WHERE running = true AND suspend = 0 AND suspend_until IS null AND scheduled_for <= now()\n AND (job_kind = 'flow' OR job_kind = 'flowpreview' OR job_kind = 'flownode')\n AND last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval\n AND canceled = false\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "workspace_id!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "parent_job", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "is_flow_step", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "flow_status: Box", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "last_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "same_worker", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "32fdc66931dcf34f6ef5cdf3fd335d9f990eaa3dbb396290477159012e86af14" +} diff --git a/backend/.sqlx/query-36b26b3a6458d8a0b4f770d52c1bb09370b905d610b9ceb3cfac11365586320d.json b/backend/.sqlx/query-36b26b3a6458d8a0b4f770d52c1bb09370b905d610b9ceb3cfac11365586320d.json new file mode 100644 index 0000000000000..3b3fa11ffbc6f --- /dev/null +++ b/backend/.sqlx/query-36b26b3a6458d8a0b4f770d52c1bb09370b905d610b9ceb3cfac11365586320d.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT running AS \"running!\" FROM queue WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "running!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "36b26b3a6458d8a0b4f770d52c1bb09370b905d610b9ceb3cfac11365586320d" +} diff --git a/backend/.sqlx/query-382eb81b25154b3aff04e520e2611081779292a92d283bc23e8268f46542b5d2.json b/backend/.sqlx/query-382eb81b25154b3aff04e520e2611081779292a92d283bc23e8268f46542b5d2.json new file mode 100644 index 0000000000000..34c56c37d2681 --- /dev/null +++ b/backend/.sqlx/query-382eb81b25154b3aff04e520e2611081779292a92d283bc23e8268f46542b5d2.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT 
success AS \"success!\" FROM v2_completed_job WHERE id = ANY($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [ + true + ] + }, + "hash": "382eb81b25154b3aff04e520e2611081779292a92d283bc23e8268f46542b5d2" +} diff --git a/backend/.sqlx/query-3843fa02ec86e57c1f13bc66e86adf8d6d19cc6cf612c20f3fb27071bfce071a.json b/backend/.sqlx/query-3843fa02ec86e57c1f13bc66e86adf8d6d19cc6cf612c20f3fb27071bfce071a.json new file mode 100644 index 0000000000000..e83c3674caa91 --- /dev/null +++ b/backend/.sqlx/query-3843fa02ec86e57c1f13bc66e86adf8d6d19cc6cf612c20f3fb27071bfce071a.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM v2_queue WHERE schedule_path = $1 AND workspace_id = $2 AND id != $3 AND running = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "3843fa02ec86e57c1f13bc66e86adf8d6d19cc6cf612c20f3fb27071bfce071a" +} diff --git a/backend/.sqlx/query-3a4f6ebad63a67b648e672312a0c0eb8956d78398d97a18d095f83f0ab0a7915.json b/backend/.sqlx/query-3a4f6ebad63a67b648e672312a0c0eb8956d78398d97a18d095f83f0ab0a7915.json new file mode 100644 index 0000000000000..fdf8dd40f3351 --- /dev/null +++ b/backend/.sqlx/query-3a4f6ebad63a67b648e672312a0c0eb8956d78398d97a18d095f83f0ab0a7915.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue SET running = false, started_at = null\n WHERE last_ping < now() - ($1 || ' seconds')::interval\n AND running = true AND job_kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow') AND same_worker = false RETURNING id AS \"id!\", workspace_id AS \"workspace_id!\", last_ping", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "workspace_id!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "last_ping", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "3a4f6ebad63a67b648e672312a0c0eb8956d78398d97a18d095f83f0ab0a7915" +} diff --git a/backend/.sqlx/query-3a5edf3dd884b5a8862bb112f6520967ed4a218782192c6c6fc1498f45d753a6.json b/backend/.sqlx/query-3a5edf3dd884b5a8862bb112f6520967ed4a218782192c6c6fc1498f45d753a6.json new file mode 100644 index 0000000000000..26d0b63b5cd4d --- /dev/null +++ b/backend/.sqlx/query-3a5edf3dd884b5a8862bb112f6520967ed4a218782192c6c6fc1498f45d753a6.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists ix_job_created_at ON v2_job (created_at DESC)", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "3a5edf3dd884b5a8862bb112f6520967ed4a218782192c6c6fc1498f45d753a6" +} diff --git a/backend/.sqlx/query-3af91bb37638473d3d97a3a6f7fc6c6927ac0a6f6c26f11db69fbc4aa839cf64.json b/backend/.sqlx/query-3af91bb37638473d3d97a3a6f7fc6c6927ac0a6f6c26f11db69fbc4aa839cf64.json new file mode 100644 index 0000000000000..1809e60091e37 --- /dev/null +++ b/backend/.sqlx/query-3af91bb37638473d3d97a3a6f7fc6c6927ac0a6f6c26f11db69fbc4aa839cf64.json @@ -0,0 +1,91 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n v2_queue.job_kind AS \"job_kind!: JobKind\",\n v2_queue.script_hash,\n v2_queue.raw_flow AS \"raw_flow: sqlx::types::Json>\",\n 
v2_completed_job.parent_job AS \"parent_job: Uuid\",\n v2_completed_job.created_at AS \"created_at!: chrono::NaiveDateTime\",\n v2_completed_job.created_by AS \"created_by!\",\n v2_queue.script_path,\n v2_queue.args AS \"args: sqlx::types::Json>\"\n FROM v2_queue\n JOIN v2_completed_job ON v2_completed_job.parent_job = v2_queue.id\n WHERE v2_completed_job.id = $1 AND v2_completed_job.workspace_id = $2\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "raw_flow: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "parent_job: Uuid", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "created_at!: chrono::NaiveDateTime", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "3af91bb37638473d3d97a3a6f7fc6c6927ac0a6f6c26f11db69fbc4aa839cf64" +} diff --git a/backend/.sqlx/query-3bacf9cd9aa63f4bec5f983f4a0c3030216b5a4ed669f77962509d1c2c6cb780.json b/backend/.sqlx/query-3bacf9cd9aa63f4bec5f983f4a0c3030216b5a4ed669f77962509d1c2c6cb780.json new file mode 100644 index 0000000000000..200a5bf47f728 --- /dev/null +++ b/backend/.sqlx/query-3bacf9cd9aa63f4bec5f983f4a0c3030216b5a4ed669f77962509d1c2c6cb780.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists root_job_index_by_path_2 ON v2_job (workspace_id, runnable_path, created_at desc) WHERE parent_job IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "3bacf9cd9aa63f4bec5f983f4a0c3030216b5a4ed669f77962509d1c2c6cb780" +} diff --git a/backend/.sqlx/query-3bc1919515120116705d7c250a34f2b9bf7c4bcaedb87c28f974e46c9c42200c.json b/backend/.sqlx/query-3bc1919515120116705d7c250a34f2b9bf7c4bcaedb87c28f974e46c9c42200c.json new file mode 100644 index 0000000000000..125c9593b622e --- /dev/null +++ b/backend/.sqlx/query-3bc1919515120116705d7c250a34f2b9bf7c4bcaedb87c28f974e46c9c42200c.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM queue WHERE id = ANY($1) AND schedule_path IS NULL AND ($2::text[] IS NULL OR tag = ANY($2))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "UuidArray", + "TextArray" + ] + }, + "nullable": [ + true + ] + }, + "hash": "3bc1919515120116705d7c250a34f2b9bf7c4bcaedb87c28f974e46c9c42200c" +} diff --git a/backend/.sqlx/query-3c423bcb10668bdd131f7b6a9b0fcc8f9b1909f255cdfce0fb83496aac0fc021.json b/backend/.sqlx/query-3c423bcb10668bdd131f7b6a9b0fcc8f9b1909f255cdfce0fb83496aac0fc021.json new file mode 100644 index 0000000000000..090efe49644e7 --- /dev/null +++ 
b/backend/.sqlx/query-3c423bcb10668bdd131f7b6a9b0fcc8f9b1909f255cdfce0fb83496aac0fc021.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\"\n FROM completed_job\n WHERE parent_job = $1 AND workspace_id = $2 AND flow_status IS NOT NULL", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status!: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "3c423bcb10668bdd131f7b6a9b0fcc8f9b1909f255cdfce0fb83496aac0fc021" +} diff --git a/backend/.sqlx/query-3c4816f411277de2b94b65ab8d2c6d2a3fdc1f3cd8a13ffdef536cf8b6475634.json b/backend/.sqlx/query-3c4816f411277de2b94b65ab8d2c6d2a3fdc1f3cd8a13ffdef536cf8b6475634.json new file mode 100644 index 0000000000000..ffb0b4c5c4171 --- /dev/null +++ b/backend/.sqlx/query-3c4816f411277de2b94b65ab8d2c6d2a3fdc1f3cd8a13ffdef536cf8b6475634.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS (SELECT 1 FROM v2_queue WHERE workspace_id = $1 AND schedule_path = $2 AND scheduled_for = $3)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Timestamptz" + ] + }, + "nullable": [ + null + ] + }, + "hash": "3c4816f411277de2b94b65ab8d2c6d2a3fdc1f3cd8a13ffdef536cf8b6475634" +} diff --git a/backend/.sqlx/query-3c8f53194af72224d943adb5f38a7dc0c2a8676b586d36e9f949b2a056dcf1e2.json b/backend/.sqlx/query-3c8f53194af72224d943adb5f38a7dc0c2a8676b586d36e9f949b2a056dcf1e2.json new file mode 100644 index 0000000000000..2d0950f91a99d --- /dev/null +++ b/backend/.sqlx/query-3c8f53194af72224d943adb5f38a7dc0c2a8676b586d36e9f949b2a056dcf1e2.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET last_ping = now()\n WHERE id = $1 AND last_ping < now()", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "3c8f53194af72224d943adb5f38a7dc0c2a8676b586d36e9f949b2a056dcf1e2" +} diff --git a/backend/.sqlx/query-3d0e77e8a68fcd9ad6f1f27cb9f01e00776aa50f32e5cbd073d9aa1dcca944b2.json b/backend/.sqlx/query-3d0e77e8a68fcd9ad6f1f27cb9f01e00776aa50f32e5cbd073d9aa1dcca944b2.json new file mode 100644 index 0000000000000..01ce4de1142e6 --- /dev/null +++ b/backend/.sqlx/query-3d0e77e8a68fcd9ad6f1f27cb9f01e00776aa50f32e5cbd073d9aa1dcca944b2.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "Bool" + ] + }, + "nullable": [ + null + ] + }, + "hash": "3d0e77e8a68fcd9ad6f1f27cb9f01e00776aa50f32e5cbd073d9aa1dcca944b2" +} diff --git a/backend/.sqlx/query-402fd5bff6e8420c9b3477f05df625cae355fda673b9e85284e0fcd7d9232eb7.json b/backend/.sqlx/query-402fd5bff6e8420c9b3477f05df625cae355fda673b9e85284e0fcd7d9232eb7.json new file mode 100644 index 0000000000000..83d1b15744d97 --- /dev/null +++ b/backend/.sqlx/query-402fd5bff6e8420c9b3477f05df625cae355fda673b9e85284e0fcd7d9232eb7.json @@ -0,0 +1,73 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n script_path, script_hash AS \"script_hash: ScriptHash\",\n job_kind AS \"job_kind!: 
JobKind\",\n flow_status AS \"flow_status: Json>\",\n raw_flow AS \"raw_flow: Json>\"\n FROM completed_job WHERE id = $1 and workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "flow_status: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true + ] + }, + "hash": "402fd5bff6e8420c9b3477f05df625cae355fda673b9e85284e0fcd7d9232eb7" +} diff --git a/backend/.sqlx/query-40e9002c4e2aa58722ba382e307e356286035fb1aeed6268cbcab16251bfde98.json b/backend/.sqlx/query-40e9002c4e2aa58722ba382e307e356286035fb1aeed6268cbcab16251bfde98.json new file mode 100644 index 0000000000000..49439bd4476b9 --- /dev/null +++ b/backend/.sqlx/query-40e9002c4e2aa58722ba382e307e356286035fb1aeed6268cbcab16251bfde98.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET running = false, started_at = null WHERE id = $1 AND canceled = false", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "40e9002c4e2aa58722ba382e307e356286035fb1aeed6268cbcab16251bfde98" +} diff --git a/backend/.sqlx/query-41addab65f2ce7214dc7c28ed5780561c90ee1ba82f10cc966882ecf7ec106cf.json b/backend/.sqlx/query-41addab65f2ce7214dc7c28ed5780561c90ee1ba82f10cc966882ecf7ec106cf.json new file mode 100644 index 0000000000000..e3a09adc101e9 --- /dev/null +++ b/backend/.sqlx/query-41addab65f2ce7214dc7c28ed5780561c90ee1ba82f10cc966882ecf7ec106cf.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(JSONB_SET(flow_status, ARRAY['retry'], $1), ARRAY['modules', $3::TEXT, 'failed_retries'], $4)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "41addab65f2ce7214dc7c28ed5780561c90ee1ba82f10cc966882ecf7ec106cf" +} diff --git a/backend/.sqlx/query-4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5.json b/backend/.sqlx/query-4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5.json new file mode 100644 index 0000000000000..841d549e20564 --- /dev/null +++ b/backend/.sqlx/query-4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET workspace_id = $1 WHERE workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5" +} diff --git a/backend/.sqlx/query-44a317f7647e2b515f90dc9c04f7ac75c2c87c7c3036acd96ba72fb2a21700db.json b/backend/.sqlx/query-44a317f7647e2b515f90dc9c04f7ac75c2c87c7c3036acd96ba72fb2a21700db.json new file mode 100644 index 0000000000000..8885d178e210c --- 
/dev/null +++ b/backend/.sqlx/query-44a317f7647e2b515f90dc9c04f7ac75c2c87c7c3036acd96ba72fb2a21700db.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM completed_job WHERE created_at <= now() - ($1::bigint::text || ' s')::interval AND started_at + ((duration_ms/1000 + $1::bigint) || ' s')::interval <= now() RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "44a317f7647e2b515f90dc9c04f7ac75c2c87c7c3036acd96ba72fb2a21700db" +} diff --git a/backend/.sqlx/query-4504f3a5d3cffd56d51bd263e6759404a3a5889dd7c61cb077e17b877b027eff.json b/backend/.sqlx/query-4504f3a5d3cffd56d51bd263e6759404a3a5889dd7c61cb077e17b877b027eff.json new file mode 100644 index 0000000000000..76e4896aaaec0 --- /dev/null +++ b/backend/.sqlx/query-4504f3a5d3cffd56d51bd263e6759404a3a5889dd7c61cb077e17b877b027eff.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE queue\n SET canceled = true\n , canceled_by = 'timeout'\n , canceled_reason = $1\n WHERE id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "4504f3a5d3cffd56d51bd263e6759404a3a5889dd7c61cb077e17b877b027eff" +} diff --git a/backend/.sqlx/query-45977ad6ca30c7c84ce2a01a7cb3caab2c3ee774d9c97bbad90dc226bb54e557.json b/backend/.sqlx/query-45977ad6ca30c7c84ce2a01a7cb3caab2c3ee774d9c97bbad90dc226bb54e557.json new file mode 100644 index 0000000000000..ebd0be5cf93c6 --- /dev/null +++ b/backend/.sqlx/query-45977ad6ca30c7c84ce2a01a7cb3caab2c3ee774d9c97bbad90dc226bb54e557.json @@ -0,0 +1,93 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job (\n id, workspace_id,\n created_at, created_by, permissioned_as, permissioned_as_email,\n kind, runnable_id, runnable_path, parent_job, script_lang,\n flow_step_id, flow_root_job,\n schedule_path,\n tag, same_worker, visible_to_owner, concurrent_limit, concurrency_time_window_s,\n cache_ttl, timeout, priority,\n args, pre_run_error,\n raw_code, raw_lock, raw_flow\n ) SELECT\n unnest($1::uuid[]), $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,\n $16, $17, $18, $19, $20, $21, $22, unnest($23::jsonb[]), $24, $25, $26, $27\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray", + "Varchar", + "Timestamptz", + "Varchar", + "Varchar", + "Varchar", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + "Int8", + "Varchar", + "Uuid", + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + }, + "Varchar", + "Uuid", + "Varchar", + "Varchar", + "Bool", + "Bool", + "Int4", + "Int4", + "Int4", + "Int4", + "Int2", + "JsonbArray", + "Text", + "Text", + "Text", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "45977ad6ca30c7c84ce2a01a7cb3caab2c3ee774d9c97bbad90dc226bb54e557" +} diff --git 
a/backend/.sqlx/query-45f6578656373d9838f1d28da40a468340f337869125f20d7078af1851209b2c.json b/backend/.sqlx/query-45f6578656373d9838f1d28da40a468340f337869125f20d7078af1851209b2c.json new file mode 100644 index 0000000000000..6521f5e9bb753 --- /dev/null +++ b/backend/.sqlx/query-45f6578656373d9838f1d28da40a468340f337869125f20d7078af1851209b2c.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET canceled = true, canceled_by = $1, canceled_reason = $2, scheduled_for = now(), suspend = 0 WHERE id = $3 AND workspace_id = $4 AND (canceled = false OR canceled_reason != $2) RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "45f6578656373d9838f1d28da40a468340f337869125f20d7078af1851209b2c" +} diff --git a/backend/.sqlx/query-4616976a089af7e58c312d0b5357189a35c4691769981a5f8218cc708bd038e3.json b/backend/.sqlx/query-4616976a089af7e58c312d0b5357189a35c4691769981a5f8218cc708bd038e3.json new file mode 100644 index 0000000000000..f7ae58295b8e8 --- /dev/null +++ b/backend/.sqlx/query-4616976a089af7e58c312d0b5357189a35c4691769981a5f8218cc708bd038e3.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET suspend = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "4616976a089af7e58c312d0b5357189a35c4691769981a5f8218cc708bd038e3" +} diff --git a/backend/.sqlx/query-461bb76f9dc0a1d5f35e050d3fab309800382d5173e36c5aa6cc430d46a90324.json b/backend/.sqlx/query-461bb76f9dc0a1d5f35e050d3fab309800382d5173e36c5aa6cc430d46a90324.json new file mode 100644 index 0000000000000..baf71a48477b6 --- /dev/null +++ b/backend/.sqlx/query-461bb76f9dc0a1d5f35e050d3fab309800382d5173e36c5aa6cc430d46a90324.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT parent_job FROM v2_queue WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "461bb76f9dc0a1d5f35e050d3fab309800382d5173e36c5aa6cc430d46a90324" +} diff --git a/backend/.sqlx/query-464c90d711899090eb3dc4054f5da69c278171633f8b05c29e9e91c82bf3c1c4.json b/backend/.sqlx/query-464c90d711899090eb3dc4054f5da69c278171633f8b05c29e9e91c82bf3c1c4.json new file mode 100644 index 0000000000000..a8c03d21d2639 --- /dev/null +++ b/backend/.sqlx/query-464c90d711899090eb3dc4054f5da69c278171633f8b05c29e9e91c82bf3c1c4.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT (flow_status->'step')::integer as step, jsonb_array_length(flow_status->'modules') as len FROM v2_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "step", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "len", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "464c90d711899090eb3dc4054f5da69c278171633f8b05c29e9e91c82bf3c1c4" +} diff --git a/backend/.sqlx/query-4671f1727d0563490534c426375738478f3d93f6bb42aaf021794392328c8875.json b/backend/.sqlx/query-4671f1727d0563490534c426375738478f3d93f6bb42aaf021794392328c8875.json new file mode 100644 index 0000000000000..e497d7f59e5e0 --- /dev/null +++ 
b/backend/.sqlx/query-4671f1727d0563490534c426375738478f3d93f6bb42aaf021794392328c8875.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\"\n FROM completed_job \n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "4671f1727d0563490534c426375738478f3d93f6bb42aaf021794392328c8875" +} diff --git a/backend/.sqlx/query-4710cb3a00b54882619ad54d43235f1c8eb1d742a423d78ea9351ea301cfa84a.json b/backend/.sqlx/query-4710cb3a00b54882619ad54d43235f1c8eb1d742a423d78ea9351ea301cfa84a.json new file mode 100644 index 0000000000000..b23328762a5fb --- /dev/null +++ b/backend/.sqlx/query-4710cb3a00b54882619ad54d43235f1c8eb1d742a423d78ea9351ea301cfa84a.json @@ -0,0 +1,73 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n script_path, script_hash AS \"script_hash: ScriptHash\",\n job_kind AS \"job_kind!: JobKind\",\n flow_status AS \"flow_status: Json>\",\n raw_flow AS \"raw_flow: Json>\"\n FROM v2_completed_job WHERE id = $1 and workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "flow_status: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true + ] + }, + "hash": "4710cb3a00b54882619ad54d43235f1c8eb1d742a423d78ea9351ea301cfa84a" +} diff --git a/backend/.sqlx/query-490e8230628f3f966db05b728d25fb0adb7f383282897651500159fae4f6860a.json b/backend/.sqlx/query-490e8230628f3f966db05b728d25fb0adb7f383282897651500159fae4f6860a.json new file mode 100644 index 0000000000000..60486554a0755 --- /dev/null +++ b/backend/.sqlx/query-490e8230628f3f966db05b728d25fb0adb7f383282897651500159fae4f6860a.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT tag as \"tag!\", COUNT(*) as \"count!\"\n FROM v2_completed_job\n WHERE started_at > NOW() - make_interval(secs => $1) AND ($2::text IS NULL OR workspace_id = $2)\n GROUP BY tag\n ORDER BY \"count!\" DESC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Float8", + "Text" + ] + }, + "nullable": [ + true, + null + ] + }, + "hash": "490e8230628f3f966db05b728d25fb0adb7f383282897651500159fae4f6860a" +} diff --git a/backend/.sqlx/query-4914b74a7956ab41e1a1c812460cc84bbd5e4045acc529f61de892a4e3aceef9.json b/backend/.sqlx/query-4914b74a7956ab41e1a1c812460cc84bbd5e4045acc529f61de892a4e3aceef9.json new 
file mode 100644 index 0000000000000..26af54b1facf1 --- /dev/null +++ b/backend/.sqlx/query-4914b74a7956ab41e1a1c812460cc84bbd5e4045acc529f61de892a4e3aceef9.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT username, is_admin, operator FROM usr WHERE\n email = $1 AND workspace_id = $2 AND disabled = false", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "is_admin", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "operator", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "4914b74a7956ab41e1a1c812460cc84bbd5e4045acc529f61de892a4e3aceef9" +} diff --git a/backend/.sqlx/query-49d23dccfd132b6919f6084670886d181e2c8f2bec2fbeb1cc296c1e7251264e.json b/backend/.sqlx/query-49d23dccfd132b6919f6084670886d181e2c8f2bec2fbeb1cc296c1e7251264e.json new file mode 100644 index 0000000000000..81dac196ad1c9 --- /dev/null +++ b/backend/.sqlx/query-49d23dccfd132b6919f6084670886d181e2c8f2bec2fbeb1cc296c1e7251264e.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO metrics (id, value)\n VALUES ($1, to_jsonb((SELECT EXTRACT(EPOCH FROM now() - scheduled_for)\n FROM v2_queue WHERE tag = $2 AND running = false AND scheduled_for <= now() - ('3 seconds')::interval\n ORDER BY priority DESC NULLS LAST, scheduled_for LIMIT 1)))", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "49d23dccfd132b6919f6084670886d181e2c8f2bec2fbeb1cc296c1e7251264e" +} diff --git a/backend/.sqlx/query-4a4971ab285cc5ca0dc4026d75a31981cb91ccabdaa786ebadb6d80e8a8b38db.json b/backend/.sqlx/query-4a4971ab285cc5ca0dc4026d75a31981cb91ccabdaa786ebadb6d80e8a8b38db.json new file mode 100644 index 0000000000000..473149f877c3d --- /dev/null +++ b/backend/.sqlx/query-4a4971ab285cc5ca0dc4026d75a31981cb91ccabdaa786ebadb6d80e8a8b38db.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT flow_status AS \"flow_status: Json>\"\n FROM v2_job_completed WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "flow_status: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "4a4971ab285cc5ca0dc4026d75a31981cb91ccabdaa786ebadb6d80e8a8b38db" +} diff --git a/backend/.sqlx/query-4cdbdd36bee7563ae7d09d20543e45600f695149652362400db1f16724c04c6f.json b/backend/.sqlx/query-4cdbdd36bee7563ae7d09d20543e45600f695149652362400db1f16724c04c6f.json new file mode 100644 index 0000000000000..107287a6f9585 --- /dev/null +++ b/backend/.sqlx/query-4cdbdd36bee7563ae7d09d20543e45600f695149652362400db1f16724c04c6f.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_queue WHERE canceled = false AND (scheduled_for <= now()\n OR (suspend_until IS NOT NULL\n AND ( suspend <= 0\n OR suspend_until <= now())))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "4cdbdd36bee7563ae7d09d20543e45600f695149652362400db1f16724c04c6f" +} diff --git a/backend/.sqlx/query-4d3ef32120623584bf5c13d86ea6ad7b3aa41d9b581738d16fbfff4cc5b72a7a.json b/backend/.sqlx/query-4d3ef32120623584bf5c13d86ea6ad7b3aa41d9b581738d16fbfff4cc5b72a7a.json new file mode 100644 index 0000000000000..9fad8009ec451 --- /dev/null +++ 
b/backend/.sqlx/query-4d3ef32120623584bf5c13d86ea6ad7b3aa41d9b581738d16fbfff4cc5b72a7a.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT canceled AS \"canceled!\" FROM queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "4d3ef32120623584bf5c13d86ea6ad7b3aa41d9b581738d16fbfff4cc5b72a7a" +} diff --git a/backend/.sqlx/query-5102b5f5eedc9a5d59d67e141c523decca8d6ba819fd076dcbd17001b9c5af8a.json b/backend/.sqlx/query-5102b5f5eedc9a5d59d67e141c523decca8d6ba819fd076dcbd17001b9c5af8a.json new file mode 100644 index 0000000000000..457e0f56d946f --- /dev/null +++ b/backend/.sqlx/query-5102b5f5eedc9a5d59d67e141c523decca8d6ba819fd076dcbd17001b9c5af8a.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT tag AS \"tag!\", count(*) AS \"count!\" FROM v2_queue WHERE\n scheduled_for <= now() - ('3 seconds')::interval AND running = false\n GROUP BY tag", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + null + ] + }, + "hash": "5102b5f5eedc9a5d59d67e141c523decca8d6ba819fd076dcbd17001b9c5af8a" +} diff --git a/backend/.sqlx/query-52bd8efeaec0d0c2aa77d777a0b6559a1aa4ca9ebd4f9b535014cbcb113f9b92.json b/backend/.sqlx/query-52bd8efeaec0d0c2aa77d777a0b6559a1aa4ca9ebd4f9b535014cbcb113f9b92.json new file mode 100644 index 0000000000000..b4576d8682133 --- /dev/null +++ b/backend/.sqlx/query-52bd8efeaec0d0c2aa77d777a0b6559a1aa4ca9ebd4f9b535014cbcb113f9b92.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\"\n FROM queue\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "52bd8efeaec0d0c2aa77d777a0b6559a1aa4ca9ebd4f9b535014cbcb113f9b92" +} diff --git a/backend/.sqlx/query-536556e00c2f7a05219adbbcf34ec25a2d548cf832193f6c6ac922c1979ea7d1.json b/backend/.sqlx/query-536556e00c2f7a05219adbbcf34ec25a2d548cf832193f6c6ac922c1979ea7d1.json new file mode 100644 index 0000000000000..129a3355bf392 --- /dev/null +++ b/backend/.sqlx/query-536556e00c2f7a05219adbbcf34ec25a2d548cf832193f6c6ac922c1979ea7d1.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET suspend = 0 WHERE parent_job = $1 AND suspend = $2 AND (flow_status->'step')::int = 0", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "536556e00c2f7a05219adbbcf34ec25a2d548cf832193f6c6ac922c1979ea7d1" +} diff --git a/backend/.sqlx/query-5401c521b5e63b7d9e7bc51c19d116599f6bcedbe70f3bf346b482fe79501958.json b/backend/.sqlx/query-5401c521b5e63b7d9e7bc51c19d116599f6bcedbe70f3bf346b482fe79501958.json new file mode 100644 index 0000000000000..9a1b65ff521bb --- /dev/null +++ b/backend/.sqlx/query-5401c521b5e63b7d9e7bc51c19d116599f6bcedbe70f3bf346b482fe79501958.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists 
ix_job_workspace_id_created_at_new_8 ON v2_job (workspace_id, created_at DESC) where kind in ('deploymentcallback') AND parent_job IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "5401c521b5e63b7d9e7bc51c19d116599f6bcedbe70f3bf346b482fe79501958" +} diff --git a/backend/.sqlx/query-58dd434aadbee8a55cf422246adc228d95574775a0824b29287e836f51433dc8.json b/backend/.sqlx/query-58dd434aadbee8a55cf422246adc228d95574775a0824b29287e836f51433dc8.json new file mode 100644 index 0000000000000..f4e9b9c3b3a8b --- /dev/null +++ b/backend/.sqlx/query-58dd434aadbee8a55cf422246adc228d95574775a0824b29287e836f51433dc8.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1), ARRAY['step'], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "58dd434aadbee8a55cf422246adc228d95574775a0824b29287e836f51433dc8" +} diff --git a/backend/.sqlx/query-596dcd11848e4196eb82f7fcf491db0d4077a186fab0a00f609bd16129f016f2.json b/backend/.sqlx/query-596dcd11848e4196eb82f7fcf491db0d4077a186fab0a00f609bd16129f016f2.json new file mode 100644 index 0000000000000..a663cedf6e60c --- /dev/null +++ b/backend/.sqlx/query-596dcd11848e4196eb82f7fcf491db0d4077a186fab0a00f609bd16129f016f2.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\"\n FROM v2_queue\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "596dcd11848e4196eb82f7fcf491db0d4077a186fab0a00f609bd16129f016f2" +} diff --git a/backend/.sqlx/query-5a335118fbe54a23269ef7a4ee1c4393d83e674e16d280c1281e6cba857ea65e.json b/backend/.sqlx/query-5a335118fbe54a23269ef7a4ee1c4393d83e674e16d280c1281e6cba857ea65e.json new file mode 100644 index 0000000000000..cf500e5bc7439 --- /dev/null +++ b/backend/.sqlx/query-5a335118fbe54a23269ef7a4ee1c4393d83e674e16d280c1281e6cba857ea65e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['preprocessor_module', 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['preprocessor_module', 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "5a335118fbe54a23269ef7a4ee1c4393d83e674e16d280c1281e6cba857ea65e" +} diff --git a/backend/.sqlx/query-5ac9630d8d339adb6023570875cd827b38b9f55333a86cb45bbd4c6fe51af023.json b/backend/.sqlx/query-5ac9630d8d339adb6023570875cd827b38b9f55333a86cb45bbd4c6fe51af023.json new file mode 100644 index 0000000000000..2810a877bd891 --- /dev/null +++ b/backend/.sqlx/query-5ac9630d8d339adb6023570875cd827b38b9f55333a86cb45bbd4c6fe51af023.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + 
"Uuid" + ] + }, + "nullable": [] + }, + "hash": "5ac9630d8d339adb6023570875cd827b38b9f55333a86cb45bbd4c6fe51af023" +} diff --git a/backend/.sqlx/query-5acc194f4081cd81677eae9f1fbfe694c3b1aa2769a131d805600e764c458cdc.json b/backend/.sqlx/query-5acc194f4081cd81677eae9f1fbfe694c3b1aa2769a131d805600e764c458cdc.json new file mode 100644 index 0000000000000..fd787fdf3b1cf --- /dev/null +++ b/backend/.sqlx/query-5acc194f4081cd81677eae9f1fbfe694c3b1aa2769a131d805600e764c458cdc.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_completed_job\n SET logs = '##DELETED##', args = '{}'::jsonb, result = '{}'::jsonb\n WHERE id = ANY($1)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [] + }, + "hash": "5acc194f4081cd81677eae9f1fbfe694c3b1aa2769a131d805600e764c458cdc" +} diff --git a/backend/.sqlx/query-5bce731932a35dbecc38c7b9665ef1117a15acf7d0d41b93de165e788b55d93f.json b/backend/.sqlx/query-5bce731932a35dbecc38c7b9665ef1117a15acf7d0d41b93de165e788b55d93f.json new file mode 100644 index 0000000000000..f3ff1e599dd35 --- /dev/null +++ b/backend/.sqlx/query-5bce731932a35dbecc38c7b9665ef1117a15acf7d0d41b93de165e788b55d93f.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", CONCAT(coalesce(completed_job.logs, ''), coalesce(job_logs.logs, '')) as logs, job_logs.log_offset, job_logs.log_file_index\n FROM completed_job \n LEFT JOIN job_logs ON job_logs.job_id = completed_job.id \n WHERE completed_job.id = $1 AND completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR completed_job.tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "log_file_index", + "type_info": "TextArray" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + null, + false, + true + ] + }, + "hash": "5bce731932a35dbecc38c7b9665ef1117a15acf7d0d41b93de165e788b55d93f" +} diff --git a/backend/.sqlx/query-5bedae1ed06d199f787158a724be75534db7e1f3e34d86de83c6a3f17cde492b.json b/backend/.sqlx/query-5bedae1ed06d199f787158a724be75534db7e1f3e34d86de83c6a3f17cde492b.json new file mode 100644 index 0000000000000..b3f4300d29aef --- /dev/null +++ b/backend/.sqlx/query-5bedae1ed06d199f787158a724be75534db7e1f3e34d86de83c6a3f17cde492b.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'branchall', 'branch'], ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb),\n last_ping = NULL\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'branchall'->>'branch')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "5bedae1ed06d199f787158a724be75534db7e1f3e34d86de83c6a3f17cde492b" +} diff --git a/backend/.sqlx/query-5c45748ab3df322f6a5cf4b52d9185ceecf81aaa9a2e4e53d926b70b1cde2771.json b/backend/.sqlx/query-5c45748ab3df322f6a5cf4b52d9185ceecf81aaa9a2e4e53d926b70b1cde2771.json new file mode 100644 index 0000000000000..f37cf1381d5b4 --- /dev/null +++ b/backend/.sqlx/query-5c45748ab3df322f6a5cf4b52d9185ceecf81aaa9a2e4e53d926b70b1cde2771.json @@ -0,0 +1,17 @@ +{ + 
"db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET\n flow_status = JSONB_SET(flow_status, ARRAY['modules', flow_status->>'step'::text], $1),\n suspend = $2,\n suspend_until = now() + $3\n WHERE id = $4", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Int4", + "Interval", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "5c45748ab3df322f6a5cf4b52d9185ceecf81aaa9a2e4e53d926b70b1cde2771" +} diff --git a/backend/.sqlx/query-5d0284028d0dc16fbbf353496a2204f9df256e03a588c23b33b4edab0d9c3e1a.json b/backend/.sqlx/query-5d0284028d0dc16fbbf353496a2204f9df256e03a588c23b33b4edab0d9c3e1a.json new file mode 100644 index 0000000000000..981c595d69156 --- /dev/null +++ b/backend/.sqlx/query-5d0284028d0dc16fbbf353496a2204f9df256e03a588c23b33b4edab0d9c3e1a.json @@ -0,0 +1,21 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_queue (\n id, workspace_id, started_at, scheduled_for, running, created_at, tag, priority\n ) SELECT unnest($1::uuid[]), $2, $3, $4, $5, $6, $7, $8", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray", + "Varchar", + "Timestamptz", + "Timestamptz", + "Bool", + "Timestamptz", + "Varchar", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "5d0284028d0dc16fbbf353496a2204f9df256e03a588c23b33b4edab0d9c3e1a" +} diff --git a/backend/.sqlx/query-5e4d30de66cc4289013e1f2da6c954c76fc4bfcd0e7a465cab6ef1386f67b61c.json b/backend/.sqlx/query-5e4d30de66cc4289013e1f2da6c954c76fc4bfcd0e7a465cab6ef1386f67b61c.json new file mode 100644 index 0000000000000..31c5f018ad32e --- /dev/null +++ b/backend/.sqlx/query-5e4d30de66cc4289013e1f2da6c954c76fc4bfcd0e7a465cab6ef1386f67b61c.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n success AS \"success!\",\n result AS \"result: Json>\",\n started_at AS \"started_at!\"FROM v2_completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4\n ORDER BY created_at DESC\n LIMIT $5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "started_at!", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Uuid", + "Int8" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "5e4d30de66cc4289013e1f2da6c954c76fc4bfcd0e7a465cab6ef1386f67b61c" +} diff --git a/backend/.sqlx/query-5fa2be2d293b8a215752642cf9ddf162960b9e35fe53f961d36f137453f854f0.json b/backend/.sqlx/query-5fa2be2d293b8a215752642cf9ddf162960b9e35fe53f961d36f137453f854f0.json new file mode 100644 index 0000000000000..621cf4e31f09a --- /dev/null +++ b/backend/.sqlx/query-5fa2be2d293b8a215752642cf9ddf162960b9e35fe53f961d36f137453f854f0.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT running AS \"running!\" FROM v2_queue WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "running!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "5fa2be2d293b8a215752642cf9ddf162960b9e35fe53f961d36f137453f854f0" +} diff --git a/backend/.sqlx/query-631beba7482e25cff1e89d00adf4f4f46fec5640b63a13f9ffaad56d40a47812.json b/backend/.sqlx/query-631beba7482e25cff1e89d00adf4f4f46fec5640b63a13f9ffaad56d40a47812.json new file mode 100644 index 0000000000000..f477a9ebc83f9 --- /dev/null +++ 
b/backend/.sqlx/query-631beba7482e25cff1e89d00adf4f4f46fec5640b63a13f9ffaad56d40a47812.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT root_job FROM v2_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "root_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "631beba7482e25cff1e89d00adf4f4f46fec5640b63a13f9ffaad56d40a47812" +} diff --git a/backend/.sqlx/query-639dbfa0c98d8b91006823f4c645a1105d6c1cc58990937c9bcf693a8812920c.json b/backend/.sqlx/query-639dbfa0c98d8b91006823f4c645a1105d6c1cc58990937c9bcf693a8812920c.json new file mode 100644 index 0000000000000..739be2be53248 --- /dev/null +++ b/backend/.sqlx/query-639dbfa0c98d8b91006823f4c645a1105d6c1cc58990937c9bcf693a8812920c.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result #> $3 AS \"result: Json>\"\n FROM completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + null + ] + }, + "hash": "639dbfa0c98d8b91006823f4c645a1105d6c1cc58990937c9bcf693a8812920c" +} diff --git a/backend/.sqlx/query-641087f3166faee8baad063fd569b61aa4d21a15a9bc06e0c2fd15b47eb7beb0.json b/backend/.sqlx/query-641087f3166faee8baad063fd569b61aa4d21a15a9bc06e0c2fd15b47eb7beb0.json new file mode 100644 index 0000000000000..b070f8eb7f689 --- /dev/null +++ b/backend/.sqlx/query-641087f3166faee8baad063fd569b61aa4d21a15a9bc06e0c2fd15b47eb7beb0.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id As \"id!\",\n flow_status->'restarted_from'->'flow_job_id' AS \"restarted_from: Json\"\n FROM queue\n WHERE COALESCE((SELECT root_job FROM queue WHERE id = $1), $1) = id AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "restarted_from: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + null + ] + }, + "hash": "641087f3166faee8baad063fd569b61aa4d21a15a9bc06e0c2fd15b47eb7beb0" +} diff --git a/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json b/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json index 1fa370e682ca6..75b8108281532 100644 --- a/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json +++ b/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json @@ -5,7 +5,7 @@ "columns": [ { "ordinal": 0, - "name": "bool", + "name": "?column?", "type_info": "Bool" } ], diff --git a/backend/.sqlx/query-66bf488f2eeaf5b4c4cb8c579d7a15eb87516c319c1fcbb3e46032bb9fdf718e.json b/backend/.sqlx/query-66bf488f2eeaf5b4c4cb8c579d7a15eb87516c319c1fcbb3e46032bb9fdf718e.json new file mode 100644 index 0000000000000..faf1201af104e --- /dev/null +++ b/backend/.sqlx/query-66bf488f2eeaf5b4c4cb8c579d7a15eb87516c319c1fcbb3e46032bb9fdf718e.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n success AS \"success!\",\n result AS \"result: Json>\",\n started_at AS \"started_at!\"\n FROM completed_job\n WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4\n ORDER BY created_at DESC\n LIMIT $5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": 
"Bool" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "started_at!", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Uuid", + "Int8" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "66bf488f2eeaf5b4c4cb8c579d7a15eb87516c319c1fcbb3e46032bb9fdf718e" +} diff --git a/backend/.sqlx/query-67227115cc54ff3f8f8c539ea8906d62775fe3f8b68e054db73d1eaadaa53bfd.json b/backend/.sqlx/query-67227115cc54ff3f8f8c539ea8906d62775fe3f8b68e054db73d1eaadaa53bfd.json new file mode 100644 index 0000000000000..8b8abcd8a37ca --- /dev/null +++ b/backend/.sqlx/query-67227115cc54ff3f8f8c539ea8906d62775fe3f8b68e054db73d1eaadaa53bfd.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT success AS \"success!\"\n FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "67227115cc54ff3f8f8c539ea8906d62775fe3f8b68e054db73d1eaadaa53bfd" +} diff --git a/backend/.sqlx/query-672363560895871e4ab19e0dd0dc36afdbc58470664b6cefa8cec25515a42f13.json b/backend/.sqlx/query-672363560895871e4ab19e0dd0dc36afdbc58470664b6cefa8cec25515a42f13.json new file mode 100644 index 0000000000000..53ed43a6e03bf --- /dev/null +++ b/backend/.sqlx/query-672363560895871e4ab19e0dd0dc36afdbc58470664b6cefa8cec25515a42f13.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result #> $3 AS \"result: Json>\"\n FROM completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + null + ] + }, + "hash": "672363560895871e4ab19e0dd0dc36afdbc58470664b6cefa8cec25515a42f13" +} diff --git a/backend/.sqlx/query-6b0115e40d4361b3ca72dbd071b0a8c0319c5ae0b92f289ec1e74d2478c9e740.json b/backend/.sqlx/query-6b0115e40d4361b3ca72dbd071b0a8c0319c5ae0b92f289ec1e74d2478c9e740.json new file mode 100644 index 0000000000000..265ededd17031 --- /dev/null +++ b/backend/.sqlx/query-6b0115e40d4361b3ca72dbd071b0a8c0319c5ae0b92f289ec1e74d2478c9e740.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", CONCAT(coalesce(queue.logs, ''), coalesce(job_logs.logs, '')) as logs, coalesce(job_logs.log_offset, 0) as log_offset, job_logs.log_file_index\n FROM queue \n LEFT JOIN job_logs ON job_logs.job_id = queue.id \n WHERE queue.id = $1 AND queue.workspace_id = $2 AND ($3::text[] IS NULL OR queue.tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "log_file_index", + "type_info": "TextArray" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + null, + null, + true + ] + }, + "hash": "6b0115e40d4361b3ca72dbd071b0a8c0319c5ae0b92f289ec1e74d2478c9e740" +} diff --git a/backend/.sqlx/query-6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85.json b/backend/.sqlx/query-6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85.json new file mode 100644 index 0000000000000..c498b1790a2fd --- /dev/null 
+++ b/backend/.sqlx/query-6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_completed SET workspace_id = $1 WHERE workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85" +} diff --git a/backend/.sqlx/query-6cb2c77bb90679a36189007b1f70406fe28923f51fc465ae0f45d7f317077bf5.json b/backend/.sqlx/query-6cb2c77bb90679a36189007b1f70406fe28923f51fc465ae0f45d7f317077bf5.json new file mode 100644 index 0000000000000..e41413a7bfd87 --- /dev/null +++ b/backend/.sqlx/query-6cb2c77bb90679a36189007b1f70406fe28923f51fc465ae0f45d7f317077bf5.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['cleanup_module', 'flow_jobs_to_clean'], COALESCE(flow_status->'cleanup_module'->'flow_jobs_to_clean', '[]'::jsonb) || $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "6cb2c77bb90679a36189007b1f70406fe28923f51fc465ae0f45d7f317077bf5" +} diff --git a/backend/.sqlx/query-6cd65694d617ef381e592ef03fbeeb25152c67b2a82c45ee4f64019076b1a167.json b/backend/.sqlx/query-6cd65694d617ef381e592ef03fbeeb25152c67b2a82c45ee4f64019076b1a167.json new file mode 100644 index 0000000000000..90ab5ca8eed70 --- /dev/null +++ b/backend/.sqlx/query-6cd65694d617ef381e592ef03fbeeb25152c67b2a82c45ee4f64019076b1a167.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = flow_status - 'retry'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "6cd65694d617ef381e592ef03fbeeb25152c67b2a82c45ee4f64019076b1a167" +} diff --git a/backend/.sqlx/query-6d5dceb0f6b2fe5287d0f7b158f45182e27141db8c7e2186d80c9948b302d531.json b/backend/.sqlx/query-6d5dceb0f6b2fe5287d0f7b158f45182e27141db8c7e2186d80c9948b302d531.json new file mode 100644 index 0000000000000..fa816e300f13a --- /dev/null +++ b/backend/.sqlx/query-6d5dceb0f6b2fe5287d0f7b158f45182e27141db8c7e2186d80c9948b302d531.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\"\n FROM v2_completed_job\n WHERE parent_job = $1 AND workspace_id = $2 AND flow_status IS NOT NULL", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status!: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "6d5dceb0f6b2fe5287d0f7b158f45182e27141db8c7e2186d80c9948b302d531" +} diff --git a/backend/.sqlx/query-6dffdd5066f109292ff14e0b81073c80b0126183c23f2996092d6e9124e37b9c.json b/backend/.sqlx/query-6dffdd5066f109292ff14e0b81073c80b0126183c23f2996092d6e9124e37b9c.json new file mode 100644 index 0000000000000..acea33426d857 --- /dev/null +++ b/backend/.sqlx/query-6dffdd5066f109292ff14e0b81073c80b0126183c23f2996092d6e9124e37b9c.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result AS \"result: SqlxJson>\", success AS \"success!\"\n FROM completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: SqlxJson>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "success!", + 
"type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "6dffdd5066f109292ff14e0b81073c80b0126183c23f2996092d6e9124e37b9c" +} diff --git a/backend/.sqlx/query-6f74f8befca28192ff457580922c0991974385350bdc1d9cd11c211269242e70.json b/backend/.sqlx/query-6f74f8befca28192ff457580922c0991974385350bdc1d9cd11c211269242e70.json new file mode 100644 index 0000000000000..730045955e658 --- /dev/null +++ b/backend/.sqlx/query-6f74f8befca28192ff457580922c0991974385350bdc1d9cd11c211269242e70.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET args = (select result FROM v2_completed_job WHERE id = $1) WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "6f74f8befca28192ff457580922c0991974385350bdc1d9cd11c211269242e70" +} diff --git a/backend/.sqlx/query-70dc88f1af928fc2046bb216a31c48a94ed020fb9398ed242610a190f809e12f.json b/backend/.sqlx/query-70dc88f1af928fc2046bb216a31c48a94ed020fb9398ed242610a190f809e12f.json new file mode 100644 index 0000000000000..34a4ec9df9659 --- /dev/null +++ b/backend/.sqlx/query-70dc88f1af928fc2046bb216a31c48a94ed020fb9398ed242610a190f809e12f.json @@ -0,0 +1,41 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n script_path, args AS \"args: sqlx::types::Json>>\",\n tag AS \"tag!\", priority\n FROM v2_completed_job\n WHERE id = $1 and workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "priority", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "70dc88f1af928fc2046bb216a31c48a94ed020fb9398ed242610a190f809e12f" +} diff --git a/backend/.sqlx/query-7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80.json b/backend/.sqlx/query-7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80.json new file mode 100644 index 0000000000000..77ab2c28bdf0e --- /dev/null +++ b/backend/.sqlx/query-7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job WHERE workspace_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [] + }, + "hash": "7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80" +} diff --git a/backend/.sqlx/query-74a2a90d12ca0179c8a80f9bf574066db4e8735c0f717d91391a28bf832c0e71.json b/backend/.sqlx/query-74a2a90d12ca0179c8a80f9bf574066db4e8735c0f717d91391a28bf832c0e71.json new file mode 100644 index 0000000000000..fc80d816cf311 --- /dev/null +++ b/backend/.sqlx/query-74a2a90d12ca0179c8a80f9bf574066db4e8735c0f717d91391a28bf832c0e71.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue SET canceled = true, canceled_by = $1, canceled_reason = $2, scheduled_for = now(), suspend = 0 WHERE id = $3 AND workspace_id = $4 AND (canceled = false OR canceled_reason != $2) RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": 
"74a2a90d12ca0179c8a80f9bf574066db4e8735c0f717d91391a28bf832c0e71" +} diff --git a/backend/.sqlx/query-75011df502302611691a9a332e64daa3ef2610a53a8819d49d356431969975f3.json b/backend/.sqlx/query-75011df502302611691a9a332e64daa3ef2610a53a8819d49d356431969975f3.json new file mode 100644 index 0000000000000..ec4f9e1dd8837 --- /dev/null +++ b/backend/.sqlx/query-75011df502302611691a9a332e64daa3ef2610a53a8819d49d356431969975f3.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['step'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "75011df502302611691a9a332e64daa3ef2610a53a8819d49d356431969975f3" +} diff --git a/backend/.sqlx/query-768bf629de97958b8060188ae6ccdde234625202448cc9a2131118975c4e3f31.json b/backend/.sqlx/query-768bf629de97958b8060188ae6ccdde234625202448cc9a2131118975c4e3f31.json new file mode 100644 index 0000000000000..d30b280e0ac2a --- /dev/null +++ b/backend/.sqlx/query-768bf629de97958b8060188ae6ccdde234625202448cc9a2131118975c4e3f31.json @@ -0,0 +1,67 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n job_kind AS \"job_kind!: JobKind\",\n script_hash AS \"script_hash: ScriptHash\",\n flow_status AS \"flow_status!: Json>\",\n raw_flow AS \"raw_flow: Json>\"\n FROM v2_queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "flow_status!: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "768bf629de97958b8060188ae6ccdde234625202448cc9a2131118975c4e3f31" +} diff --git a/backend/.sqlx/query-786d6618eec56d41be7731f0f07075e03d2f64d826a3817fc74197eb960e35c9.json b/backend/.sqlx/query-786d6618eec56d41be7731f0f07075e03d2f64d826a3817fc74197eb960e35c9.json new file mode 100644 index 0000000000000..45107e972138c --- /dev/null +++ b/backend/.sqlx/query-786d6618eec56d41be7731f0f07075e03d2f64d826a3817fc74197eb960e35c9.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE((SELECT MIN(started_at) as min_started_at\n FROM v2_queue\n WHERE script_path = $1 AND job_kind != 'dependencies' AND running = true AND workspace_id = $2 AND canceled = false AND concurrent_limit > 0), $3) as min_started_at, now() AS now", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "min_started_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 1, + "name": "now", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Timestamptz" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "786d6618eec56d41be7731f0f07075e03d2f64d826a3817fc74197eb960e35c9" +} diff --git a/backend/.sqlx/query-7907fb4b819090b16d44aefb08bd09e76b97ac5c96f604bdc1dfeb433ac5cffb.json 
b/backend/.sqlx/query-7907fb4b819090b16d44aefb08bd09e76b97ac5c96f604bdc1dfeb433ac5cffb.json new file mode 100644 index 0000000000000..a6d052d5b3155 --- /dev/null +++ b/backend/.sqlx/query-7907fb4b819090b16d44aefb08bd09e76b97ac5c96f604bdc1dfeb433ac5cffb.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET running = false\n , started_at = null\n , scheduled_for = $1\n , last_ping = null\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Timestamptz", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "7907fb4b819090b16d44aefb08bd09e76b97ac5c96f604bdc1dfeb433ac5cffb" +} diff --git a/backend/.sqlx/query-7993b4f2d46b0cf56f4cb784237ae8790ce4982a3d619fc50c4a0a2ee368c2e7.json b/backend/.sqlx/query-7993b4f2d46b0cf56f4cb784237ae8790ce4982a3d619fc50c4a0a2ee368c2e7.json new file mode 100644 index 0000000000000..6926189c247ce --- /dev/null +++ b/backend/.sqlx/query-7993b4f2d46b0cf56f4cb784237ae8790ce4982a3d619fc50c4a0a2ee368c2e7.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", CONCAT(coalesce(v2_completed_job.logs, ''), coalesce(job_logs.logs, '')) as logs, job_logs.log_offset, job_logs.log_file_index\n FROM v2_completed_job \n LEFT JOIN job_logs ON job_logs.job_id = v2_completed_job.id \n WHERE v2_completed_job.id = $1 AND v2_completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR v2_completed_job.tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "log_file_index", + "type_info": "TextArray" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + null, + false, + true + ] + }, + "hash": "7993b4f2d46b0cf56f4cb784237ae8790ce4982a3d619fc50c4a0a2ee368c2e7" +} diff --git a/backend/.sqlx/query-7a706906e461df376f5238fd1dda387ab71bc0c9d9ad0c3e7737138adb6cc224.json b/backend/.sqlx/query-7a706906e461df376f5238fd1dda387ab71bc0c9d9ad0c3e7737138adb6cc224.json new file mode 100644 index 0000000000000..d1db543ca0961 --- /dev/null +++ b/backend/.sqlx/query-7a706906e461df376f5238fd1dda387ab71bc0c9d9ad0c3e7737138adb6cc224.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET flow_status = jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], jsonb_set(jsonb_set('{}'::jsonb, '{scheduled_for}', to_jsonb(now()::text)), '{name}', to_jsonb($4::text))) WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "7a706906e461df376f5238fd1dda387ab71bc0c9d9ad0c3e7737138adb6cc224" +} diff --git a/backend/.sqlx/query-7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51.json b/backend/.sqlx/query-7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51.json new file mode 100644 index 0000000000000..b1ace4f3154eb --- /dev/null +++ b/backend/.sqlx/query-7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_completed SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": 
"7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51" +} diff --git a/backend/.sqlx/query-7dc7bc4e22942792938d273655962a95486f6da82cdc08f79dd6cef508256474.json b/backend/.sqlx/query-7dc7bc4e22942792938d273655962a95486f6da82cdc08f79dd6cef508256474.json new file mode 100644 index 0000000000000..94ad2087885d5 --- /dev/null +++ b/backend/.sqlx/query-7dc7bc4e22942792938d273655962a95486f6da82cdc08f79dd6cef508256474.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO completed_job AS cj\n ( workspace_id\n , id\n , parent_job\n , created_by\n , created_at\n , started_at\n , duration_ms\n , success\n , script_hash\n , script_path\n , args\n , result\n , raw_code\n , raw_lock\n , canceled\n , canceled_by\n , canceled_reason\n , job_kind\n , schedule_path\n , permissioned_as\n , flow_status\n , raw_flow\n , is_flow_step\n , is_skipped\n , language\n , email\n , visible_to_owner\n , mem_peak\n , tag\n , priority\n )\n SELECT workspace_id\n , id\n , parent_job\n , created_by\n , created_at\n , now()\n , 0\n , false\n , script_hash\n , script_path\n , args\n , $4\n , raw_code\n , raw_lock\n , true\n , $1\n , canceled_reason\n , job_kind\n , schedule_path\n , permissioned_as\n , flow_status\n , raw_flow\n , is_flow_step\n , false\n , language\n , email\n , visible_to_owner\n , mem_peak\n , tag\n , priority FROM queue \n WHERE id = any($2) AND running = false AND parent_job IS NULL AND workspace_id = $3 AND schedule_path IS NULL FOR UPDATE SKIP LOCKED\n ON CONFLICT (id) DO NOTHING RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "UuidArray", + "Text", + "Jsonb" + ] + }, + "nullable": [ + true + ] + }, + "hash": "7dc7bc4e22942792938d273655962a95486f6da82cdc08f79dd6cef508256474" +} diff --git a/backend/.sqlx/query-7e95417c9bf5812d5c24ff3ffaebd3ec590384ed64dd738302df10a6a783c75f.json b/backend/.sqlx/query-7e95417c9bf5812d5c24ff3ffaebd3ec590384ed64dd738302df10a6a783c75f.json new file mode 100644 index 0000000000000..07799d5196a03 --- /dev/null +++ b/backend/.sqlx/query-7e95417c9bf5812d5c24ff3ffaebd3ec590384ed64dd738302df10a6a783c75f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "7e95417c9bf5812d5c24ff3ffaebd3ec590384ed64dd738302df10a6a783c75f" +} diff --git a/backend/.sqlx/query-7f7feb429e0a7b4540469fb5e8ec532d4e08d5416b0566c34be201e98deab75c.json b/backend/.sqlx/query-7f7feb429e0a7b4540469fb5e8ec532d4e08d5416b0566c34be201e98deab75c.json new file mode 100644 index 0000000000000..e9c7d4acd8eb2 --- /dev/null +++ b/backend/.sqlx/query-7f7feb429e0a7b4540469fb5e8ec532d4e08d5416b0566c34be201e98deab75c.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(SELECT 1 FROM v2_completed_job WHERE id = $1 AND workspace_id = $2)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "7f7feb429e0a7b4540469fb5e8ec532d4e08d5416b0566c34be201e98deab75c" +} diff --git a/backend/.sqlx/query-82057acdd9bce4c67598e410dbca062bcfe0d319a330969e4a97fb0a714be923.json b/backend/.sqlx/query-82057acdd9bce4c67598e410dbca062bcfe0d319a330969e4a97fb0a714be923.json new 
file mode 100644 index 0000000000000..1d1526099ac39 --- /dev/null +++ b/backend/.sqlx/query-82057acdd9bce4c67598e410dbca062bcfe0d319a330969e4a97fb0a714be923.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['failure_module'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "82057acdd9bce4c67598e410dbca062bcfe0d319a330969e4a97fb0a714be923" +} diff --git a/backend/.sqlx/query-8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json b/backend/.sqlx/query-8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json new file mode 100644 index 0000000000000..0a5c7ae2c85ce --- /dev/null +++ b/backend/.sqlx/query-8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "Bool" + ] + }, + "nullable": [ + null + ] + }, + "hash": "8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f" +} diff --git a/backend/.sqlx/query-8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9.json b/backend/.sqlx/query-8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9.json new file mode 100644 index 0000000000000..4a7ebbc0f7142 --- /dev/null +++ b/backend/.sqlx/query-8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT parent_job\n FROM v2_job\n WHERE id = $1 AND workspace_id = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9" +} diff --git a/backend/.sqlx/query-8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898.json b/backend/.sqlx/query-8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898.json new file mode 100644 index 0000000000000..8390708116782 --- /dev/null +++ b/backend/.sqlx/query-8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET runnable_path = REGEXP_REPLACE(runnable_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\\1') WHERE runnable_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898" +} diff --git a/backend/.sqlx/query-858db2a501abcfffbcce19d60cc241060c93354de19f0fa80b8f45290e8b992d.json b/backend/.sqlx/query-858db2a501abcfffbcce19d60cc241060c93354de19f0fa80b8f45290e8b992d.json new file mode 100644 index 0000000000000..f3d1c484fe6a2 --- /dev/null +++ b/backend/.sqlx/query-858db2a501abcfffbcce19d60cc241060c93354de19f0fa80b8f45290e8b992d.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", 
+ "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "858db2a501abcfffbcce19d60cc241060c93354de19f0fa80b8f45290e8b992d" +} diff --git a/backend/.sqlx/query-866c1b86d63466df84877e81655ce999284f3d2d854a00fefcaf7c044dcf71ca.json b/backend/.sqlx/query-866c1b86d63466df84877e81655ce999284f3d2d854a00fefcaf7c044dcf71ca.json new file mode 100644 index 0000000000000..c4905590b645f --- /dev/null +++ b/backend/.sqlx/query-866c1b86d63466df84877e81655ce999284f3d2d854a00fefcaf7c044dcf71ca.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['failure_module', 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['failure_module', 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "866c1b86d63466df84877e81655ce999284f3d2d854a00fefcaf7c044dcf71ca" +} diff --git a/backend/.sqlx/query-867a864a81c9d649acc13c6c91b1c63c345d443ce636114e30ad6a0da464aa50.json b/backend/.sqlx/query-867a864a81c9d649acc13c6c91b1c63c345d443ce636114e30ad6a0da464aa50.json new file mode 100644 index 0000000000000..2a48be3e97163 --- /dev/null +++ b/backend/.sqlx/query-867a864a81c9d649acc13c6c91b1c63c345d443ce636114e30ad6a0da464aa50.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['user_states'], JSONB_SET(COALESCE(flow_status->'user_states', '{}'::jsonb), ARRAY[$1], $2))\n WHERE id = $3 AND workspace_id = $4 AND job_kind IN ('flow', 'flowpreview', 'flownode') RETURNING 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "867a864a81c9d649acc13c6c91b1c63c345d443ce636114e30ad6a0da464aa50" +} diff --git a/backend/.sqlx/query-86d5b121a8ae458ae3b050ecf6e3f82368534b77d3720200cb90829b03b55595.json b/backend/.sqlx/query-86d5b121a8ae458ae3b050ecf6e3f82368534b77d3720200cb90829b03b55595.json new file mode 100644 index 0000000000000..95e8f1c24a49e --- /dev/null +++ b/backend/.sqlx/query-86d5b121a8ae458ae3b050ecf6e3f82368534b77d3720200cb90829b03b55595.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result #> $3 AS \"result: sqlx::types::Json>\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n created_by AS \"created_by!\"\n FROM v2_completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($4::text[] IS NULL OR tag = ANY($4))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray", + "TextArray" + ] + }, + "nullable": [ + null, + true, + true, + true + ] + }, + "hash": 
"86d5b121a8ae458ae3b050ecf6e3f82368534b77d3720200cb90829b03b55595" +} diff --git a/backend/.sqlx/query-86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e.json b/backend/.sqlx/query-86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e.json new file mode 100644 index 0000000000000..70406daddded0 --- /dev/null +++ b/backend/.sqlx/query-86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "VACUUM (skip_locked) v2_job_queue, v2_job_runtime, v2_job_flow_runtime", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e" +} diff --git a/backend/.sqlx/query-8995f08b2e3097b21bfc519d0b89181ff9fd37e4f716de4b52e5a5960b8e6d30.json b/backend/.sqlx/query-8995f08b2e3097b21bfc519d0b89181ff9fd37e4f716de4b52e5a5960b8e6d30.json new file mode 100644 index 0000000000000..6946d2774b3af --- /dev/null +++ b/backend/.sqlx/query-8995f08b2e3097b21bfc519d0b89181ff9fd37e4f716de4b52e5a5960b8e6d30.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4),\n ARRAY['modules', $1::TEXT, 'iterator', 'index'],\n ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb\n ),\n last_ping = NULL\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'iterator'->>'index')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [ + null + ] + }, + "hash": "8995f08b2e3097b21bfc519d0b89181ff9fd37e4f716de4b52e5a5960b8e6d30" +} diff --git a/backend/.sqlx/query-8a5e36ed9dcc1712ec9b8ebc26cd76eab45b172da907b40fc87c6300944a0108.json b/backend/.sqlx/query-8a5e36ed9dcc1712ec9b8ebc26cd76eab45b172da907b40fc87c6300944a0108.json new file mode 100644 index 0000000000000..4a0022b5c0f2c --- /dev/null +++ b/backend/.sqlx/query-8a5e36ed9dcc1712ec9b8ebc26cd76eab45b172da907b40fc87c6300944a0108.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET suspend = 0 WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "8a5e36ed9dcc1712ec9b8ebc26cd76eab45b172da907b40fc87c6300944a0108" +} diff --git a/backend/.sqlx/query-8b244ce1a4ff15ed50163b9b48cb92bb5bba659766d44ad769a9f36ad905627a.json b/backend/.sqlx/query-8b244ce1a4ff15ed50163b9b48cb92bb5bba659766d44ad769a9f36ad905627a.json new file mode 100644 index 0000000000000..ec1068258c99a --- /dev/null +++ b/backend/.sqlx/query-8b244ce1a4ff15ed50163b9b48cb92bb5bba659766d44ad769a9f36ad905627a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_queue WHERE workspace_id = $1 AND id = $2 RETURNING 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "8b244ce1a4ff15ed50163b9b48cb92bb5bba659766d44ad769a9f36ad905627a" +} diff --git a/backend/.sqlx/query-8b882b7bb74443972929e0805d4f1adb0555ab8caf7240fa97c1d4da55e6cc95.json b/backend/.sqlx/query-8b882b7bb74443972929e0805d4f1adb0555ab8caf7240fa97c1d4da55e6cc95.json new file mode 100644 index 0000000000000..bb24c3f5ae12b --- /dev/null +++ 
b/backend/.sqlx/query-8b882b7bb74443972929e0805d4f1adb0555ab8caf7240fa97c1d4da55e6cc95.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT CAST(ROUND(AVG(duration_ms), 0) AS BIGINT) AS avg_duration_s FROM\n (SELECT duration_ms FROM concurrency_key LEFT JOIN v2_completed_job ON v2_completed_job.id = concurrency_key.job_id WHERE key = $1 AND ended_at IS NOT NULL\n ORDER BY ended_at\n DESC LIMIT 10) AS t", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "avg_duration_s", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "8b882b7bb74443972929e0805d4f1adb0555ab8caf7240fa97c1d4da55e6cc95" +} diff --git a/backend/.sqlx/query-8be277b89102a26dda506202a3ef7eb05342cfb3aa9b4f5d80c70fbc50d437ba.json b/backend/.sqlx/query-8be277b89102a26dda506202a3ef7eb05342cfb3aa9b4f5d80c70fbc50d437ba.json new file mode 100644 index 0000000000000..e5819f5c6ab3f --- /dev/null +++ b/backend/.sqlx/query-8be277b89102a26dda506202a3ef7eb05342cfb3aa9b4f5d80c70fbc50d437ba.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists ix_job_workspace_id_created_at_new_6 ON v2_job (workspace_id, created_at DESC) where kind in ('script', 'flow') AND parent_job IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "8be277b89102a26dda506202a3ef7eb05342cfb3aa9b4f5d80c70fbc50d437ba" +} diff --git a/backend/.sqlx/query-8d0b19f20452acb51a2ddaa1aca69b6d7287ca0b19cc2b400511434e7f57be07.json b/backend/.sqlx/query-8d0b19f20452acb51a2ddaa1aca69b6d7287ca0b19cc2b400511434e7f57be07.json new file mode 100644 index 0000000000000..dcd01c48343a3 --- /dev/null +++ b/backend/.sqlx/query-8d0b19f20452acb51a2ddaa1aca69b6d7287ca0b19cc2b400511434e7f57be07.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['cleanup_module', 'flow_jobs_to_clean'], COALESCE(flow_status->'cleanup_module'->'flow_jobs_to_clean', '[]'::jsonb) || $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "8d0b19f20452acb51a2ddaa1aca69b6d7287ca0b19cc2b400511434e7f57be07" +} diff --git a/backend/.sqlx/query-8d368d944beb43e0e8c32e83fe2bad5ff2221cde22000da636e7e33e7db6ddce.json b/backend/.sqlx/query-8d368d944beb43e0e8c32e83fe2bad5ff2221cde22000da636e7e33e7db6ddce.json new file mode 100644 index 0000000000000..5a8c755632b65 --- /dev/null +++ b/backend/.sqlx/query-8d368d944beb43e0e8c32e83fe2bad5ff2221cde22000da636e7e33e7db6ddce.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_completed_job SET args = '{\"reason\":\"PREPROCESSOR_ARGS_ARE_DISCARDED\"}'::jsonb WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "8d368d944beb43e0e8c32e83fe2bad5ff2221cde22000da636e7e33e7db6ddce" +} diff --git a/backend/.sqlx/query-8d3ded447c42747c9018053a068eec9c80e7fb29102a046792673c42f463c076.json b/backend/.sqlx/query-8d3ded447c42747c9018053a068eec9c80e7fb29102a046792673c42f463c076.json new file mode 100644 index 0000000000000..c94fae87ecb0c --- /dev/null +++ b/backend/.sqlx/query-8d3ded447c42747c9018053a068eec9c80e7fb29102a046792673c42f463c076.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['failure_module', 'job'], to_jsonb($1::UUID::TEXT)),\n 
ARRAY['failure_module', 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "8d3ded447c42747c9018053a068eec9c80e7fb29102a046792673c42f463c076" +} diff --git a/backend/.sqlx/query-8dd93be44f66c0744ddaff12a9664d9ad745a4a2bb4c0fa36d3caf77fa60e035.json b/backend/.sqlx/query-8dd93be44f66c0744ddaff12a9664d9ad745a4a2bb4c0fa36d3caf77fa60e035.json new file mode 100644 index 0000000000000..dead4b0a556fb --- /dev/null +++ b/backend/.sqlx/query-8dd93be44f66c0744ddaff12a9664d9ad745a4a2bb4c0fa36d3caf77fa60e035.json @@ -0,0 +1,54 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n running AS \"running!\",\n substr(concat(coalesce(queue.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs,\n mem_peak,\n CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\",\n job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset,\n created_by AS \"created_by!\"\n FROM queue\n LEFT JOIN job_logs ON job_logs.job_id = queue.id \n WHERE queue.workspace_id = $2 AND queue.id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "running!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "mem_peak", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Uuid" + ] + }, + "nullable": [ + true, + null, + true, + null, + null, + true + ] + }, + "hash": "8dd93be44f66c0744ddaff12a9664d9ad745a4a2bb4c0fa36d3caf77fa60e035" +} diff --git a/backend/.sqlx/query-8e2a03ebe1311e98b855a57693deaa210c01d980065864e5439ee726cf1f9bef.json b/backend/.sqlx/query-8e2a03ebe1311e98b855a57693deaa210c01d980065864e5439ee726cf1f9bef.json new file mode 100644 index 0000000000000..4d50f9e1df904 --- /dev/null +++ b/backend/.sqlx/query-8e2a03ebe1311e98b855a57693deaa210c01d980065864e5439ee726cf1f9bef.json @@ -0,0 +1,75 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\", success AS \"success!\",\n language AS \"language: ScriptLang\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n created_by AS \"created_by!\"\n FROM v2_completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true, + true, + true, + true + ] + }, + "hash": 
"8e2a03ebe1311e98b855a57693deaa210c01d980065864e5439ee726cf1f9bef" +} diff --git a/backend/.sqlx/query-902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950.json b/backend/.sqlx/query-902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950.json new file mode 100644 index 0000000000000..7a6d47ba81157 --- /dev/null +++ b/backend/.sqlx/query-902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950" +} diff --git a/backend/.sqlx/query-90d1c1ed47f1a9186cdde2f720fe86cde5ffefb4578d6e091855bc7a4e94d1b0.json b/backend/.sqlx/query-90d1c1ed47f1a9186cdde2f720fe86cde5ffefb4578d6e091855bc7a4e94d1b0.json new file mode 100644 index 0000000000000..259d59e3eab18 --- /dev/null +++ b/backend/.sqlx/query-90d1c1ed47f1a9186cdde2f720fe86cde5ffefb4578d6e091855bc7a4e94d1b0.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO script (\n summary, description, dedicated_worker, content, workspace_id, path, hash,\n language, tag, created_by, lock\n ) VALUES ('', '', true, $1, $2, $3, $4, $5, $6, $7, '')\n ON CONFLICT (workspace_id, hash) DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Varchar", + "Varchar", + "Int8", + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + }, + "Varchar", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "90d1c1ed47f1a9186cdde2f720fe86cde5ffefb4578d6e091855bc7a4e94d1b0" +} diff --git a/backend/.sqlx/query-9250b087485e51af83aef1f412c85edc114add7a6b6c1e3845ffda344effa03c.json b/backend/.sqlx/query-9250b087485e51af83aef1f412c85edc114add7a6b6c1e3845ffda344effa03c.json new file mode 100644 index 0000000000000..b46a8f1a22db2 --- /dev/null +++ b/backend/.sqlx/query-9250b087485e51af83aef1f412c85edc114add7a6b6c1e3845ffda344effa03c.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = flow_status - 'approval_conditions'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "9250b087485e51af83aef1f412c85edc114add7a6b6c1e3845ffda344effa03c" +} diff --git a/backend/.sqlx/query-92b80a77d292ec734b097b815261ce0cd51d7f699ff296314c9801e115c52228.json b/backend/.sqlx/query-92b80a77d292ec734b097b815261ce0cd51d7f699ff296314c9801e115c52228.json new file mode 100644 index 0000000000000..1975b59d5ac26 --- /dev/null +++ b/backend/.sqlx/query-92b80a77d292ec734b097b815261ce0cd51d7f699ff296314c9801e115c52228.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = JSONB_SET(JSONB_SET(flow_status, ARRAY['retry'], $1), ARRAY['modules', $3::TEXT, 'failed_retries'], $4)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "92b80a77d292ec734b097b815261ce0cd51d7f699ff296314c9801e115c52228" +} diff --git 
a/backend/.sqlx/query-931703a98d2ee5fb58d3380896baaee032e731db1e6bd49d991a54f49ab8fa46.json b/backend/.sqlx/query-931703a98d2ee5fb58d3380896baaee032e731db1e6bd49d991a54f49ab8fa46.json new file mode 100644 index 0000000000000..5f86a45011fe0 --- /dev/null +++ b/backend/.sqlx/query-931703a98d2ee5fb58d3380896baaee032e731db1e6bd49d991a54f49ab8fa46.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists ix_completed_job_workspace_id_started_at_new_2 ON v2_job_completed (workspace_id, started_at DESC)", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "931703a98d2ee5fb58d3380896baaee032e731db1e6bd49d991a54f49ab8fa46" +} diff --git a/backend/.sqlx/query-95d12525781746a0deb60572242366471a2945c4470712f512b5f82b12312109.json b/backend/.sqlx/query-95d12525781746a0deb60572242366471a2945c4470712f512b5f82b12312109.json new file mode 100644 index 0000000000000..bddbb520a0dfe --- /dev/null +++ b/backend/.sqlx/query-95d12525781746a0deb60572242366471a2945c4470712f512b5f82b12312109.json @@ -0,0 +1,91 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n v2_queue.job_kind AS \"job_kind!: JobKind\",\n v2_queue.script_hash AS \"script_hash: ScriptHash\",\n v2_queue.raw_flow AS \"raw_flow: sqlx::types::Json>\",\n v2_completed_job.parent_job AS \"parent_job: Uuid\",\n v2_completed_job.created_at AS \"created_at!: chrono::NaiveDateTime\",\n v2_completed_job.created_by AS \"created_by!\",\n v2_queue.script_path,\n v2_queue.args AS \"args: sqlx::types::Json>\"\n FROM v2_queue\n JOIN v2_completed_job ON v2_completed_job.parent_job = v2_queue.id\n WHERE v2_completed_job.id = $1 AND v2_completed_job.workspace_id = $2\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "raw_flow: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "parent_job: Uuid", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "created_at!: chrono::NaiveDateTime", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "95d12525781746a0deb60572242366471a2945c4470712f512b5f82b12312109" +} diff --git a/backend/.sqlx/query-96a9357888af26e5ec1e314bb565af05de561a8f9899e4ddca958982fdb67803.json b/backend/.sqlx/query-96a9357888af26e5ec1e314bb565af05de561a8f9899e4ddca958982fdb67803.json new file mode 100644 index 0000000000000..5fbab5bf206e2 --- /dev/null +++ b/backend/.sqlx/query-96a9357888af26e5ec1e314bb565af05de561a8f9899e4ddca958982fdb67803.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM queue WHERE schedule_path = $1 AND workspace_id = $2 AND id != $3 AND running = true", + "describe": 
{ + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "96a9357888af26e5ec1e314bb565af05de561a8f9899e4ddca958982fdb67803" +} diff --git a/backend/.sqlx/query-971a9eeb3d20be893655d972d51c0dd85ca930a5a725e8c6a0682b2dcc7f8a0e.json b/backend/.sqlx/query-971a9eeb3d20be893655d972d51c0dd85ca930a5a725e8c6a0682b2dcc7f8a0e.json new file mode 100644 index 0000000000000..f841dcedb1daa --- /dev/null +++ b/backend/.sqlx/query-971a9eeb3d20be893655d972d51c0dd85ca930a5a725e8c6a0682b2dcc7f8a0e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['failure_module'], $1), ARRAY['step'], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "971a9eeb3d20be893655d972d51c0dd85ca930a5a725e8c6a0682b2dcc7f8a0e" +} diff --git a/backend/.sqlx/query-979549eec044480a4d8d21a22530178c73c6102f22971e6ce67b1b4473bc8c6f.json b/backend/.sqlx/query-979549eec044480a4d8d21a22530178c73c6102f22971e6ce67b1b4473bc8c6f.json new file mode 100644 index 0000000000000..e5d1ab2ccabff --- /dev/null +++ b/backend/.sqlx/query-979549eec044480a4d8d21a22530178c73c6102f22971e6ce67b1b4473bc8c6f.json @@ -0,0 +1,71 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n email AS \"email!\",\n created_by AS \"created_by!\",\n parent_job, permissioned_as AS \"permissioned_as!\",\n script_path, schedule_path, flow_step_id, root_job,\n scheduled_for AS \"scheduled_for!: chrono::DateTime\"\n FROM v2_queue WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "email!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "parent_job", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "permissioned_as!", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "schedule_path", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "flow_step_id", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "root_job", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "scheduled_for!: chrono::DateTime", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "979549eec044480a4d8d21a22530178c73c6102f22971e6ce67b1b4473bc8c6f" +} diff --git a/backend/.sqlx/query-979fe49e07c27dbed06c3fab2ba896933219e14eb1129e67430d9723f227e48e.json b/backend/.sqlx/query-979fe49e07c27dbed06c3fab2ba896933219e14eb1129e67430d9723f227e48e.json new file mode 100644 index 0000000000000..97eda7d01eaa1 --- /dev/null +++ b/backend/.sqlx/query-979fe49e07c27dbed06c3fab2ba896933219e14eb1129e67430d9723f227e48e.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists root_job_index_by_path_2 ON v2_job (workspace_id, entity_path, created_at desc) WHERE parent_job IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "979fe49e07c27dbed06c3fab2ba896933219e14eb1129e67430d9723f227e48e" +} diff --git a/backend/.sqlx/query-992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff.json 
b/backend/.sqlx/query-992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff.json new file mode 100644 index 0000000000000..16e45505b8419 --- /dev/null +++ b/backend/.sqlx/query-992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_flow_runtime (id, flow_status) SELECT unnest($1::uuid[]), $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff" +} diff --git a/backend/.sqlx/query-9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8.json b/backend/.sqlx/query-9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8.json new file mode 100644 index 0000000000000..e73b49d4b0a6c --- /dev/null +++ b/backend/.sqlx/query-9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8" +} diff --git a/backend/.sqlx/query-9bdad9fbe8990588d8d769d4a38e2397ee789f6732199a5259f5f4ee2c5a166d.json b/backend/.sqlx/query-9bdad9fbe8990588d8d769d4a38e2397ee789f6732199a5259f5f4ee2c5a166d.json new file mode 100644 index 0000000000000..b49d76d6cf091 --- /dev/null +++ b/backend/.sqlx/query-9bdad9fbe8990588d8d769d4a38e2397ee789f6732199a5259f5f4ee2c5a166d.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\", result AS \"result: Json>\"\n FROM completed_job WHERE id = ANY($1) AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "UuidArray", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "9bdad9fbe8990588d8d769d4a38e2397ee789f6732199a5259f5f4ee2c5a166d" +} diff --git a/backend/.sqlx/query-9c34c717b218c09e3784a5413f7972e5e805dae837a075da4e503494624b518a.json b/backend/.sqlx/query-9c34c717b218c09e3784a5413f7972e5e805dae837a075da4e503494624b518a.json new file mode 100644 index 0000000000000..d22420d6e4d41 --- /dev/null +++ b/backend/.sqlx/query-9c34c717b218c09e3784a5413f7972e5e805dae837a075da4e503494624b518a.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['preprocessor_module', 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['preprocessor_module', 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "9c34c717b218c09e3784a5413f7972e5e805dae837a075da4e503494624b518a" +} diff --git a/backend/.sqlx/query-9d616812c5a6ae514f047ce2d035a07ff11d13472c25533824dec93c41ea609c.json b/backend/.sqlx/query-9d616812c5a6ae514f047ce2d035a07ff11d13472c25533824dec93c41ea609c.json new file mode 100644 index 0000000000000..7471bb0bf3a2e --- /dev/null +++ b/backend/.sqlx/query-9d616812c5a6ae514f047ce2d035a07ff11d13472c25533824dec93c41ea609c.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT args AS \"args: Json>>\"\n FROM 
completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args: Json>>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "9d616812c5a6ae514f047ce2d035a07ff11d13472c25533824dec93c41ea609c" +} diff --git a/backend/.sqlx/query-9e3985087c66b5bf64a48f4f77409b889a584d03f12b8189c3e817b06ee4b459.json b/backend/.sqlx/query-9e3985087c66b5bf64a48f4f77409b889a584d03f12b8189c3e817b06ee4b459.json new file mode 100644 index 0000000000000..9d63b24a7bf00 --- /dev/null +++ b/backend/.sqlx/query-9e3985087c66b5bf64a48f4f77409b889a584d03f12b8189c3e817b06ee4b459.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET canceled = true WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "9e3985087c66b5bf64a48f4f77409b889a584d03f12b8189c3e817b06ee4b459" +} diff --git a/backend/.sqlx/query-a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb.json b/backend/.sqlx/query-a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb.json new file mode 100644 index 0000000000000..298a66c89ccb5 --- /dev/null +++ b/backend/.sqlx/query-a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT count(*) AS \"count!\" FROM resume_job", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb" +} diff --git a/backend/.sqlx/query-a1eae6af90a6ee03b86fb1b96224c14ff1c98d08d22185e7db9fb042c2d95f97.json b/backend/.sqlx/query-a1eae6af90a6ee03b86fb1b96224c14ff1c98d08d22185e7db9fb042c2d95f97.json new file mode 100644 index 0000000000000..08282518d2e96 --- /dev/null +++ b/backend/.sqlx/query-a1eae6af90a6ee03b86fb1b96224c14ff1c98d08d22185e7db9fb042c2d95f97.json @@ -0,0 +1,54 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n running AS \"running!\",\n substr(concat(coalesce(v2_queue.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs,\n mem_peak,\n CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\",\n job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset,\n created_by AS \"created_by!\"\n FROM v2_queue\n LEFT JOIN job_logs ON job_logs.job_id = v2_queue.id \n WHERE v2_queue.workspace_id = $2 AND v2_queue.id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "running!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "mem_peak", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Uuid" + ] + }, + "nullable": [ + true, + null, + true, + null, + null, + true + ] + }, + "hash": "a1eae6af90a6ee03b86fb1b96224c14ff1c98d08d22185e7db9fb042c2d95f97" +} diff --git a/backend/.sqlx/query-a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1.json b/backend/.sqlx/query-a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1.json new file mode 100644 index 
0000000000000..fffd61d36011a --- /dev/null +++ b/backend/.sqlx/query-a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job_completed WHERE workspace_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [] + }, + "hash": "a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1" +} diff --git a/backend/.sqlx/query-a33e282d02c53e5d6142dc7e6882a6d8f3d068c55cddf209eb6b6431ca26c910.json b/backend/.sqlx/query-a33e282d02c53e5d6142dc7e6882a6d8f3d068c55cddf209eb6b6431ca26c910.json new file mode 100644 index 0000000000000..20d1bfb0459eb --- /dev/null +++ b/backend/.sqlx/query-a33e282d02c53e5d6142dc7e6882a6d8f3d068c55cddf209eb6b6431ca26c910.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM queue WHERE workspace_id = $1 and root_job = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "a33e282d02c53e5d6142dc7e6882a6d8f3d068c55cddf209eb6b6431ca26c910" +} diff --git a/backend/.sqlx/query-a3ef2d382a5584d47915296c131947b1d235325f4e6f6cb32e51f9c97a0c6285.json b/backend/.sqlx/query-a3ef2d382a5584d47915296c131947b1d235325f4e6f6cb32e51f9c97a0c6285.json new file mode 100644 index 0000000000000..3c5a32c66cb8e --- /dev/null +++ b/backend/.sqlx/query-a3ef2d382a5584d47915296c131947b1d235325f4e6f6cb32e51f9c97a0c6285.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4),\n ARRAY['modules', $1::TEXT, 'branchall', 'branch'], ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb),\n last_ping = NULL\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'branchall'->>'branch')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [ + null + ] + }, + "hash": "a3ef2d382a5584d47915296c131947b1d235325f4e6f6cb32e51f9c97a0c6285" +} diff --git a/backend/.sqlx/query-a49314e28fc285aac3a73bf4c5689c90f5fcf2905da0356fd015d9e9e9de6004.json b/backend/.sqlx/query-a49314e28fc285aac3a73bf4c5689c90f5fcf2905da0356fd015d9e9e9de6004.json new file mode 100644 index 0000000000000..d18e9d9606910 --- /dev/null +++ b/backend/.sqlx/query-a49314e28fc285aac3a73bf4c5689c90f5fcf2905da0356fd015d9e9e9de6004.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM v2_queue WHERE parent_job = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "a49314e28fc285aac3a73bf4c5689c90f5fcf2905da0356fd015d9e9e9de6004" +} diff --git a/backend/.sqlx/query-a4ebd27fad738b9320d1a2b4b3674aba094562f2c353f1cefe3af8c0d04d4687.json b/backend/.sqlx/query-a4ebd27fad738b9320d1a2b4b3674aba094562f2c353f1cefe3af8c0d04d4687.json new file mode 100644 index 0000000000000..7f19c49cee3a9 --- /dev/null +++ b/backend/.sqlx/query-a4ebd27fad738b9320d1a2b4b3674aba094562f2c353f1cefe3af8c0d04d4687.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_queue WHERE workspace_id = $1", + 
"describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "a4ebd27fad738b9320d1a2b4b3674aba094562f2c353f1cefe3af8c0d04d4687" +} diff --git a/backend/.sqlx/query-a4f55c2450e5bd55aaf2bc4348c3a72efddae2bda30fe27d3359ce848d6ffc98.json b/backend/.sqlx/query-a4f55c2450e5bd55aaf2bc4348c3a72efddae2bda30fe27d3359ce848d6ffc98.json new file mode 100644 index 0000000000000..21fe2cc3cea9b --- /dev/null +++ b/backend/.sqlx/query-a4f55c2450e5bd55aaf2bc4348c3a72efddae2bda30fe27d3359ce848d6ffc98.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n success AS \"success!\",\n result AS \"result: Json>\",\n started_at AS \"started_at!\"\n FROM v2_completed_job\n WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4\n ORDER BY created_at DESC\n LIMIT $5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "started_at!", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Uuid", + "Int8" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "a4f55c2450e5bd55aaf2bc4348c3a72efddae2bda30fe27d3359ce848d6ffc98" +} diff --git a/backend/.sqlx/query-a91798f58fa5948cd1739df4fa2e07cbb3eb08c5d2d22b057796e1156ae2a122.json b/backend/.sqlx/query-a91798f58fa5948cd1739df4fa2e07cbb3eb08c5d2d22b057796e1156ae2a122.json new file mode 100644 index 0000000000000..3dfa6d79e2cee --- /dev/null +++ b/backend/.sqlx/query-a91798f58fa5948cd1739df4fa2e07cbb3eb08c5d2d22b057796e1156ae2a122.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT leaf_jobs->$1::text AS \"leaf_jobs: Json>\", parent_job\n FROM queue\n WHERE COALESCE((SELECT root_job FROM queue WHERE id = $2), $2) = id AND workspace_id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "leaf_jobs: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "parent_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + null, + true + ] + }, + "hash": "a91798f58fa5948cd1739df4fa2e07cbb3eb08c5d2d22b057796e1156ae2a122" +} diff --git a/backend/.sqlx/query-aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b.json b/backend/.sqlx/query-aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b.json new file mode 100644 index 0000000000000..5ce9a0ea5b8cb --- /dev/null +++ b/backend/.sqlx/query-aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b" +} diff --git a/backend/.sqlx/query-ab093b3bef1cd60395cc763bfdb538590334508a4f6cdbbf759ee1e3a099bda4.json b/backend/.sqlx/query-ab093b3bef1cd60395cc763bfdb538590334508a4f6cdbbf759ee1e3a099bda4.json new file mode 100644 index 0000000000000..94181f2a4ddb9 --- /dev/null +++ b/backend/.sqlx/query-ab093b3bef1cd60395cc763bfdb538590334508a4f6cdbbf759ee1e3a099bda4.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT flow_status->'user_states'->$1\n FROM v2_queue\n WHERE id = 
$2 AND workspace_id = $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "ab093b3bef1cd60395cc763bfdb538590334508a4f6cdbbf759ee1e3a099bda4" +} diff --git a/backend/.sqlx/query-ae8dfecd46425d5f86003eea9a578e9831fc0e700cc76ab9627afe9040a4efe0.json b/backend/.sqlx/query-ae8dfecd46425d5f86003eea9a578e9831fc0e700cc76ab9627afe9040a4efe0.json new file mode 100644 index 0000000000000..c0703bc518664 --- /dev/null +++ b/backend/.sqlx/query-ae8dfecd46425d5f86003eea9a578e9831fc0e700cc76ab9627afe9040a4efe0.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists ix_job_workspace_id_created_at_new_7 ON v2_job (workspace_id, created_at DESC) where kind in ('script', 'flow') AND parent_job IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "ae8dfecd46425d5f86003eea9a578e9831fc0e700cc76ab9627afe9040a4efe0" +} diff --git a/backend/.sqlx/query-afc40924751740054bd9a02f4fad4eaa2ed964088f4f35a1f74f912867819bc8.json b/backend/.sqlx/query-afc40924751740054bd9a02f4fad4eaa2ed964088f4f35a1f74f912867819bc8.json new file mode 100644 index 0000000000000..0ba28d2b3ee3b --- /dev/null +++ b/backend/.sqlx/query-afc40924751740054bd9a02f4fad4eaa2ed964088f4f35a1f74f912867819bc8.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['approval_conditions'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "afc40924751740054bd9a02f4fad4eaa2ed964088f4f35a1f74f912867819bc8" +} diff --git a/backend/.sqlx/query-b06915e02398511033717ea13b710c86a24fe666884cfd49996dee961751ce51.json b/backend/.sqlx/query-b06915e02398511033717ea13b710c86a24fe666884cfd49996dee961751ce51.json new file mode 100644 index 0000000000000..ee6047940e521 --- /dev/null +++ b/backend/.sqlx/query-b06915e02398511033717ea13b710c86a24fe666884cfd49996dee961751ce51.json @@ -0,0 +1,88 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH uuid_table as (\n select unnest($11::uuid[]) as uuid\n )\n INSERT INTO queue \n (id, script_hash, script_path, job_kind, language, args, tag, created_by, permissioned_as, email, scheduled_for, workspace_id, concurrent_limit, concurrency_time_window_s, timeout, flow_status)\n (SELECT uuid, $1, $2, $3, $4, ('{ \"uuid\": \"' || uuid || '\" }')::jsonb, $5, $6, $7, $8, $9, $10, $12, $13, $14, $15 FROM uuid_table) \n RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Int8", + "Varchar", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp" + ] + } + } + }, + "Varchar", + "Varchar", + "Varchar", + "Varchar", + 
"Timestamptz", + "Varchar", + "UuidArray", + "Int4", + "Int4", + "Int4", + "Jsonb" + ] + }, + "nullable": [ + true + ] + }, + "hash": "b06915e02398511033717ea13b710c86a24fe666884cfd49996dee961751ce51" +} diff --git a/backend/.sqlx/query-b0890c1bac6931d848afd88539a0b766a018957c1a325940df8914b28df60aca.json b/backend/.sqlx/query-b0890c1bac6931d848afd88539a0b766a018957c1a325940df8914b28df60aca.json new file mode 100644 index 0000000000000..15b7b27002e71 --- /dev/null +++ b/backend/.sqlx/query-b0890c1bac6931d848afd88539a0b766a018957c1a325940df8914b28df60aca.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT tag AS \"tag!\", count(*) AS \"count!\" FROM queue WHERE\n scheduled_for <= now() - ('3 seconds')::interval AND running = false\n GROUP BY tag", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + null + ] + }, + "hash": "b0890c1bac6931d848afd88539a0b766a018957c1a325940df8914b28df60aca" +} diff --git a/backend/.sqlx/query-b3e41eaff54c5da5e38cff785c17b2d9e014be9d0794e72dc8566485e61492cd.json b/backend/.sqlx/query-b3e41eaff54c5da5e38cff785c17b2d9e014be9d0794e72dc8566485e61492cd.json new file mode 100644 index 0000000000000..ce125516d25ef --- /dev/null +++ b/backend/.sqlx/query-b3e41eaff54c5da5e38cff785c17b2d9e014be9d0794e72dc8566485e61492cd.json @@ -0,0 +1,41 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n script_path, args AS \"args: sqlx::types::Json>>\",\n tag AS \"tag!\", priority\n FROM completed_job\n WHERE id = $1 and workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "priority", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "b3e41eaff54c5da5e38cff785c17b2d9e014be9d0794e72dc8566485e61492cd" +} diff --git a/backend/.sqlx/query-b4900c683bffc10b9cbd035208ad26c89cb78e4c877fd161073beadfa9c9251e.json b/backend/.sqlx/query-b4900c683bffc10b9cbd035208ad26c89cb78e4c877fd161073beadfa9c9251e.json new file mode 100644 index 0000000000000..76a10ae529e66 --- /dev/null +++ b/backend/.sqlx/query-b4900c683bffc10b9cbd035208ad26c89cb78e4c877fd161073beadfa9c9251e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'approvers'], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "b4900c683bffc10b9cbd035208ad26c89cb78e4c877fd161073beadfa9c9251e" +} diff --git a/backend/.sqlx/query-b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46.json b/backend/.sqlx/query-b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46.json new file mode 100644 index 0000000000000..a49baeefaff5a --- /dev/null +++ b/backend/.sqlx/query-b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_runtime (id) SELECT unnest($1::uuid[])", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [] + }, + "hash": 
"b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46" +} diff --git a/backend/.sqlx/query-b544bb25abf9562738851c25711275a7027a5936539d766ecffeed13eb0e2fc3.json b/backend/.sqlx/query-b544bb25abf9562738851c25711275a7027a5936539d766ecffeed13eb0e2fc3.json new file mode 100644 index 0000000000000..48e072973f65c --- /dev/null +++ b/backend/.sqlx/query-b544bb25abf9562738851c25711275a7027a5936539d766ecffeed13eb0e2fc3.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT args AS \"args: Json>>\"\n FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args: Json>>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "b544bb25abf9562738851c25711275a7027a5936539d766ecffeed13eb0e2fc3" +} diff --git a/backend/.sqlx/query-b86590a0f3825889b930f584eba074b76e008f0acdd13720c84e987e8f7fc710.json b/backend/.sqlx/query-b86590a0f3825889b930f584eba074b76e008f0acdd13720c84e987e8f7fc710.json new file mode 100644 index 0000000000000..9f5ef95947422 --- /dev/null +++ b/backend/.sqlx/query-b86590a0f3825889b930f584eba074b76e008f0acdd13720c84e987e8f7fc710.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_queue LEFT JOIN concurrency_key ON concurrency_key.job_id = v2_queue.id\n WHERE key = $1 AND running = false AND canceled = false AND scheduled_for >= $2 AND scheduled_for < $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Timestamptz", + "Timestamptz" + ] + }, + "nullable": [ + null + ] + }, + "hash": "b86590a0f3825889b930f584eba074b76e008f0acdd13720c84e987e8f7fc710" +} diff --git a/backend/.sqlx/query-bb93ba18709648b47cfbd04d91afd3b38546b1a718d0abff6b2795d7c2a29c97.json b/backend/.sqlx/query-bb93ba18709648b47cfbd04d91afd3b38546b1a718d0abff6b2795d7c2a29c97.json new file mode 100644 index 0000000000000..d6288765fe4cd --- /dev/null +++ b/backend/.sqlx/query-bb93ba18709648b47cfbd04d91afd3b38546b1a718d0abff6b2795d7c2a29c97.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id AS \"id!\", flow_status, suspend AS \"suspend!\", script_path\n FROM queue\n WHERE id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "suspend!", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "bb93ba18709648b47cfbd04d91afd3b38546b1a718d0abff6b2795d7c2a29c97" +} diff --git a/backend/.sqlx/query-bba2f9b0ae5024c4cd3522eeb56c528b709e47f628266a020a1e04db2facd6d7.json b/backend/.sqlx/query-bba2f9b0ae5024c4cd3522eeb56c528b709e47f628266a020a1e04db2facd6d7.json new file mode 100644 index 0000000000000..09f6347fb9eb0 --- /dev/null +++ b/backend/.sqlx/query-bba2f9b0ae5024c4cd3522eeb56c528b709e47f628266a020a1e04db2facd6d7.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_queue WHERE email = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "bba2f9b0ae5024c4cd3522eeb56c528b709e47f628266a020a1e04db2facd6d7" +} diff --git 
a/backend/.sqlx/query-bedfb7153d8fcc936263372cc0001cd68b7bad0e5604d016ddc3e293ef5fd70e.json b/backend/.sqlx/query-bedfb7153d8fcc936263372cc0001cd68b7bad0e5604d016ddc3e293ef5fd70e.json new file mode 100644 index 0000000000000..80810625be2c5 --- /dev/null +++ b/backend/.sqlx/query-bedfb7153d8fcc936263372cc0001cd68b7bad0e5604d016ddc3e293ef5fd70e.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\"\n FROM v2_completed_job \n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "bedfb7153d8fcc936263372cc0001cd68b7bad0e5604d016ddc3e293ef5fd70e" +} diff --git a/backend/.sqlx/query-bf91cb319e5b83c2235292a9e3ce8aa1c097c94b01aad0d9f7bce76a2a272bcc.json b/backend/.sqlx/query-bf91cb319e5b83c2235292a9e3ce8aa1c097c94b01aad0d9f7bce76a2a272bcc.json new file mode 100644 index 0000000000000..25caf0d4fe6c0 --- /dev/null +++ b/backend/.sqlx/query-bf91cb319e5b83c2235292a9e3ce8aa1c097c94b01aad0d9f7bce76a2a272bcc.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT success AS \"success!\"\n FROM completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "bf91cb319e5b83c2235292a9e3ce8aa1c097c94b01aad0d9f7bce76a2a272bcc" +} diff --git a/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json b/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json index b378d68f5b09b..4843d959c190c 100644 --- a/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json +++ b/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json @@ -5,7 +5,7 @@ "columns": [ { "ordinal": 0, - "name": "bool", + "name": "?column?", "type_info": "Bool" } ], diff --git a/backend/.sqlx/query-c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747.json b/backend/.sqlx/query-c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747.json new file mode 100644 index 0000000000000..5cb05f49a0866 --- /dev/null +++ b/backend/.sqlx/query-c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT 1 FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747" +} diff --git a/backend/.sqlx/query-c0b96d2f421afc43e256a8475825623bcb3dd4cbc37d570fc4273127bbf77c24.json b/backend/.sqlx/query-c0b96d2f421afc43e256a8475825623bcb3dd4cbc37d570fc4273127bbf77c24.json new file mode 100644 index 0000000000000..2015ab01d61cc --- /dev/null +++ b/backend/.sqlx/query-c0b96d2f421afc43e256a8475825623bcb3dd4cbc37d570fc4273127bbf77c24.json @@ -0,0 +1,67 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n job_kind AS \"job_kind!: JobKind\",\n script_hash AS \"script_hash: ScriptHash\",\n flow_status AS 
\"flow_status!: Json>\",\n raw_flow AS \"raw_flow: Json>\"\n FROM queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "flow_status!: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "c0b96d2f421afc43e256a8475825623bcb3dd4cbc37d570fc4273127bbf77c24" +} diff --git a/backend/.sqlx/query-c2f2ae011a42a1d4d9f29d34e4c7ea70e3bff866a6f566c768408b5f527bef27.json b/backend/.sqlx/query-c2f2ae011a42a1d4d9f29d34e4c7ea70e3bff866a6f566c768408b5f527bef27.json new file mode 100644 index 0000000000000..8417e24d029ef --- /dev/null +++ b/backend/.sqlx/query-c2f2ae011a42a1d4d9f29d34e4c7ea70e3bff866a6f566c768408b5f527bef27.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT suspend > 0 AS \"r!\" FROM v2_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "r!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "c2f2ae011a42a1d4d9f29d34e4c7ea70e3bff866a6f566c768408b5f527bef27" +} diff --git a/backend/.sqlx/query-c33664546c6114d04bfaeaeb5215057e2e03053a71440559d0806d4aca258797.json b/backend/.sqlx/query-c33664546c6114d04bfaeaeb5215057e2e03053a71440559d0806d4aca258797.json new file mode 100644 index 0000000000000..3f9db2b73ea24 --- /dev/null +++ b/backend/.sqlx/query-c33664546c6114d04bfaeaeb5215057e2e03053a71440559d0806d4aca258797.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['retry'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "c33664546c6114d04bfaeaeb5215057e2e03053a71440559d0806d4aca258797" +} diff --git a/backend/.sqlx/query-c3b5abbf2c9079d597a55f7c63bc83b8b4da98bda204a40f045a62172cfb4ebb.json b/backend/.sqlx/query-c3b5abbf2c9079d597a55f7c63bc83b8b4da98bda204a40f045a62172cfb4ebb.json index b545c4859953d..3c5109c33faca 100644 --- a/backend/.sqlx/query-c3b5abbf2c9079d597a55f7c63bc83b8b4da98bda204a40f045a62172cfb4ebb.json +++ b/backend/.sqlx/query-c3b5abbf2c9079d597a55f7c63bc83b8b4da98bda204a40f045a62172cfb4ebb.json @@ -21,7 +21,7 @@ ] }, "nullable": [ - false, + true, null ] }, diff --git a/backend/.sqlx/query-c4bdbf1c6bc7d93db4cf2633105b088b781354cb7c02628d9f8ff7f9ea0e7ed9.json b/backend/.sqlx/query-c4bdbf1c6bc7d93db4cf2633105b088b781354cb7c02628d9f8ff7f9ea0e7ed9.json new file mode 100644 index 0000000000000..4d138db49c35f --- /dev/null +++ b/backend/.sqlx/query-c4bdbf1c6bc7d93db4cf2633105b088b781354cb7c02628d9f8ff7f9ea0e7ed9.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "CREATE INDEX CONCURRENTLY labeled_jobs_on_jobs ON v2_job_completed USING GIN ((result -> 'wm_labels')) WHERE result ? 
'wm_labels'", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "c4bdbf1c6bc7d93db4cf2633105b088b781354cb7c02628d9f8ff7f9ea0e7ed9" +} diff --git a/backend/.sqlx/query-c68af2428470b30146bd9a8e119f2242b439788840fee36cf03da7c8c0d888f5.json b/backend/.sqlx/query-c68af2428470b30146bd9a8e119f2242b439788840fee36cf03da7c8c0d888f5.json new file mode 100644 index 0000000000000..6fa80b6aca083 --- /dev/null +++ b/backend/.sqlx/query-c68af2428470b30146bd9a8e119f2242b439788840fee36cf03da7c8c0d888f5.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id As \"id!\",\n flow_status->'restarted_from'->'flow_job_id' AS \"restarted_from: Json\"\n FROM v2_queue\n WHERE COALESCE((SELECT root_job FROM v2_queue WHERE id = $1), $1) = id AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "restarted_from: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + null + ] + }, + "hash": "c68af2428470b30146bd9a8e119f2242b439788840fee36cf03da7c8c0d888f5" +} diff --git a/backend/.sqlx/query-c7be5fa2eaf66147c1213046e615f5e9fd168ef1e3aba8af64b15341055d6007.json b/backend/.sqlx/query-c7be5fa2eaf66147c1213046e615f5e9fd168ef1e3aba8af64b15341055d6007.json new file mode 100644 index 0000000000000..d79a7f54652d3 --- /dev/null +++ b/backend/.sqlx/query-c7be5fa2eaf66147c1213046e615f5e9fd168ef1e3aba8af64b15341055d6007.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists ix_job_workspace_id_created_at_new_5 ON v2_job (workspace_id, created_at DESC) where kind in ('preview', 'flowpreview') AND parent_job IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "c7be5fa2eaf66147c1213046e615f5e9fd168ef1e3aba8af64b15341055d6007" +} diff --git a/backend/.sqlx/query-c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3.json b/backend/.sqlx/query-c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3.json new file mode 100644 index 0000000000000..a3d8c2502f908 --- /dev/null +++ b/backend/.sqlx/query-c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE token SET last_used_at = now() WHERE\n token = $1\n AND (expiration > NOW() OR expiration IS NULL)\n AND (workspace_id IS NULL OR workspace_id = $2)\n RETURNING owner, email, super_admin, scopes, label", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "owner", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "super_admin", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "scopes", + "type_info": "TextArray" + }, + { + "ordinal": 4, + "name": "label", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + true, + true, + false, + true, + true + ] + }, + "hash": "c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3" +} diff --git a/backend/.sqlx/query-cd83b6127846baf104de3be4f7a69e14867957dfc518746f867dee1d5d06cb5a.json b/backend/.sqlx/query-cd83b6127846baf104de3be4f7a69e14867957dfc518746f867dee1d5d06cb5a.json new file mode 100644 index 0000000000000..518ba5906f887 --- /dev/null +++ b/backend/.sqlx/query-cd83b6127846baf104de3be4f7a69e14867957dfc518746f867dee1d5d06cb5a.json @@ -0,0 +1,15 @@ +{ + 
"db_name": "PostgreSQL", + "query": "DELETE FROM v2_queue WHERE schedule_path = $1 AND running = false AND workspace_id = $2 AND is_flow_step = false", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "cd83b6127846baf104de3be4f7a69e14867957dfc518746f867dee1d5d06cb5a" +} diff --git a/backend/.sqlx/query-cf65514f25af5a85d455be52f373933aacc1572370059815f5abf69c7aba420f.json b/backend/.sqlx/query-cf65514f25af5a85d455be52f373933aacc1572370059815f5abf69c7aba420f.json new file mode 100644 index 0000000000000..5b1fffd10368e --- /dev/null +++ b/backend/.sqlx/query-cf65514f25af5a85d455be52f373933aacc1572370059815f5abf69c7aba420f.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET flow_status = jsonb_set(\n jsonb_set(\n COALESCE(flow_status, '{}'::jsonb),\n array[$1],\n COALESCE(flow_status->$1, '{}'::jsonb)\n ),\n array[$1, 'duration_ms'],\n to_jsonb($2::bigint)\n ) WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "cf65514f25af5a85d455be52f373933aacc1572370059815f5abf69c7aba420f" +} diff --git a/backend/.sqlx/query-cf80f068b6a8906939f7ea0f1a8311fdabf78d6d5bd12e71070b1dae24df2352.json b/backend/.sqlx/query-cf80f068b6a8906939f7ea0f1a8311fdabf78d6d5bd12e71070b1dae24df2352.json new file mode 100644 index 0000000000000..7373aec488189 --- /dev/null +++ b/backend/.sqlx/query-cf80f068b6a8906939f7ea0f1a8311fdabf78d6d5bd12e71070b1dae24df2352.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'approvers'], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "cf80f068b6a8906939f7ea0f1a8311fdabf78d6d5bd12e71070b1dae24df2352" +} diff --git a/backend/.sqlx/query-d25e94bc84aa182c760259078d21f477193992a3b1a417318c1747f955a85f3c.json b/backend/.sqlx/query-d25e94bc84aa182c760259078d21f477193992a3b1a417318c1747f955a85f3c.json new file mode 100644 index 0000000000000..a727f03c84937 --- /dev/null +++ b/backend/.sqlx/query-d25e94bc84aa182c760259078d21f477193992a3b1a417318c1747f955a85f3c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT script_path FROM v2_completed_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "d25e94bc84aa182c760259078d21f477193992a3b1a417318c1747f955a85f3c" +} diff --git a/backend/.sqlx/query-d29c07b81377d4617289e9e7bd6355ddee00bf30f9e4acd000fc820b6a63d682.json b/backend/.sqlx/query-d29c07b81377d4617289e9e7bd6355ddee00bf30f9e4acd000fc820b6a63d682.json new file mode 100644 index 0000000000000..e5579950753da --- /dev/null +++ b/backend/.sqlx/query-d29c07b81377d4617289e9e7bd6355ddee00bf30f9e4acd000fc820b6a63d682.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT job_kind = 'identity' FROM v2_completed_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d29c07b81377d4617289e9e7bd6355ddee00bf30f9e4acd000fc820b6a63d682" +} diff --git a/backend/.sqlx/query-d357006fe927cab6cfc51b5087358b795433b77f67560f4e53cf36e57439018c.json 
b/backend/.sqlx/query-d357006fe927cab6cfc51b5087358b795433b77f67560f4e53cf36e57439018c.json new file mode 100644 index 0000000000000..4ba8cbc9e7170 --- /dev/null +++ b/backend/.sqlx/query-d357006fe927cab6cfc51b5087358b795433b77f67560f4e53cf36e57439018c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT script_path FROM v2_completed_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "d357006fe927cab6cfc51b5087358b795433b77f67560f4e53cf36e57439018c" +} diff --git a/backend/.sqlx/query-d5522859ae6234eaed85d75e0f139ab0ed411e9059a1541645c8014c466d77c2.json b/backend/.sqlx/query-d5522859ae6234eaed85d75e0f139ab0ed411e9059a1541645c8014c466d77c2.json new file mode 100644 index 0000000000000..6bcb1cddd0808 --- /dev/null +++ b/backend/.sqlx/query-d5522859ae6234eaed85d75e0f139ab0ed411e9059a1541645c8014c466d77c2.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id AS \"id!\", flow_status, suspend AS \"suspend!\", script_path\n FROM v2_queue\n WHERE id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "suspend!", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "d5522859ae6234eaed85d75e0f139ab0ed411e9059a1541645c8014c466d77c2" +} diff --git a/backend/.sqlx/query-d74d3511d394c7ab2931c413e5ae87df1799a0ea64822449350abab02ab570be.json b/backend/.sqlx/query-d74d3511d394c7ab2931c413e5ae87df1799a0ea64822449350abab02ab570be.json new file mode 100644 index 0000000000000..e4f5aed798a9c --- /dev/null +++ b/backend/.sqlx/query-d74d3511d394c7ab2931c413e5ae87df1799a0ea64822449350abab02ab570be.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['approval_conditions'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "d74d3511d394c7ab2931c413e5ae87df1799a0ea64822449350abab02ab570be" +} diff --git a/backend/.sqlx/query-d7ce28c7cbd4974c72969858659a2a5c7448c919ae522e91332fa9a6212f5ddf.json b/backend/.sqlx/query-d7ce28c7cbd4974c72969858659a2a5c7448c919ae522e91332fa9a6212f5ddf.json new file mode 100644 index 0000000000000..d1b5b0030c44b --- /dev/null +++ b/backend/.sqlx/query-d7ce28c7cbd4974c72969858659a2a5c7448c919ae522e91332fa9a6212f5ddf.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['retry'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "d7ce28c7cbd4974c72969858659a2a5c7448c919ae522e91332fa9a6212f5ddf" +} diff --git a/backend/.sqlx/query-d7f1e2920aec0f4eab9238d01370465945acdfa779f16b99cdc1a6b7ef84943e.json b/backend/.sqlx/query-d7f1e2920aec0f4eab9238d01370465945acdfa779f16b99cdc1a6b7ef84943e.json new file mode 100644 index 0000000000000..dbc893740bbe6 --- /dev/null +++ b/backend/.sqlx/query-d7f1e2920aec0f4eab9238d01370465945acdfa779f16b99cdc1a6b7ef84943e.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id AS \"id!\", 
flow_status, suspend AS \"suspend!\", script_path\n FROM queue\n WHERE id = ( SELECT parent_job FROM queue WHERE id = $1 UNION ALL SELECT parent_job FROM completed_job WHERE id = $1)\n FOR UPDATE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "suspend!", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "d7f1e2920aec0f4eab9238d01370465945acdfa779f16b99cdc1a6b7ef84943e" +} diff --git a/backend/.sqlx/query-d91a447f3abcd39559d614ab7d423d0287bd34e463967fbaf0a3d590b59c9865.json b/backend/.sqlx/query-d91a447f3abcd39559d614ab7d423d0287bd34e463967fbaf0a3d590b59c9865.json new file mode 100644 index 0000000000000..31ec3fdf38ea9 --- /dev/null +++ b/backend/.sqlx/query-d91a447f3abcd39559d614ab7d423d0287bd34e463967fbaf0a3d590b59c9865.json @@ -0,0 +1,74 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\", success AS \"success!\",\n language AS \"language: ScriptLang\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n created_by AS \"created_by!\"\n FROM completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true, + true, + true, + true + ] + }, + "hash": "d91a447f3abcd39559d614ab7d423d0287bd34e463967fbaf0a3d590b59c9865" +} diff --git a/backend/.sqlx/query-d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392.json b/backend/.sqlx/query-d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392.json new file mode 100644 index 0000000000000..739ec473468d5 --- /dev/null +++ b/backend/.sqlx/query-d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT raw_flow->'failure_module' != 'null'::jsonb FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392" +} diff --git a/backend/.sqlx/query-d9f7f5e82b32d3c0fbc1757e238be723ad0a17ad313844f75f6edbbddb5d6ef3.json b/backend/.sqlx/query-d9f7f5e82b32d3c0fbc1757e238be723ad0a17ad313844f75f6edbbddb5d6ef3.json new file mode 100644 index 0000000000000..de40f2989dc12 --- /dev/null +++ b/backend/.sqlx/query-d9f7f5e82b32d3c0fbc1757e238be723ad0a17ad313844f75f6edbbddb5d6ef3.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + 
"query": "SELECT result AS \"result: SqlxJson>\", success AS \"success!\"\n FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: SqlxJson>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "d9f7f5e82b32d3c0fbc1757e238be723ad0a17ad313844f75f6edbbddb5d6ef3" +} diff --git a/backend/.sqlx/query-dbfbb6f56ce9bc832ac8f529cd2a261b6575faecd147926e0a742b8c893de1a3.json b/backend/.sqlx/query-dbfbb6f56ce9bc832ac8f529cd2a261b6575faecd147926e0a742b8c893de1a3.json new file mode 100644 index 0000000000000..85e2ec011d284 --- /dev/null +++ b/backend/.sqlx/query-dbfbb6f56ce9bc832ac8f529cd2a261b6575faecd147926e0a742b8c893de1a3.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT usr.email, usage.executions\n FROM usr\n , LATERAL (\n SELECT COALESCE(SUM(duration_ms + 1000)/1000 , 0)::BIGINT executions\n FROM v2_completed_job\n WHERE workspace_id = $1\n AND job_kind NOT IN ('flow', 'flowpreview', 'flownode')\n AND email = usr.email\n AND now() - '1 week'::interval < created_at \n ) usage\n WHERE workspace_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "executions", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + null + ] + }, + "hash": "dbfbb6f56ce9bc832ac8f529cd2a261b6575faecd147926e0a742b8c893de1a3" +} diff --git a/backend/.sqlx/query-dd10cbb77c6137cd8bde7a3ab0531cd1299f0fb5a21613cb2069a9b4421a8069.json b/backend/.sqlx/query-dd10cbb77c6137cd8bde7a3ab0531cd1299f0fb5a21613cb2069a9b4421a8069.json new file mode 100644 index 0000000000000..1c535f156ca31 --- /dev/null +++ b/backend/.sqlx/query-dd10cbb77c6137cd8bde7a3ab0531cd1299f0fb5a21613cb2069a9b4421a8069.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET last_ping = null\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "dd10cbb77c6137cd8bde7a3ab0531cd1299f0fb5a21613cb2069a9b4421a8069" +} diff --git a/backend/.sqlx/query-df533f1988e409b70a3e0966825d01993cd52e8e85943440081b8dbd3b9ae5a4.json b/backend/.sqlx/query-df533f1988e409b70a3e0966825d01993cd52e8e85943440081b8dbd3b9ae5a4.json new file mode 100644 index 0000000000000..a4f3b9678286e --- /dev/null +++ b/backend/.sqlx/query-df533f1988e409b70a3e0966825d01993cd52e8e85943440081b8dbd3b9ae5a4.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue\n SET flow_status = flow_status - 'retry'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "df533f1988e409b70a3e0966825d01993cd52e8e85943440081b8dbd3b9ae5a4" +} diff --git a/backend/.sqlx/query-e0fffa04a21586fe01ea575373f240a56ef7687335930d1587f934dd68d4ee81.json b/backend/.sqlx/query-e0fffa04a21586fe01ea575373f240a56ef7687335930d1587f934dd68d4ee81.json new file mode 100644 index 0000000000000..4306b4a622530 --- /dev/null +++ b/backend/.sqlx/query-e0fffa04a21586fe01ea575373f240a56ef7687335930d1587f934dd68d4ee81.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS (\n SELECT 1 FROM v2_completed_job \n WHERE workspace_id = $2 \n AND (job_kind = 'appscript' OR job_kind = 'preview')\n AND created_by = 'anonymous' \n AND started_at > 
now() - interval '3 hours'\n AND script_path LIKE $3 || '/%' \n AND result @> ('{\"s3\":\"' || $1 || '\"}')::jsonb \n )", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e0fffa04a21586fe01ea575373f240a56ef7687335930d1587f934dd68d4ee81" +} diff --git a/backend/.sqlx/query-e1339acd0338c02e6f92edddd57863aba209ffe7160c7b5790aed987f1441104.json b/backend/.sqlx/query-e1339acd0338c02e6f92edddd57863aba209ffe7160c7b5790aed987f1441104.json new file mode 100644 index 0000000000000..e2c9f1236968e --- /dev/null +++ b/backend/.sqlx/query-e1339acd0338c02e6f92edddd57863aba209ffe7160c7b5790aed987f1441104.json @@ -0,0 +1,69 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n created_by AS \"created_by!\"\n FROM v2_completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "e1339acd0338c02e6f92edddd57863aba209ffe7160c7b5790aed987f1441104" +} diff --git a/backend/.sqlx/query-e19d0f30bd84680302ff26dd037ceb16d0174118fdc99132bcf7d9461ff998f8.json b/backend/.sqlx/query-e19d0f30bd84680302ff26dd037ceb16d0174118fdc99132bcf7d9461ff998f8.json new file mode 100644 index 0000000000000..87e9c3aa79cd9 --- /dev/null +++ b/backend/.sqlx/query-e19d0f30bd84680302ff26dd037ceb16d0174118fdc99132bcf7d9461ff998f8.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT flow_status->'failure_module'->>'parent_module' FROM v2_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e19d0f30bd84680302ff26dd037ceb16d0174118fdc99132bcf7d9461ff998f8" +} diff --git a/backend/.sqlx/query-e336b19cd0e4717fe1eb49c8ddd6b6b5c227d100819c6d6231ffd606d1d2986e.json b/backend/.sqlx/query-e336b19cd0e4717fe1eb49c8ddd6b6b5c227d100819c6d6231ffd606d1d2986e.json new file mode 100644 index 0000000000000..87e75979d52c8 --- /dev/null +++ b/backend/.sqlx/query-e336b19cd0e4717fe1eb49c8ddd6b6b5c227d100819c6d6231ffd606d1d2986e.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result #> $3 AS \"result: Json>\"\n FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + null + ] + }, + "hash": 
"e336b19cd0e4717fe1eb49c8ddd6b6b5c227d100819c6d6231ffd606d1d2986e" +} diff --git a/backend/.sqlx/query-e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8.json b/backend/.sqlx/query-e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8.json new file mode 100644 index 0000000000000..b14b7ce59d871 --- /dev/null +++ b/backend/.sqlx/query-e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT q.id, f.flow_status, q.suspend, j.runnable_path AS script_path\n FROM v2_job_queue q\n JOIN v2_job j USING (id)\n JOIN v2_job_flow_runtime f USING (id)\n WHERE id = ( SELECT parent_job FROM v2_job WHERE id = $1 )\n FOR UPDATE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "suspend", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true + ] + }, + "hash": "e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8" +} diff --git a/backend/.sqlx/query-e495f9817420619cdec4e54f920635301be35d5d1813273bef13524f8fbec6b7.json b/backend/.sqlx/query-e495f9817420619cdec4e54f920635301be35d5d1813273bef13524f8fbec6b7.json new file mode 100644 index 0000000000000..4eae891537a40 --- /dev/null +++ b/backend/.sqlx/query-e495f9817420619cdec4e54f920635301be35d5d1813273bef13524f8fbec6b7.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result #> $3 AS \"result: Json>\"\n FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e495f9817420619cdec4e54f920635301be35d5d1813273bef13524f8fbec6b7" +} diff --git a/backend/.sqlx/query-eb499eb563839de4c94d9ffb88936f4441b038e9992eebd64fda43a3f5dd8b10.json b/backend/.sqlx/query-eb499eb563839de4c94d9ffb88936f4441b038e9992eebd64fda43a3f5dd8b10.json new file mode 100644 index 0000000000000..9d2ed125e68f8 --- /dev/null +++ b/backend/.sqlx/query-eb499eb563839de4c94d9ffb88936f4441b038e9992eebd64fda43a3f5dd8b10.json @@ -0,0 +1,48 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n substr(concat(coalesce(v2_completed_job.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs,\n mem_peak,\n CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\",\n job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset,\n created_by AS \"created_by!\"\n FROM v2_completed_job\n LEFT JOIN job_logs ON job_logs.job_id = v2_completed_job.id \n WHERE v2_completed_job.workspace_id = $2 AND v2_completed_job.id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "mem_peak", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Uuid" + ] + }, + "nullable": [ + null, + true, + null, + null, + true + ] + }, + "hash": 
"eb499eb563839de4c94d9ffb88936f4441b038e9992eebd64fda43a3f5dd8b10" +} diff --git a/backend/.sqlx/query-ebd8349ebda8ecb57e15257233a7ac50acac152fc0a5315e90fce6d0abeac652.json b/backend/.sqlx/query-ebd8349ebda8ecb57e15257233a7ac50acac152fc0a5315e90fce6d0abeac652.json new file mode 100644 index 0000000000000..55dc29c3a273c --- /dev/null +++ b/backend/.sqlx/query-ebd8349ebda8ecb57e15257233a7ac50acac152fc0a5315e90fce6d0abeac652.json @@ -0,0 +1,67 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n job_kind AS \"job_kind!: JobKind\",\n script_hash,\n flow_status AS \"flow_status!: Json>\",\n raw_flow AS \"raw_flow: Json>\"\n FROM v2_queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "flow_status!: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "ebd8349ebda8ecb57e15257233a7ac50acac152fc0a5315e90fce6d0abeac652" +} diff --git a/backend/.sqlx/query-ecbfd613827163b2e7963a7436a01f39cd4c22ee155d11f173f16ff23af2ffda.json b/backend/.sqlx/query-ecbfd613827163b2e7963a7436a01f39cd4c22ee155d11f173f16ff23af2ffda.json new file mode 100644 index 0000000000000..e8e0aceb743bc --- /dev/null +++ b/backend/.sqlx/query-ecbfd613827163b2e7963a7436a01f39cd4c22ee155d11f173f16ff23af2ffda.json @@ -0,0 +1,198 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT schedule.*, t.jobs FROM schedule, LATERAL ( SELECT ARRAY (SELECT json_build_object('id', id, 'success', success, 'duration_ms', duration_ms) FROM v2_completed_job WHERE\n v2_completed_job.schedule_path = schedule.path AND v2_completed_job.workspace_id = $1 AND parent_job IS NULL AND is_skipped = False ORDER BY started_at DESC LIMIT 20) AS jobs ) t\n WHERE schedule.workspace_id = $1 ORDER BY schedule.edited_at desc LIMIT $2 OFFSET $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "edited_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 4, + "name": "schedule", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "enabled", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "args", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "extra_perms", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 10, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 11, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 12, + "name": "timezone", + "type_info": "Varchar" + }, + { + "ordinal": 13, + "name": "on_failure", + "type_info": "Varchar" + }, + { + "ordinal": 14, + "name": "on_recovery", + "type_info": "Varchar" + }, 
+ { + "ordinal": 15, + "name": "on_failure_times", + "type_info": "Int4" + }, + { + "ordinal": 16, + "name": "on_failure_exact", + "type_info": "Bool" + }, + { + "ordinal": 17, + "name": "on_failure_extra_args", + "type_info": "Json" + }, + { + "ordinal": 18, + "name": "on_recovery_times", + "type_info": "Int4" + }, + { + "ordinal": 19, + "name": "on_recovery_extra_args", + "type_info": "Json" + }, + { + "ordinal": 20, + "name": "ws_error_handler_muted", + "type_info": "Bool" + }, + { + "ordinal": 21, + "name": "retry", + "type_info": "Jsonb" + }, + { + "ordinal": 22, + "name": "summary", + "type_info": "Varchar" + }, + { + "ordinal": 23, + "name": "no_flow_overlap", + "type_info": "Bool" + }, + { + "ordinal": 24, + "name": "tag", + "type_info": "Varchar" + }, + { + "ordinal": 25, + "name": "paused_until", + "type_info": "Timestamptz" + }, + { + "ordinal": 26, + "name": "on_success", + "type_info": "Varchar" + }, + { + "ordinal": 27, + "name": "on_success_extra_args", + "type_info": "Json" + }, + { + "ordinal": 28, + "name": "cron_version", + "type_info": "Text" + }, + { + "ordinal": 29, + "name": "jobs", + "type_info": "JsonArray" + } + ], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + true, + false, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + false, + true, + true, + true, + true, + true, + null + ] + }, + "hash": "ecbfd613827163b2e7963a7436a01f39cd4c22ee155d11f173f16ff23af2ffda" +} diff --git a/backend/.sqlx/query-ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3.json b/backend/.sqlx/query-ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3.json new file mode 100644 index 0000000000000..7431f3e1fbc05 --- /dev/null +++ b/backend/.sqlx/query-ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job WHERE id = ANY($1)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [] + }, + "hash": "ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3" +} diff --git a/backend/.sqlx/query-ef20932aac9dec17eef7853aeacd8bfdf7f5cd9ad3865416e7aa0a09fd135a99.json b/backend/.sqlx/query-ef20932aac9dec17eef7853aeacd8bfdf7f5cd9ad3865416e7aa0a09fd135a99.json new file mode 100644 index 0000000000000..cbf7ed056a919 --- /dev/null +++ b/backend/.sqlx/query-ef20932aac9dec17eef7853aeacd8bfdf7f5cd9ad3865416e7aa0a09fd135a99.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE v2_queue\n SET canceled = true\n , canceled_by = 'timeout'\n , canceled_reason = $1\n WHERE id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "ef20932aac9dec17eef7853aeacd8bfdf7f5cd9ad3865416e7aa0a09fd135a99" +} diff --git a/backend/.sqlx/query-ef387687bd247fccdab8b0f8086c0dfcb7feab3426b1365e6be960530973e3ef.json b/backend/.sqlx/query-ef387687bd247fccdab8b0f8086c0dfcb7feab3426b1365e6be960530973e3ef.json new file mode 100644 index 0000000000000..5d0c0f90f490b --- /dev/null +++ b/backend/.sqlx/query-ef387687bd247fccdab8b0f8086c0dfcb7feab3426b1365e6be960530973e3ef.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['modules', $4::INTEGER::TEXT, 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['modules', 
$4::INTEGER::TEXT, 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "ef387687bd247fccdab8b0f8086c0dfcb7feab3426b1365e6be960530973e3ef" +} diff --git a/backend/.sqlx/query-ef8868893643a1a71531c1113d5cb38c5c204b3bc34c921b2f653c738af556a9.json b/backend/.sqlx/query-ef8868893643a1a71531c1113d5cb38c5c204b3bc34c921b2f653c738af556a9.json new file mode 100644 index 0000000000000..055944bd706a8 --- /dev/null +++ b/backend/.sqlx/query-ef8868893643a1a71531c1113d5cb38c5c204b3bc34c921b2f653c738af556a9.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\"\n FROM completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status!: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "ef8868893643a1a71531c1113d5cb38c5c204b3bc34c921b2f653c738af556a9" +} diff --git a/backend/.sqlx/query-efe6d8ff451eeab03bdad90d1b2283b63e99b0c7e1cdddac029e218bc31c5dcd.json b/backend/.sqlx/query-efe6d8ff451eeab03bdad90d1b2283b63e99b0c7e1cdddac029e218bc31c5dcd.json new file mode 100644 index 0000000000000..643701e2ff34e --- /dev/null +++ b/backend/.sqlx/query-efe6d8ff451eeab03bdad90d1b2283b63e99b0c7e1cdddac029e218bc31c5dcd.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT canceled AS \"canceled!\" FROM v2_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "efe6d8ff451eeab03bdad90d1b2283b63e99b0c7e1cdddac029e218bc31c5dcd" +} diff --git a/backend/.sqlx/query-f03794eda2e3ad0b2d30854120cb3e78a9129aaacf24c84c6ecee7819845b3dc.json b/backend/.sqlx/query-f03794eda2e3ad0b2d30854120cb3e78a9129aaacf24c84c6ecee7819845b3dc.json new file mode 100644 index 0000000000000..72b02b68d18b8 --- /dev/null +++ b/backend/.sqlx/query-f03794eda2e3ad0b2d30854120cb3e78a9129aaacf24c84c6ecee7819845b3dc.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'iterator', 'index'], ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb),\n last_ping = NULL\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'iterator'->>'index')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "f03794eda2e3ad0b2d30854120cb3e78a9129aaacf24c84c6ecee7819845b3dc" +} diff --git a/backend/.sqlx/query-f1280cedcac66a0257b45c2d1ec9f3c5e8efd23a323194f2210ae4e98de52358.json b/backend/.sqlx/query-f1280cedcac66a0257b45c2d1ec9f3c5e8efd23a323194f2210ae4e98de52358.json new file mode 100644 index 0000000000000..c943ecad92862 --- /dev/null +++ b/backend/.sqlx/query-f1280cedcac66a0257b45c2d1ec9f3c5e8efd23a323194f2210ae4e98de52358.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT coalesce(COUNT(*) FILTER(WHERE suspend = 0 AND running = false), 0) as \"database_length!\", coalesce(COUNT(*) FILTER(WHERE suspend > 0), 0) as \"suspended!\" FROM v2_queue 
WHERE (workspace_id = $1 OR $2) AND scheduled_for <= now()", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "database_length!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "suspended!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Bool" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "f1280cedcac66a0257b45c2d1ec9f3c5e8efd23a323194f2210ae4e98de52358" +} diff --git a/backend/.sqlx/query-f23941db8b1bac5964788985c3651be202adc8e6e591fca66df87d3dd7b74a2d.json b/backend/.sqlx/query-f23941db8b1bac5964788985c3651be202adc8e6e591fca66df87d3dd7b74a2d.json new file mode 100644 index 0000000000000..e54eb5bf65d8a --- /dev/null +++ b/backend/.sqlx/query-f23941db8b1bac5964788985c3651be202adc8e6e591fca66df87d3dd7b74a2d.json @@ -0,0 +1,68 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n success AS \"success!\"\n FROM v2_completed_job\n WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 2, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "f23941db8b1bac5964788985c3651be202adc8e6e591fca66df87d3dd7b74a2d" +} diff --git a/backend/.sqlx/query-f29d3d500ae3fe6c775b6ec8eb3e5c36cf0d4759f708ea27dc68e80a737c564d.json b/backend/.sqlx/query-f29d3d500ae3fe6c775b6ec8eb3e5c36cf0d4759f708ea27dc68e80a737c564d.json new file mode 100644 index 0000000000000..bb0a5a5d1d208 --- /dev/null +++ b/backend/.sqlx/query-f29d3d500ae3fe6c775b6ec8eb3e5c36cf0d4759f708ea27dc68e80a737c564d.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH queued AS (\n DELETE FROM v2_job_queue q\n USING v2_job j\n WHERE\n q.id = ANY ($2) AND q.id = j.id\n AND q.running = false AND j.parent_job IS NULL AND q.workspace_id = $3\n AND j.schedule_path IS NULL\n RETURNING\n q.id, q.workspace_id, q.started_at, q.worker\n ), queued_and_runtime AS (\n SELECT queued.*, memory_peak, flow_status\n FROM queued\n JOIN v2_job_runtime USING (id)\n LEFT JOIN v2_job_flow_runtime USING (id)\n ) INSERT INTO v2_job_completed (\n id, workspace_id,\n duration_ms, result, canceled_by, canceled_reason, status,\n flow_status, started_at, memory_peak, worker\n ) SELECT\n id, workspace_id,\n 0, $4, $1, 'cancel all', 'canceled'::job_status,\n flow_status, started_at, memory_peak, worker\n FROM queued_and_runtime\n ON CONFLICT (id) DO NOTHING RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "UuidArray", + "Text", + "Jsonb" + ] + }, + "nullable": [ + false + ] + }, + "hash": "f29d3d500ae3fe6c775b6ec8eb3e5c36cf0d4759f708ea27dc68e80a737c564d" +} diff --git 
a/backend/.sqlx/query-f64ae18811e211dbf0cb98b43d3b018b0dcc0abc7e4a1f0b45885cfe18efd9b2.json b/backend/.sqlx/query-f64ae18811e211dbf0cb98b43d3b018b0dcc0abc7e4a1f0b45885cfe18efd9b2.json new file mode 100644 index 0000000000000..99e04c9bff7ba --- /dev/null +++ b/backend/.sqlx/query-f64ae18811e211dbf0cb98b43d3b018b0dcc0abc7e4a1f0b45885cfe18efd9b2.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "create index concurrently if not exists ix_job_workspace_id_created_at_new_3 ON v2_job (workspace_id, created_at DESC)", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "f64ae18811e211dbf0cb98b43d3b018b0dcc0abc7e4a1f0b45885cfe18efd9b2" +} diff --git a/backend/.sqlx/query-f7b1445ec1f0d86efb6f8e0939430e7294bcac06bb7930dcf4d46427571662cb.json b/backend/.sqlx/query-f7b1445ec1f0d86efb6f8e0939430e7294bcac06bb7930dcf4d46427571662cb.json new file mode 100644 index 0000000000000..5948eedef5160 --- /dev/null +++ b/backend/.sqlx/query-f7b1445ec1f0d86efb6f8e0939430e7294bcac06bb7930dcf4d46427571662cb.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE queue SET mem_peak = $1, last_ping = now()\n WHERE id = $2\n RETURNING canceled AS \"canceled!\", canceled_by, canceled_reason", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "canceled_by", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "canceled_reason", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "f7b1445ec1f0d86efb6f8e0939430e7294bcac06bb7930dcf4d46427571662cb" +} diff --git a/backend/.sqlx/query-f8f25948ae14fcb71c666cdc5e51d888e1f22fb2300a78bbeafebf64e82658db.json b/backend/.sqlx/query-f8f25948ae14fcb71c666cdc5e51d888e1f22fb2300a78bbeafebf64e82658db.json new file mode 100644 index 0000000000000..51675232120ca --- /dev/null +++ b/backend/.sqlx/query-f8f25948ae14fcb71c666cdc5e51d888e1f22fb2300a78bbeafebf64e82658db.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM queue WHERE parent_job = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "f8f25948ae14fcb71c666cdc5e51d888e1f22fb2300a78bbeafebf64e82658db" +} diff --git a/backend/.sqlx/query-fb9e98f5a4da2918146cff7c04d0b1ed5e219c2392867d836bc29263cc57ff90.json b/backend/.sqlx/query-fb9e98f5a4da2918146cff7c04d0b1ed5e219c2392867d836bc29263cc57ff90.json new file mode 100644 index 0000000000000..14198d4f69e33 --- /dev/null +++ b/backend/.sqlx/query-fb9e98f5a4da2918146cff7c04d0b1ed5e219c2392867d836bc29263cc57ff90.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT coalesce(COUNT(*), 0) as \"database_length!\", null::bigint as suspended FROM v2_completed_job WHERE workspace_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "database_length!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "suspended", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "fb9e98f5a4da2918146cff7c04d0b1ed5e219c2392867d836bc29263cc57ff90" +} diff --git a/backend/.sqlx/query-fc8bc1bdaf75bd5e1054698963be2ce6dea9f667ff323f4db84e3553f0e0f49c.json 
b/backend/.sqlx/query-fc8bc1bdaf75bd5e1054698963be2ce6dea9f667ff323f4db84e3553f0e0f49c.json new file mode 100644 index 0000000000000..2129ba336feee --- /dev/null +++ b/backend/.sqlx/query-fc8bc1bdaf75bd5e1054698963be2ce6dea9f667ff323f4db84e3553f0e0f49c.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_queue SET mem_peak = $1, last_ping = now()\n WHERE id = $2\n RETURNING canceled AS \"canceled!\", canceled_by, canceled_reason", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "canceled_by", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "canceled_reason", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "fc8bc1bdaf75bd5e1054698963be2ce6dea9f667ff323f4db84e3553f0e0f49c" +} diff --git a/backend/.sqlx/query-fee633fcf73f6253d07938cbf8e1c5947d2e22a10fc95401faa2d3704647830d.json b/backend/.sqlx/query-fee633fcf73f6253d07938cbf8e1c5947d2e22a10fc95401faa2d3704647830d.json new file mode 100644 index 0000000000000..2f089691d91c1 --- /dev/null +++ b/backend/.sqlx/query-fee633fcf73f6253d07938cbf8e1c5947d2e22a10fc95401faa2d3704647830d.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM v2_queue WHERE workspace_id = $1 and root_job = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "fee633fcf73f6253d07938cbf8e1c5947d2e22a10fc95401faa2d3704647830d" +} diff --git a/backend/.sqlx/query-fefcb942f0fbf57e7ba6e879f7843010a95fb50a983a45dc72638a3ada1b83ff.json b/backend/.sqlx/query-fefcb942f0fbf57e7ba6e879f7843010a95fb50a983a45dc72638a3ada1b83ff.json new file mode 100644 index 0000000000000..5f5244f78169f --- /dev/null +++ b/backend/.sqlx/query-fefcb942f0fbf57e7ba6e879f7843010a95fb50a983a45dc72638a3ada1b83ff.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_queue WHERE running = true AND workspace_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "fefcb942f0fbf57e7ba6e879f7843010a95fb50a983a45dc72638a3ada1b83ff" +} diff --git a/backend/.sqlx/query-ff14230469026418966ec79b77f549b2fb27c90556484f3914666d5ad7f8f107.json b/backend/.sqlx/query-ff14230469026418966ec79b77f549b2fb27c90556484f3914666d5ad7f8f107.json new file mode 100644 index 0000000000000..b91dab6fca32b --- /dev/null +++ b/backend/.sqlx/query-ff14230469026418966ec79b77f549b2fb27c90556484f3914666d5ad7f8f107.json @@ -0,0 +1,69 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result #> $3 AS \"result: sqlx::types::Json>\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n created_by AS \"created_by!\"\n FROM completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($4::text[] IS NULL OR tag = ANY($4))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", 
+ "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray", + "TextArray" + ] + }, + "nullable": [ + null, + true, + true, + true + ] + }, + "hash": "ff14230469026418966ec79b77f549b2fb27c90556484f3914666d5ad7f8f107" +} diff --git a/backend/.sqlx/query-ffc911d72977e755c0f65ddfd830b82f5df561908bcf6dc662ac60f6431341af.json b/backend/.sqlx/query-ffc911d72977e755c0f65ddfd830b82f5df561908bcf6dc662ac60f6431341af.json new file mode 100644 index 0000000000000..bc49267a18b45 --- /dev/null +++ b/backend/.sqlx/query-ffc911d72977e755c0f65ddfd830b82f5df561908bcf6dc662ac60f6431341af.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET schedule_path = REGEXP_REPLACE(schedule_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\\1') WHERE schedule_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "ffc911d72977e755c0f65ddfd830b82f5df561908bcf6dc662ac60f6431341af" +} diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 0549950ca07cb..40159d72a66a2 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -11300,6 +11300,7 @@ dependencies = [ "tokio-postgres", "tokio-util", "tracing", + "ulid", "urlencoding", "uuid 1.12.0", "windmill-audit", diff --git a/backend/ee-repo-ref.txt b/backend/ee-repo-ref.txt index ba1714d994706..bf5697672ce7b 100644 --- a/backend/ee-repo-ref.txt +++ b/backend/ee-repo-ref.txt @@ -1 +1 @@ -fb5e285c316f10fa2188318585573c7e85999784 \ No newline at end of file +69134f284383169d54f4abd29481b5eca2dd2c2a \ No newline at end of file diff --git a/backend/migrations/20250110124344_v2_job_runtime.down.sql b/backend/migrations/20250110124344_v2_job_runtime.down.sql new file mode 100644 index 0000000000000..45b34f0f7a547 --- /dev/null +++ b/backend/migrations/20250110124344_v2_job_runtime.down.sql @@ -0,0 +1,4 @@ +-- Add down migration script here +-- Lock `queue` in access exclusive to prevent deadlocks when dropping the foreign key to `queue`. +LOCK TABLE queue IN ACCESS EXCLUSIVE MODE; +DROP TABLE v2_job_runtime CASCADE; diff --git a/backend/migrations/20250110124344_v2_job_runtime.up.sql b/backend/migrations/20250110124344_v2_job_runtime.up.sql new file mode 100644 index 0000000000000..60b3a99384fd7 --- /dev/null +++ b/backend/migrations/20250110124344_v2_job_runtime.up.sql @@ -0,0 +1,14 @@ +-- Add up migration script here +CREATE TABLE IF NOT EXISTS v2_job_runtime ( + id UUID REFERENCES queue (id) ON DELETE CASCADE PRIMARY KEY NOT NULL, + -- Metrics fields: + ping TIMESTAMP WITH TIME ZONE DEFAULT now(), + memory_peak INTEGER +); + +CREATE POLICY admin_policy ON v2_job_runtime + AS PERMISSIVE + FOR ALL + TO windmill_admin; + +GRANT ALL ON v2_job_runtime TO windmill_user, windmill_admin; diff --git a/backend/migrations/20250110124345_v2_job_flow_runtime.down.sql b/backend/migrations/20250110124345_v2_job_flow_runtime.down.sql new file mode 100644 index 0000000000000..1fd382d050340 --- /dev/null +++ b/backend/migrations/20250110124345_v2_job_flow_runtime.down.sql @@ -0,0 +1,4 @@ +-- Add down migration script here +-- Lock `queue` in access exclusive to prevent deadlocks when dropping the foreign key to `queue`. 
+LOCK TABLE queue IN ACCESS EXCLUSIVE MODE; +DROP TABLE v2_job_flow_runtime CASCADE; diff --git a/backend/migrations/20250110124345_v2_job_flow_runtime.up.sql b/backend/migrations/20250110124345_v2_job_flow_runtime.up.sql new file mode 100644 index 0000000000000..eaf8a442a854a --- /dev/null +++ b/backend/migrations/20250110124345_v2_job_flow_runtime.up.sql @@ -0,0 +1,14 @@ +-- Add up migration script here +CREATE TABLE IF NOT EXISTS v2_job_flow_runtime ( + id UUID REFERENCES queue (id) ON DELETE CASCADE PRIMARY KEY NOT NULL, + -- Flow status fields: + flow_status JSONB NOT NULL, + leaf_jobs JSONB +); + +CREATE POLICY admin_policy ON v2_job_flow_runtime + AS PERMISSIVE + FOR ALL + TO windmill_admin; + +GRANT ALL ON v2_job_flow_runtime TO windmill_user, windmill_admin; diff --git a/backend/migrations/20250110124409_v2_job_queue_compatiblity_view.down.sql b/backend/migrations/20250110124409_v2_job_queue_compatiblity_view.down.sql new file mode 100644 index 0000000000000..43f51a18cea89 --- /dev/null +++ b/backend/migrations/20250110124409_v2_job_queue_compatiblity_view.down.sql @@ -0,0 +1,3 @@ +-- Add down migration script here +DROP VIEW queue; +ALTER TABLE v2_job_queue RENAME TO queue; diff --git a/backend/migrations/20250110124409_v2_job_queue_compatiblity_view.up.sql b/backend/migrations/20250110124409_v2_job_queue_compatiblity_view.up.sql new file mode 100644 index 0000000000000..6a4f1b9c74df6 --- /dev/null +++ b/backend/migrations/20250110124409_v2_job_queue_compatiblity_view.up.sql @@ -0,0 +1,48 @@ +-- Add up migration script here +ALTER TABLE queue RENAME TO v2_job_queue; +CREATE OR REPLACE VIEW queue AS ( + SELECT + id, + workspace_id, + parent_job AS parent_job, + created_by AS created_by, + created_at, + started_at, + scheduled_for, + running, + script_hash AS script_hash, + script_path AS script_path, + args AS args, + logs AS logs, + raw_code AS raw_code, + canceled AS canceled, + canceled_by, + canceled_reason, + last_ping AS last_ping, + job_kind AS job_kind, + env_id AS env_id, + schedule_path AS schedule_path, + permissioned_as AS permissioned_as, + flow_status AS flow_status, + raw_flow AS raw_flow, + is_flow_step AS is_flow_step, + language AS language, + suspend, + suspend_until, + same_worker AS same_worker, + raw_lock AS raw_lock, + pre_run_error AS pre_run_error, + email AS email, + visible_to_owner AS visible_to_owner, + mem_peak AS mem_peak, + root_job AS root_job, + leaf_jobs AS leaf_jobs, + tag, + concurrent_limit AS concurrent_limit, + concurrency_time_window_s AS concurrency_time_window_s, + timeout AS timeout, + flow_step_id AS flow_step_id, + cache_ttl AS cache_ttl, + priority + FROM v2_job_queue +); diff --git a/backend/migrations/20250110124410_v2_job_queue.down.sql b/backend/migrations/20250110124410_v2_job_queue.down.sql new file mode 100644 index 0000000000000..f913ee4b5eca7 --- /dev/null +++ b/backend/migrations/20250110124410_v2_job_queue.down.sql @@ -0,0 +1,32 @@ +-- Add down migration script here +ALTER TABLE v2_job_queue DROP COLUMN worker; +ALTER TABLE v2_job_queue RENAME COLUMN __parent_job TO parent_job; +ALTER TABLE v2_job_queue RENAME COLUMN __created_by TO created_by; +ALTER TABLE v2_job_queue RENAME COLUMN __script_hash TO script_hash; +ALTER TABLE v2_job_queue RENAME COLUMN __script_path TO script_path; +ALTER TABLE v2_job_queue RENAME COLUMN __args TO args; +ALTER TABLE v2_job_queue RENAME COLUMN __logs TO logs; +ALTER TABLE v2_job_queue RENAME COLUMN __raw_code TO raw_code; +ALTER TABLE v2_job_queue RENAME COLUMN __canceled TO canceled;
+ALTER TABLE v2_job_queue RENAME COLUMN __last_ping TO last_ping; +ALTER TABLE v2_job_queue RENAME COLUMN __job_kind TO job_kind; +ALTER TABLE v2_job_queue RENAME COLUMN __env_id TO env_id; +ALTER TABLE v2_job_queue RENAME COLUMN __schedule_path TO schedule_path; +ALTER TABLE v2_job_queue RENAME COLUMN __permissioned_as TO permissioned_as; +ALTER TABLE v2_job_queue RENAME COLUMN __flow_status TO flow_status; +ALTER TABLE v2_job_queue RENAME COLUMN __raw_flow TO raw_flow; +ALTER TABLE v2_job_queue RENAME COLUMN __is_flow_step TO is_flow_step; +ALTER TABLE v2_job_queue RENAME COLUMN __language TO language; +ALTER TABLE v2_job_queue RENAME COLUMN __same_worker TO same_worker; +ALTER TABLE v2_job_queue RENAME COLUMN __raw_lock TO raw_lock; +ALTER TABLE v2_job_queue RENAME COLUMN __pre_run_error TO pre_run_error; +ALTER TABLE v2_job_queue RENAME COLUMN __email TO email; +ALTER TABLE v2_job_queue RENAME COLUMN __visible_to_owner TO visible_to_owner; +ALTER TABLE v2_job_queue RENAME COLUMN __mem_peak TO mem_peak; +ALTER TABLE v2_job_queue RENAME COLUMN __root_job TO root_job; +ALTER TABLE v2_job_queue RENAME COLUMN __leaf_jobs TO leaf_jobs; +ALTER TABLE v2_job_queue RENAME COLUMN __concurrent_limit TO concurrent_limit; +ALTER TABLE v2_job_queue RENAME COLUMN __concurrency_time_window_s TO concurrency_time_window_s; +ALTER TABLE v2_job_queue RENAME COLUMN __timeout TO timeout; +ALTER TABLE v2_job_queue RENAME COLUMN __flow_step_id TO flow_step_id; +ALTER TABLE v2_job_queue RENAME COLUMN __cache_ttl TO cache_ttl; diff --git a/backend/migrations/20250110124410_v2_job_queue.up.sql b/backend/migrations/20250110124410_v2_job_queue.up.sql new file mode 100644 index 0000000000000..75810c577e476 --- /dev/null +++ b/backend/migrations/20250110124410_v2_job_queue.up.sql @@ -0,0 +1,32 @@ +-- Add up migration script here +ALTER TABLE v2_job_queue ADD COLUMN IF NOT EXISTS worker VARCHAR(255); +ALTER TABLE v2_job_queue RENAME COLUMN parent_job TO __parent_job; +ALTER TABLE v2_job_queue RENAME COLUMN created_by TO __created_by; +ALTER TABLE v2_job_queue RENAME COLUMN script_hash TO __script_hash; +ALTER TABLE v2_job_queue RENAME COLUMN script_path TO __script_path; +ALTER TABLE v2_job_queue RENAME COLUMN args TO __args; +ALTER TABLE v2_job_queue RENAME COLUMN logs TO __logs; +ALTER TABLE v2_job_queue RENAME COLUMN raw_code TO __raw_code; +ALTER TABLE v2_job_queue RENAME COLUMN canceled TO __canceled; +ALTER TABLE v2_job_queue RENAME COLUMN last_ping TO __last_ping; +ALTER TABLE v2_job_queue RENAME COLUMN job_kind TO __job_kind; +ALTER TABLE v2_job_queue RENAME COLUMN env_id TO __env_id; +ALTER TABLE v2_job_queue RENAME COLUMN schedule_path TO __schedule_path; +ALTER TABLE v2_job_queue RENAME COLUMN permissioned_as TO __permissioned_as; +ALTER TABLE v2_job_queue RENAME COLUMN flow_status TO __flow_status; +ALTER TABLE v2_job_queue RENAME COLUMN raw_flow TO __raw_flow; +ALTER TABLE v2_job_queue RENAME COLUMN is_flow_step TO __is_flow_step; +ALTER TABLE v2_job_queue RENAME COLUMN language TO __language; +ALTER TABLE v2_job_queue RENAME COLUMN same_worker TO __same_worker; +ALTER TABLE v2_job_queue RENAME COLUMN raw_lock TO __raw_lock; +ALTER TABLE v2_job_queue RENAME COLUMN pre_run_error TO __pre_run_error; +ALTER TABLE v2_job_queue RENAME COLUMN email TO __email; +ALTER TABLE v2_job_queue RENAME COLUMN visible_to_owner TO __visible_to_owner; +ALTER TABLE v2_job_queue RENAME COLUMN mem_peak TO __mem_peak; +ALTER TABLE v2_job_queue RENAME COLUMN root_job TO __root_job; +ALTER TABLE v2_job_queue RENAME COLUMN 
leaf_jobs TO __leaf_jobs; +ALTER TABLE v2_job_queue RENAME COLUMN concurrent_limit TO __concurrent_limit; +ALTER TABLE v2_job_queue RENAME COLUMN concurrency_time_window_s TO __concurrency_time_window_s; +ALTER TABLE v2_job_queue RENAME COLUMN timeout TO __timeout; +ALTER TABLE v2_job_queue RENAME COLUMN flow_step_id TO __flow_step_id; +ALTER TABLE v2_job_queue RENAME COLUMN cache_ttl TO __cache_ttl; diff --git a/backend/migrations/20250110124422_v2_job_completed_compatibility_view.down.sql b/backend/migrations/20250110124422_v2_job_completed_compatibility_view.down.sql new file mode 100644 index 0000000000000..5eb87e01e0a20 --- /dev/null +++ b/backend/migrations/20250110124422_v2_job_completed_compatibility_view.down.sql @@ -0,0 +1,3 @@ +-- Add down migration script here +DROP VIEW IF EXISTS completed_job; +ALTER TABLE IF EXISTS v2_job_completed RENAME TO completed_job; diff --git a/backend/migrations/20250110124422_v2_job_completed_compatibility_view.up.sql b/backend/migrations/20250110124422_v2_job_completed_compatibility_view.up.sql new file mode 100644 index 0000000000000..7519a1c585f5e --- /dev/null +++ b/backend/migrations/20250110124422_v2_job_completed_compatibility_view.up.sql @@ -0,0 +1,39 @@ +-- Add up migration script here +ALTER TABLE completed_job RENAME TO v2_job_completed; +CREATE OR REPLACE VIEW completed_job AS ( + SELECT + id, + workspace_id, + parent_job AS parent_job, + created_by AS created_by, + created_at AS created_at, + duration_ms, + success AS success, + script_hash AS script_hash, + script_path AS script_path, + args AS args, + result, + logs AS logs, + deleted, + raw_code AS raw_code, + canceled AS canceled, + canceled_by, + canceled_reason, + job_kind AS job_kind, + env_id AS env_id, + schedule_path AS schedule_path, + permissioned_as AS permissioned_as, + flow_status, + raw_flow AS raw_flow, + is_flow_step AS is_flow_step, + language AS language, + started_at, + is_skipped AS is_skipped, + raw_lock AS raw_lock, + email AS email, + visible_to_owner AS visible_to_owner, + mem_peak AS mem_peak, + tag AS tag, + priority AS priority + FROM v2_job_completed +); diff --git a/backend/migrations/20250110124423_v2_job_completed.down.sql b/backend/migrations/20250110124423_v2_job_completed.down.sql new file mode 100644 index 0000000000000..4028819661ee7 --- /dev/null +++ b/backend/migrations/20250110124423_v2_job_completed.down.sql @@ -0,0 +1,30 @@ +-- Add down migration script here +ALTER TABLE v2_job_completed + DROP COLUMN status CASCADE, + DROP COLUMN completed_at CASCADE, + DROP COLUMN worker CASCADE; +ALTER TABLE v2_job_completed RENAME COLUMN memory_peak TO mem_peak; +ALTER TABLE v2_job_completed RENAME COLUMN __parent_job TO parent_job; +ALTER TABLE v2_job_completed RENAME COLUMN __created_by TO created_by; +ALTER TABLE v2_job_completed RENAME COLUMN __created_at TO created_at; +ALTER TABLE v2_job_completed RENAME COLUMN __success TO success; +ALTER TABLE v2_job_completed RENAME COLUMN __script_hash TO script_hash; +ALTER TABLE v2_job_completed RENAME COLUMN __script_path TO script_path; +ALTER TABLE v2_job_completed RENAME COLUMN __args TO args; +ALTER TABLE v2_job_completed RENAME COLUMN __logs TO logs; +ALTER TABLE v2_job_completed RENAME COLUMN __raw_code TO raw_code; +ALTER TABLE v2_job_completed RENAME COLUMN __canceled TO canceled; +ALTER TABLE v2_job_completed RENAME COLUMN __job_kind TO job_kind; +ALTER TABLE v2_job_completed RENAME COLUMN __env_id TO env_id; +ALTER TABLE v2_job_completed RENAME COLUMN __schedule_path TO schedule_path; +ALTER TABLE
v2_job_completed RENAME COLUMN __permissioned_as TO permissioned_as; +ALTER TABLE v2_job_completed RENAME COLUMN __raw_flow TO raw_flow; +ALTER TABLE v2_job_completed RENAME COLUMN __is_flow_step TO is_flow_step; +ALTER TABLE v2_job_completed RENAME COLUMN __language TO language; +ALTER TABLE v2_job_completed RENAME COLUMN __is_skipped TO is_skipped; +ALTER TABLE v2_job_completed RENAME COLUMN __raw_lock TO raw_lock; +ALTER TABLE v2_job_completed RENAME COLUMN __email TO email; +ALTER TABLE v2_job_completed RENAME COLUMN __visible_to_owner TO visible_to_owner; +ALTER TABLE v2_job_completed RENAME COLUMN __tag TO tag; +ALTER TABLE v2_job_completed RENAME COLUMN __priority TO priority; +DROP TYPE IF EXISTS job_status CASCADE; diff --git a/backend/migrations/20250110124423_v2_job_completed.up.sql b/backend/migrations/20250110124423_v2_job_completed.up.sql new file mode 100644 index 0000000000000..09146ecc9b972 --- /dev/null +++ b/backend/migrations/20250110124423_v2_job_completed.up.sql @@ -0,0 +1,30 @@ +-- Add up migration script here +CREATE TYPE job_status AS ENUM ('success', 'failure', 'canceled', 'skipped'); +ALTER TABLE v2_job_completed + ADD COLUMN IF NOT EXISTS status job_status, + ADD COLUMN IF NOT EXISTS completed_at TIMESTAMP WITH TIME ZONE, + ADD COLUMN IF NOT EXISTS worker VARCHAR(255); +ALTER TABLE v2_job_completed RENAME COLUMN mem_peak TO memory_peak; +ALTER TABLE v2_job_completed RENAME COLUMN parent_job TO __parent_job; +ALTER TABLE v2_job_completed RENAME COLUMN created_by TO __created_by; +ALTER TABLE v2_job_completed RENAME COLUMN created_at TO __created_at; +ALTER TABLE v2_job_completed RENAME COLUMN success TO __success; +ALTER TABLE v2_job_completed RENAME COLUMN script_hash TO __script_hash; +ALTER TABLE v2_job_completed RENAME COLUMN script_path TO __script_path; +ALTER TABLE v2_job_completed RENAME COLUMN args TO __args; +ALTER TABLE v2_job_completed RENAME COLUMN logs TO __logs; +ALTER TABLE v2_job_completed RENAME COLUMN raw_code TO __raw_code; +ALTER TABLE v2_job_completed RENAME COLUMN canceled TO __canceled; +ALTER TABLE v2_job_completed RENAME COLUMN job_kind TO __job_kind; +ALTER TABLE v2_job_completed RENAME COLUMN env_id TO __env_id; +ALTER TABLE v2_job_completed RENAME COLUMN schedule_path TO __schedule_path; +ALTER TABLE v2_job_completed RENAME COLUMN permissioned_as TO __permissioned_as; +ALTER TABLE v2_job_completed RENAME COLUMN raw_flow TO __raw_flow; +ALTER TABLE v2_job_completed RENAME COLUMN is_flow_step TO __is_flow_step; +ALTER TABLE v2_job_completed RENAME COLUMN language TO __language; +ALTER TABLE v2_job_completed RENAME COLUMN is_skipped TO __is_skipped; +ALTER TABLE v2_job_completed RENAME COLUMN raw_lock TO __raw_lock; +ALTER TABLE v2_job_completed RENAME COLUMN email TO __email; +ALTER TABLE v2_job_completed RENAME COLUMN visible_to_owner TO __visible_to_owner; +ALTER TABLE v2_job_completed RENAME COLUMN tag TO __tag; +ALTER TABLE v2_job_completed RENAME COLUMN priority TO __priority; diff --git a/backend/migrations/20250110124430_v2_job_compatibility_view.down.sql b/backend/migrations/20250110124430_v2_job_compatibility_view.down.sql new file mode 100644 index 0000000000000..45dd1e24a3392 --- /dev/null +++ b/backend/migrations/20250110124430_v2_job_compatibility_view.down.sql @@ -0,0 +1,3 @@ +-- Add down migration script here +DROP VIEW IF EXISTS job; +ALTER TABLE IF EXISTS v2_job RENAME TO job; diff --git a/backend/migrations/20250110124430_v2_job_compatibility_view.up.sql 
b/backend/migrations/20250110124430_v2_job_compatibility_view.up.sql new file mode 100644 index 0000000000000..246d46e2b69d6 --- /dev/null +++ b/backend/migrations/20250110124430_v2_job_compatibility_view.up.sql @@ -0,0 +1,6 @@ +-- Add up migration script here +ALTER TABLE job RENAME TO v2_job; +CREATE OR REPLACE VIEW job AS ( + SELECT id, raw_code, raw_lock, raw_flow, tag, workspace_id + FROM v2_job +); diff --git a/backend/migrations/20250110124431_v2_job.down.sql b/backend/migrations/20250110124431_v2_job.down.sql new file mode 100644 index 0000000000000..15a325725e0b9 --- /dev/null +++ b/backend/migrations/20250110124431_v2_job.down.sql @@ -0,0 +1,29 @@ +-- Add down migration script here +DROP POLICY see_folder_extra_perms_user ON v2_job; +DROP POLICY see_own_path ON v2_job; +DROP POLICY see_member_path ON v2_job; +DROP POLICY see_own ON v2_job; +DROP POLICY see_member ON v2_job; +ALTER TABLE v2_job + DROP COLUMN created_at CASCADE, + DROP COLUMN created_by CASCADE, + DROP COLUMN permissioned_as CASCADE, + DROP COLUMN permissioned_as_email CASCADE, + DROP COLUMN kind CASCADE, + DROP COLUMN runnable_id CASCADE, + DROP COLUMN runnable_path CASCADE, + DROP COLUMN parent_job CASCADE, + DROP COLUMN script_lang CASCADE, + DROP COLUMN flow_step CASCADE, + DROP COLUMN flow_step_id CASCADE, + DROP COLUMN flow_root_job CASCADE, + DROP COLUMN schedule_path CASCADE, + DROP COLUMN same_worker CASCADE, + DROP COLUMN visible_to_owner CASCADE, + DROP COLUMN concurrent_limit CASCADE, + DROP COLUMN concurrency_time_window_s CASCADE, + DROP COLUMN cache_ttl CASCADE, + DROP COLUMN timeout CASCADE, + DROP COLUMN priority CASCADE, + DROP COLUMN args CASCADE, + DROP COLUMN pre_run_error CASCADE; diff --git a/backend/migrations/20250110124431_v2_job.up.sql b/backend/migrations/20250110124431_v2_job.up.sql new file mode 100644 index 0000000000000..602d72f7e1811 --- /dev/null +++ b/backend/migrations/20250110124431_v2_job.up.sql @@ -0,0 +1,64 @@ +-- Add up migration script here +ALTER TABLE v2_job + ADD COLUMN IF NOT EXISTS created_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + ADD COLUMN IF NOT EXISTS created_by VARCHAR(255) DEFAULT 'missing' NOT NULL, + ADD COLUMN IF NOT EXISTS permissioned_as VARCHAR(55) DEFAULT 'g/all' NOT NULL, + ADD COLUMN IF NOT EXISTS permissioned_as_email VARCHAR(255) DEFAULT 'missing@email.xyz' NOT NULL, + ADD COLUMN IF NOT EXISTS kind job_kind DEFAULT 'script'::job_kind NOT NULL, + ADD COLUMN IF NOT EXISTS runnable_id BIGINT, + ADD COLUMN IF NOT EXISTS runnable_path VARCHAR(255), + ADD COLUMN IF NOT EXISTS parent_job UUID, + ADD COLUMN IF NOT EXISTS script_lang script_lang DEFAULT 'python3'::script_lang, + ADD COLUMN IF NOT EXISTS flow_step INTEGER, + ADD COLUMN IF NOT EXISTS flow_step_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS flow_root_job UUID, + ADD COLUMN IF NOT EXISTS schedule_path VARCHAR(255), + ADD COLUMN IF NOT EXISTS same_worker BOOLEAN DEFAULT FALSE NOT NULL, + ADD COLUMN IF NOT EXISTS visible_to_owner BOOLEAN DEFAULT TRUE NOT NULL, + ADD COLUMN IF NOT EXISTS concurrent_limit INTEGER, + ADD COLUMN IF NOT EXISTS concurrency_time_window_s INTEGER, + ADD COLUMN IF NOT EXISTS cache_ttl INTEGER, + ADD COLUMN IF NOT EXISTS timeout INTEGER, + ADD COLUMN IF NOT EXISTS priority SMALLINT, + ADD COLUMN IF NOT EXISTS args JSONB, + ADD COLUMN IF NOT EXISTS pre_run_error TEXT; + +CREATE POLICY see_folder_extra_perms_user ON v2_job + AS PERMISSIVE + FOR ALL + TO windmill_user + USING ((visible_to_owner IS TRUE) AND (SPLIT_PART((runnable_path)::TEXT, '/'::TEXT, 1) = 'f'::TEXT)
AND + (SPLIT_PART((runnable_path)::TEXT, '/'::TEXT, 2) = ANY ( + REGEXP_SPLIT_TO_ARRAY(CURRENT_SETTING('session.folders_read'::TEXT), ','::TEXT)))); + +CREATE POLICY see_own_path ON v2_job + AS PERMISSIVE + FOR ALL + TO windmill_user + USING ((visible_to_owner IS TRUE) AND (SPLIT_PART((runnable_path)::TEXT, '/'::TEXT, 1) = 'u'::TEXT) AND + (SPLIT_PART((runnable_path)::TEXT, '/'::TEXT, 2) = CURRENT_SETTING('session.user'::TEXT))); + +CREATE POLICY see_member_path ON v2_job + AS PERMISSIVE + FOR ALL + TO windmill_user + USING ((visible_to_owner IS TRUE) AND (SPLIT_PART((runnable_path)::TEXT, '/'::TEXT, 1) = 'g'::TEXT) AND + (SPLIT_PART((runnable_path)::TEXT, '/'::TEXT, 2) = ANY + (REGEXP_SPLIT_TO_ARRAY(CURRENT_SETTING('session.groups'::TEXT), ','::TEXT)))); + +CREATE POLICY see_own ON v2_job + AS PERMISSIVE + FOR ALL + TO windmill_user + USING ((SPLIT_PART((permissioned_as)::TEXT, '/'::TEXT, 1) = 'u'::TEXT) AND + (SPLIT_PART((permissioned_as)::TEXT, '/'::TEXT, 2) = CURRENT_SETTING('session.user'::TEXT))); + +CREATE POLICY see_member ON v2_job + AS PERMISSIVE + FOR ALL + TO windmill_user + USING ((SPLIT_PART((permissioned_as)::TEXT, '/'::TEXT, 1) = 'g'::TEXT) AND + (SPLIT_PART((permissioned_as)::TEXT, '/'::TEXT, 2) = ANY + (REGEXP_SPLIT_TO_ARRAY(CURRENT_SETTING('session.groups'::TEXT), ','::TEXT)))); + +GRANT ALL ON v2_job TO windmill_user, windmill_admin; diff --git a/backend/migrations/20250110124743_v2_job_queue_sync.down.sql b/backend/migrations/20250110124743_v2_job_queue_sync.down.sql new file mode 100644 index 0000000000000..b64ab629bfb1d --- /dev/null +++ b/backend/migrations/20250110124743_v2_job_queue_sync.down.sql @@ -0,0 +1,4 @@ +-- Add down migration script here +DROP FUNCTION v2_job_queue_before_insert() CASCADE; +DROP FUNCTION v2_job_queue_after_insert() CASCADE; +DROP FUNCTION v2_job_queue_before_update() CASCADE; diff --git a/backend/migrations/20250110124743_v2_job_queue_sync.up.sql b/backend/migrations/20250110124743_v2_job_queue_sync.up.sql new file mode 100644 index 0000000000000..88197dfad6044 --- /dev/null +++ b/backend/migrations/20250110124743_v2_job_queue_sync.up.sql @@ -0,0 +1,185 @@ +-- Add up migration script here + +-- v2 -> v1 +-- This trigger will be removed once all server(s)/worker(s) are updated to use `v2_*` tables +CREATE OR REPLACE FUNCTION v2_job_queue_before_insert() +RETURNS TRIGGER AS $$ +DECLARE + job v2_job; +BEGIN + -- When inserting to `v2_job_queue` from `v2` code, set `v1` columns: + SELECT * INTO job FROM v2_job WHERE id = NEW.id; + NEW.__parent_job := job.parent_job; + NEW.__created_by := job.created_by; + NEW.__script_hash := job.runnable_id; + NEW.__script_path := job.runnable_path; + NEW.__args := job.args; + -- __logs + NEW.__raw_code := job.raw_code; + NEW.__canceled := NEW.canceled_by IS NOT NULL; + -- __last_ping + NEW.__job_kind := job.kind; + NEW.__env_id := 0xcafe; -- Magic value, used below.
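A note on the sentinel just above: 0xcafe (decimal 51966) appears to rely on the non-decimal integer literals that PostgreSQL only accepts since version 16, and it exists so that the AFTER INSERT trigger further down can recognize rows written through this v2 path and skip mirroring them back into v2_job a second time. Quick check, runnable on PostgreSQL 16 and later:

SELECT 0xcafe = 51966;  -- true; on 15 and older this is a syntax error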
+ NEW.__schedule_path := job.schedule_path; + NEW.__permissioned_as := job.permissioned_as; + -- __flow_status + NEW.__raw_flow := job.raw_flow; + NEW.__is_flow_step := job.flow_step_id IS NOT NULL; + NEW.__language := job.script_lang; + NEW.__same_worker := job.same_worker; + NEW.__raw_lock := job.raw_lock; + NEW.__pre_run_error := job.pre_run_error; + NEW.__email := job.permissioned_as_email; + NEW.__visible_to_owner := job.visible_to_owner; + -- __mem_peak + NEW.__root_job := job.flow_root_job; + -- __leaf_jobs + NEW.__concurrent_limit := job.concurrent_limit; + NEW.__concurrency_time_window_s := job.concurrency_time_window_s; + NEW.__timeout := job.timeout; + NEW.__flow_step_id := job.flow_step_id; + NEW.__cache_ttl := job.cache_ttl; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_queue_before_insert_trigger +BEFORE INSERT ON v2_job_queue +FOR EACH ROW +WHEN (pg_trigger_depth() < 1 AND NEW.__created_by IS NULL) -- Prevent infinite loop v1 <-> v2 +EXECUTE FUNCTION v2_job_queue_before_insert(); + +-- v1 -> v2 +-- On every insert to `v2_job_queue`, insert to `v2_job`, `v2_job_runtime` and `v2_job_flow_runtime` as well +-- This trigger will be removed once all server(s)/worker(s) are updated to use `v2_*` tables +CREATE OR REPLACE FUNCTION v2_job_queue_after_insert() +RETURNS TRIGGER AS $$ +BEGIN + INSERT INTO v2_job ( + id, workspace_id, created_at, created_by, permissioned_as, permissioned_as_email, + kind, runnable_id, runnable_path, parent_job, + script_lang, + flow_step, flow_step_id, flow_root_job, + schedule_path, + tag, same_worker, visible_to_owner, concurrent_limit, concurrency_time_window_s, cache_ttl, timeout, priority, + args, pre_run_error, + raw_code, raw_lock, raw_flow + ) VALUES ( + NEW.id, NEW.workspace_id, NEW.created_at, NEW.__created_by, NEW.__permissioned_as, NEW.__email, + NEW.__job_kind, NEW.__script_hash, NEW.__script_path, NEW.__parent_job, + NEW.__language, + NULL, NEW.__flow_step_id, NEW.__root_job, + NEW.__schedule_path, + NEW.tag, NEW.__same_worker, NEW.__visible_to_owner, NEW.__concurrent_limit, NEW.__concurrency_time_window_s, + NEW.__cache_ttl, NEW.__timeout, NEW.priority, + NEW.__args, NEW.__pre_run_error, + NEW.__raw_code, NEW.__raw_lock, NEW.__raw_flow + ) ON CONFLICT (id) DO UPDATE SET + workspace_id = EXCLUDED.workspace_id, + created_at = EXCLUDED.created_at, + created_by = EXCLUDED.created_by, + permissioned_as = EXCLUDED.permissioned_as, + permissioned_as_email = EXCLUDED.permissioned_as_email, + kind = EXCLUDED.kind, + runnable_id = EXCLUDED.runnable_id, + runnable_path = EXCLUDED.runnable_path, + parent_job = EXCLUDED.parent_job, + script_lang = EXCLUDED.script_lang, + flow_step = EXCLUDED.flow_step, + flow_step_id = EXCLUDED.flow_step_id, + flow_root_job = EXCLUDED.flow_root_job, + schedule_path = EXCLUDED.schedule_path, + tag = EXCLUDED.tag, + same_worker = EXCLUDED.same_worker, + visible_to_owner = EXCLUDED.visible_to_owner, + concurrent_limit = EXCLUDED.concurrent_limit, + concurrency_time_window_s = EXCLUDED.concurrency_time_window_s, + cache_ttl = EXCLUDED.cache_ttl, + timeout = EXCLUDED.timeout, + priority = EXCLUDED.priority, + args = EXCLUDED.args, + pre_run_error = EXCLUDED.pre_run_error, + raw_code = COALESCE(v2_job.raw_code, EXCLUDED.raw_code), + raw_lock = COALESCE(v2_job.raw_lock, EXCLUDED.raw_lock), + raw_flow = COALESCE(v2_job.raw_flow, EXCLUDED.raw_flow) + ; + INSERT INTO v2_job_runtime (id, ping, memory_peak) + VALUES (NEW.id, NEW.__last_ping, NEW.__mem_peak) + ON CONFLICT (id) DO UPDATE SET + ping = 
COALESCE(v2_job_runtime.ping, EXCLUDED.ping), + memory_peak = COALESCE(v2_job_runtime.memory_peak, EXCLUDED.memory_peak) + ; + IF NEW.__flow_status IS NOT NULL OR NEW.__leaf_jobs IS NOT NULL THEN + INSERT INTO v2_job_flow_runtime (id, flow_status, leaf_jobs) + VALUES (NEW.id, NEW.__flow_status, NEW.__leaf_jobs) + ON CONFLICT (id) DO UPDATE SET + flow_status = COALESCE(v2_job_flow_runtime.flow_status, EXCLUDED.flow_status), + leaf_jobs = COALESCE(v2_job_flow_runtime.leaf_jobs, EXCLUDED.leaf_jobs) + ; + END IF; + IF NEW.__logs IS NOT NULL THEN + INSERT INTO job_logs (job_id, workspace_id, logs) + VALUES (NEW.id, NEW.workspace_id, NEW.__logs) + ON CONFLICT (job_id) DO UPDATE SET + logs = CONCAT(job_logs.logs, EXCLUDED.logs) + ; + NEW.__logs := NULL; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_queue_after_insert_trigger +AFTER INSERT ON v2_job_queue +FOR EACH ROW +WHEN (pg_trigger_depth() < 1 AND NEW.__created_by IS NOT NULL AND NEW.__env_id != 0xcafe) -- Prevent infinite loop v1 <-> v2 +EXECUTE FUNCTION v2_job_queue_after_insert(); + +-- On every update to `v2_job_queue`, update `v2_job`, `v2_job_runtime` and `v2_job_flow_runtime` as well +-- This trigger will be removed once all server(s)/worker(s) are updated to use `v2_*` tables +CREATE OR REPLACE FUNCTION v2_job_queue_before_update() +RETURNS TRIGGER AS $$ +BEGIN + -- `v2_job`: Only `args` are updated + IF NEW.__args::text IS DISTINCT FROM OLD.__args::text THEN + UPDATE v2_job + SET args = NEW.__args + WHERE id = NEW.id; + END IF; + -- `v2_job_runtime`: + IF NEW.__last_ping IS DISTINCT FROM OLD.__last_ping OR NEW.__mem_peak IS DISTINCT FROM OLD.__mem_peak THEN + INSERT INTO v2_job_runtime (id, ping, memory_peak) + VALUES (NEW.id, NEW.__last_ping, NEW.__mem_peak) + ON CONFLICT (id) DO UPDATE SET + ping = EXCLUDED.ping, + memory_peak = EXCLUDED.memory_peak + ; + END IF; + -- `v2_job_flow_runtime`: + IF NEW.__flow_status::text IS DISTINCT FROM OLD.__flow_status::text OR + NEW.__leaf_jobs::text IS DISTINCT FROM OLD.__leaf_jobs::text THEN + INSERT INTO v2_job_flow_runtime (id, flow_status, leaf_jobs) + VALUES (NEW.id, NEW.__flow_status, NEW.__leaf_jobs) + ON CONFLICT (id) DO UPDATE SET + flow_status = EXCLUDED.flow_status, + leaf_jobs = EXCLUDED.leaf_jobs + ; + END IF; + -- `job_logs`: + IF NEW.__logs IS DISTINCT FROM OLD.__logs THEN + INSERT INTO job_logs (job_id, workspace_id, logs) + VALUES (NEW.id, NEW.workspace_id, NEW.__logs) + ON CONFLICT (job_id) DO UPDATE SET + logs = CONCAT(job_logs.logs, EXCLUDED.logs) + ; + NEW.__logs := NULL; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_queue_before_update_trigger +BEFORE UPDATE ON v2_job_queue +FOR EACH ROW +WHEN (pg_trigger_depth() < 1) -- Prevent infinite loop v1 <-> v2 +EXECUTE FUNCTION v2_job_queue_before_update(); diff --git a/backend/migrations/20250110124744_v2_job_completed_sync.down.sql b/backend/migrations/20250110124744_v2_job_completed_sync.down.sql new file mode 100644 index 0000000000000..5ceab6f325fac --- /dev/null +++ b/backend/migrations/20250110124744_v2_job_completed_sync.down.sql @@ -0,0 +1,2 @@ +-- Add down migration script here +DROP FUNCTION v2_job_completed_before_insert() CASCADE; diff --git a/backend/migrations/20250110124744_v2_job_completed_sync.up.sql b/backend/migrations/20250110124744_v2_job_completed_sync.up.sql new file mode 100644 index 0000000000000..57e4a0d244447 --- /dev/null +++ b/backend/migrations/20250110124744_v2_job_completed_sync.up.sql @@ -0,0 +1,56 @@ +-- Add up 
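To make the dual-write contract of these triggers concrete, here is an illustrative v1-style enqueue (made-up values; real writers set many more columns) that the AFTER INSERT function above fans out into v2_job and v2_job_runtime. The WHEN guards (pg_trigger_depth() < 1, the __created_by null check, and the 0xcafe sentinel) are what keep the two sync directions from re-triggering each other:

BEGIN;
INSERT INTO v2_job_queue (id, workspace_id, tag, scheduled_for, running,
                          __created_by, __job_kind, __permissioned_as, __email)
VALUES (gen_random_uuid(), 'demo-workspace', 'other', now(), false,
        'alice', 'script', 'u/alice', 'alice@example.com');
SELECT count(*) FROM v2_job WHERE created_by = 'alice';  -- >= 1: mirrored by the trigger
ROLLBACK;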
migration script here + +-- v1 -> v2 +-- On every insert to `v2_job_completed`, insert to `v2_job` as well +-- This trigger will be removed once all server(s)/worker(s) are updated to use `v2_*` tables +CREATE OR REPLACE FUNCTION v2_job_completed_before_insert() +RETURNS TRIGGER AS $$ +DECLARE + job v2_job; +BEGIN + IF NEW.__created_by IS NULL THEN + -- v2 -> v1 + SELECT * INTO job FROM v2_job WHERE id = NEW.id; + NEW.__parent_job := job.parent_job; + NEW.__created_by := job.created_by; + NEW.__created_at := job.created_at; + NEW.__success := NEW.status = 'success'; + NEW.__script_hash := job.runnable_id; + NEW.__script_path := job.runnable_path; + NEW.__args := job.args; + -- __logs + -- __deleted + NEW.__raw_code := job.raw_code; + NEW.__canceled := NEW.status = 'canceled'; + NEW.__job_kind := job.kind; + -- __env_id + NEW.__schedule_path := job.schedule_path; + NEW.__permissioned_as := job.permissioned_as; + NEW.__raw_flow := job.raw_flow; + NEW.__is_flow_step := job.flow_step_id IS NOT NULL; + NEW.__language := job.script_lang; + NEW.__is_skipped := NEW.status = 'skipped'; + NEW.__raw_lock := job.raw_lock; + NEW.__email := job.permissioned_as_email; + NEW.__visible_to_owner := job.visible_to_owner; + NEW.__tag := job.tag; + NEW.__priority := job.priority; + ELSE + -- v1 -> v2 + NEW.completed_at := now(); + NEW.status := CASE + WHEN NEW.__is_skipped THEN 'skipped'::job_status + WHEN NEW.__canceled THEN 'canceled'::job_status + WHEN NEW.__success THEN 'success'::job_status + ELSE 'failure'::job_status + END; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_completed_before_insert_trigger +BEFORE INSERT ON v2_job_completed +FOR EACH ROW +WHEN (pg_trigger_depth() < 1) -- Prevent infinite loop v1 <-> v2 +EXECUTE FUNCTION v2_job_completed_before_insert(); diff --git a/backend/migrations/20250110124748_v2_migrate_from_v1.down.sql b/backend/migrations/20250110124748_v2_migrate_from_v1.down.sql new file mode 100644 index 0000000000000..6bc376fc3d78d --- /dev/null +++ b/backend/migrations/20250110124748_v2_migrate_from_v1.down.sql @@ -0,0 +1,2 @@ +-- Add down migration script here +-- Nothing to do here diff --git a/backend/migrations/20250110124748_v2_migrate_from_v1.up.sql b/backend/migrations/20250110124748_v2_migrate_from_v1.up.sql new file mode 100644 index 0000000000000..7e68c24d9d330 --- /dev/null +++ b/backend/migrations/20250110124748_v2_migrate_from_v1.up.sql @@ -0,0 +1,142 @@ +-- Add up migration script here + +-- Set new columns in `v2_job_completed`: +UPDATE v2_job_completed +SET completed_at = started_at + (interval '1 millisecond' * duration_ms), + status = CASE + WHEN __is_skipped THEN 'skipped'::job_status + WHEN __canceled THEN 'canceled'::job_status + WHEN __success THEN 'success'::job_status + ELSE 'failure'::job_status END +WHERE status IS NULL; + +-- Insert missing `v2_job` rows from `v2_job_queue`: +INSERT INTO v2_job ( + id, workspace_id, created_at, created_by, permissioned_as, permissioned_as_email, + kind, runnable_id, runnable_path, parent_job, + script_lang, + flow_step_id, flow_root_job, + schedule_path, + tag, same_worker, visible_to_owner, concurrent_limit, concurrency_time_window_s, cache_ttl, timeout, priority, + args, pre_run_error, + raw_code, raw_lock, raw_flow +) SELECT + id, workspace_id, created_at, __created_by, __permissioned_as, __email, + __job_kind, __script_hash, __script_path, __parent_job, + __language, + __flow_step_id, __root_job, + __schedule_path, + tag, __same_worker, __visible_to_owner, 
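The ordering of the CASE arms in the status mapping above matters: a row flagged both __is_skipped and __canceled resolves to 'skipped', because the first matching arm wins. A small truth table to sanity-check the mapping (illustrative; assumes the job_status type created in 20250110124423):

SELECT skipped, canceled, success,
       CASE WHEN skipped THEN 'skipped'
            WHEN canceled THEN 'canceled'
            WHEN success THEN 'success'
            ELSE 'failure' END::job_status AS status
FROM (VALUES (true, false, false),
             (false, true, false),
             (false, false, true),
             (false, false, false)) AS t(skipped, canceled, success);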
__concurrent_limit, __concurrency_time_window_s, + __cache_ttl, __timeout, priority, + __args, __pre_run_error, + __raw_code, __raw_lock, __raw_flow +FROM v2_job_queue +WHERE NOT EXISTS (SELECT 1 FROM v2_job WHERE v2_job.id = v2_job_queue.id); + +-- Insert missing `v2_job` rows from `v2_job_completed`: +INSERT INTO v2_job ( + id, workspace_id, created_at, created_by, permissioned_as, permissioned_as_email, + kind, runnable_id, runnable_path, parent_job, + script_lang, + schedule_path, + tag, visible_to_owner, priority, + args, + raw_code, raw_lock, raw_flow +) SELECT + id, workspace_id, __created_at, __created_by, __permissioned_as, __email, + __job_kind, __script_hash, __script_path, __parent_job, + __language, + __schedule_path, + __tag, __visible_to_owner, __priority, + __args, + __raw_code, __raw_lock, __raw_flow +FROM v2_job_completed +WHERE NOT EXISTS (SELECT 1 FROM v2_job WHERE v2_job.id = v2_job_completed.id); + +-- Set existing `v2_job` rows from `v2_job_queue`: +UPDATE v2_job SET + created_at = v2_job_queue.created_at, + created_by = v2_job_queue.__created_by, + permissioned_as = v2_job_queue.__permissioned_as, + permissioned_as_email = v2_job_queue.__email, + kind = v2_job_queue.__job_kind, + runnable_id = v2_job_queue.__script_hash, + runnable_path = v2_job_queue.__script_path, + parent_job = v2_job_queue.__parent_job, + script_lang = v2_job_queue.__language, + flow_step_id = v2_job_queue.__flow_step_id, + flow_root_job = v2_job_queue.__root_job, + schedule_path = v2_job_queue.__schedule_path, + tag = v2_job_queue.tag, + same_worker = v2_job_queue.__same_worker, + visible_to_owner = v2_job_queue.__visible_to_owner, + concurrent_limit = v2_job_queue.__concurrent_limit, + concurrency_time_window_s = v2_job_queue.__concurrency_time_window_s, + cache_ttl = v2_job_queue.__cache_ttl, + timeout = v2_job_queue.__timeout, + priority = v2_job_queue.priority, + args = v2_job_queue.__args, + pre_run_error = v2_job_queue.__pre_run_error, + raw_code = COALESCE(v2_job.raw_code, v2_job_queue.__raw_code), + raw_lock = COALESCE(v2_job.raw_lock, v2_job_queue.__raw_lock), + raw_flow = COALESCE(v2_job.raw_flow, v2_job_queue.__raw_flow) +FROM v2_job_queue +WHERE v2_job.id = v2_job_queue.id AND v2_job.created_by = 'missing'; + +-- Set existing `v2_job` rows from `v2_job_completed`: +UPDATE v2_job SET + created_at = v2_job_completed.__created_at, + created_by = v2_job_completed.__created_by, + permissioned_as = v2_job_completed.__permissioned_as, + permissioned_as_email = v2_job_completed.__email, + kind = v2_job_completed.__job_kind, + runnable_id = v2_job_completed.__script_hash, + runnable_path = v2_job_completed.__script_path, + parent_job = v2_job_completed.__parent_job, + script_lang = v2_job_completed.__language, + schedule_path = v2_job_completed.__schedule_path, + tag = v2_job_completed.__tag, + visible_to_owner = v2_job_completed.__visible_to_owner, + priority = v2_job_completed.__priority, + args = v2_job_completed.__args, + raw_code = COALESCE(v2_job.raw_code, v2_job_completed.__raw_code), + raw_lock = COALESCE(v2_job.raw_lock, v2_job_completed.__raw_lock), + raw_flow = COALESCE(v2_job.raw_flow, v2_job_completed.__raw_flow) +FROM v2_job_completed +WHERE v2_job.id = v2_job_completed.id AND v2_job.created_by = 'missing'; + +-- Migrate `v2_job_queue` moved columns to `v2_job_runtime`: +INSERT INTO v2_job_runtime (id, ping, memory_peak) +SELECT id, __last_ping, __mem_peak +FROM v2_job_queue +WHERE NOT running AND (__last_ping IS NOT NULL OR __mem_peak IS NOT NULL) + -- Locked ones will sync
within triggers + FOR UPDATE SKIP LOCKED +ON CONFLICT (id) DO NOTHING; + +-- Migrate `v2_job_queue` moved columns to `v2_job_flow_runtime`: +INSERT INTO v2_job_flow_runtime (id, flow_status, leaf_jobs) +SELECT id, __flow_status, __leaf_jobs +FROM v2_job_queue +WHERE NOT running AND (__flow_status IS NOT NULL OR __leaf_jobs IS NOT NULL) + -- Locked ones will sync within triggers + FOR UPDATE SKIP LOCKED +ON CONFLICT (id) DO NOTHING; + +-- Migrate old `v2_job_queue.__logs` to `job_logs` +INSERT INTO job_logs (job_id, workspace_id, logs) +SELECT id, workspace_id, __logs +FROM v2_job_queue +WHERE __logs IS NOT NULL +ON CONFLICT (job_id) DO UPDATE SET + logs = CONCAT(job_logs.logs, EXCLUDED.logs) +; + +-- Migrate old `v2_job_completed.__logs` to `job_logs` +INSERT INTO job_logs (job_id, workspace_id, logs) +SELECT id, workspace_id, __logs +FROM v2_job_completed +WHERE __logs IS NOT NULL AND __logs != '##DELETED##' +ON CONFLICT (job_id) DO UPDATE SET + logs = CONCAT(job_logs.logs, EXCLUDED.logs) +; diff --git a/backend/migrations/20250110124804_v2_sync_runtime.down.sql b/backend/migrations/20250110124804_v2_sync_runtime.down.sql new file mode 100644 index 0000000000000..9c9fe30241f7e --- /dev/null +++ b/backend/migrations/20250110124804_v2_sync_runtime.down.sql @@ -0,0 +1,3 @@ +-- Add down migration script here +DROP FUNCTION v2_job_runtime_before_insert() CASCADE; +DROP FUNCTION v2_job_runtime_before_update() CASCADE; diff --git a/backend/migrations/20250110124804_v2_sync_runtime.up.sql b/backend/migrations/20250110124804_v2_sync_runtime.up.sql new file mode 100644 index 0000000000000..c7c5510e51113 --- /dev/null +++ b/backend/migrations/20250110124804_v2_sync_runtime.up.sql @@ -0,0 +1,37 @@ +-- Add up migration script here + +-- On every insert/update to `v2_job_runtime`, reflect to `v2_job_queue` as well +-- These triggers will be removed once all server(s)/worker(s) are updated to use `v2_*` tables +CREATE OR REPLACE FUNCTION v2_job_runtime_before_insert() + RETURNS TRIGGER AS $$ +BEGIN + UPDATE v2_job_queue + SET __last_ping = NEW.ping, __mem_peak = NEW.memory_peak + WHERE id = NEW.id; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_runtime_before_insert_trigger + BEFORE INSERT ON v2_job_runtime + FOR EACH ROW + WHEN (pg_trigger_depth() < 1) -- Prevent infinite loop v2 <-> v1 +EXECUTE FUNCTION v2_job_runtime_before_insert(); + +CREATE OR REPLACE FUNCTION v2_job_runtime_before_update() + RETURNS TRIGGER AS $$ +BEGIN + IF NEW.ping IS DISTINCT FROM OLD.ping OR NEW.memory_peak IS DISTINCT FROM OLD.memory_peak THEN + UPDATE v2_job_queue + SET __last_ping = NEW.ping, __mem_peak = NEW.memory_peak + WHERE id = NEW.id; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_runtime_before_update_trigger + BEFORE UPDATE ON v2_job_runtime + FOR EACH ROW + WHEN (pg_trigger_depth() < 1) -- Prevent infinite loop v2 <-> v1 +EXECUTE FUNCTION v2_job_runtime_before_update(); diff --git a/backend/migrations/20250110124805_v2_sync_flow_runtime.down.sql b/backend/migrations/20250110124805_v2_sync_flow_runtime.down.sql new file mode 100644 index 0000000000000..8fba34b2a46bd --- /dev/null +++ b/backend/migrations/20250110124805_v2_sync_flow_runtime.down.sql @@ -0,0 +1,3 @@ +-- Add down migration script here +DROP FUNCTION v2_job_flow_runtime_before_insert() CASCADE; +DROP FUNCTION v2_job_flow_runtime_before_update() CASCADE; diff --git a/backend/migrations/20250110124805_v2_sync_flow_runtime.up.sql
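After the v2_migrate_from_v1 backfill above, a reasonable (hypothetical, not part of this PR) sanity check is that every queued or completed job now has a v2_job row; both of these should return zero:

SELECT count(*) FROM v2_job_queue q
WHERE NOT EXISTS (SELECT 1 FROM v2_job j WHERE j.id = q.id);
SELECT count(*) FROM v2_job_completed c
WHERE NOT EXISTS (SELECT 1 FROM v2_job j WHERE j.id = c.id);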
b/backend/migrations/20250110124805_v2_sync_flow_runtime.up.sql new file mode 100644 index 0000000000000..c1f1aceaac778 --- /dev/null +++ b/backend/migrations/20250110124805_v2_sync_flow_runtime.up.sql @@ -0,0 +1,38 @@ +-- Add up migration script here + +-- On every insert/update to `v2_job_flow_runtime`, reflect to `v2_job_queue` as well +-- These triggers will be removed once all server(s)/worker(s) are updated to use `v2_*` tables +CREATE OR REPLACE FUNCTION v2_job_flow_runtime_before_insert() + RETURNS TRIGGER AS $$ +BEGIN + UPDATE v2_job_queue + SET __flow_status = NEW.flow_status, __leaf_jobs = NEW.leaf_jobs + WHERE id = NEW.id; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_flow_runtime_before_insert_trigger + BEFORE INSERT ON v2_job_flow_runtime + FOR EACH ROW + WHEN (pg_trigger_depth() < 1) -- Prevent infinite loop v2 <-> v1 +EXECUTE FUNCTION v2_job_flow_runtime_before_insert(); + +CREATE OR REPLACE FUNCTION v2_job_flow_runtime_before_update() + RETURNS TRIGGER AS $$ +BEGIN + IF NEW.flow_status::TEXT IS DISTINCT FROM OLD.flow_status::TEXT OR + NEW.leaf_jobs::TEXT IS DISTINCT FROM OLD.leaf_jobs::TEXT THEN + UPDATE v2_job_queue + SET __flow_status = NEW.flow_status, __leaf_jobs = NEW.leaf_jobs + WHERE id = NEW.id; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE TRIGGER v2_job_flow_runtime_before_update_trigger + BEFORE UPDATE ON v2_job_flow_runtime + FOR EACH ROW + WHEN (pg_trigger_depth() < 1) -- Prevent infinite loop v2 <-> v1 +EXECUTE FUNCTION v2_job_flow_runtime_before_update(); diff --git a/backend/migrations/20250110124919_v2_job_constraints.down.sql b/backend/migrations/20250110124919_v2_job_constraints.down.sql new file mode 100644 index 0000000000000..72796ff008b58 --- /dev/null +++ b/backend/migrations/20250110124919_v2_job_constraints.down.sql @@ -0,0 +1,4 @@ +-- Add down migration script here +ALTER TABLE v2_job ALTER COLUMN workspace_id DROP NOT NULL; +ALTER TABLE v2_job ALTER COLUMN tag DROP DEFAULT; +ALTER TABLE v2_job ALTER COLUMN tag DROP NOT NULL; diff --git a/backend/migrations/20250110124919_v2_job_constraints.up.sql b/backend/migrations/20250110124919_v2_job_constraints.up.sql new file mode 100644 index 0000000000000..b0ba090fa9c39 --- /dev/null +++ b/backend/migrations/20250110124919_v2_job_constraints.up.sql @@ -0,0 +1,4 @@ +-- Add up migration script here +ALTER TABLE v2_job ALTER COLUMN workspace_id SET NOT NULL; +ALTER TABLE v2_job ALTER COLUMN tag SET DEFAULT 'other'; +ALTER TABLE v2_job ALTER COLUMN tag SET NOT NULL; diff --git a/backend/migrations/20250110145628_v2_job_queue_constraints.down.sql b/backend/migrations/20250110145628_v2_job_queue_constraints.down.sql new file mode 100644 index 0000000000000..9f13ce1b41241 --- /dev/null +++ b/backend/migrations/20250110145628_v2_job_queue_constraints.down.sql @@ -0,0 +1,2 @@ +-- Add down migration script here +ALTER TABLE v2_job_queue ALTER COLUMN __created_by SET NOT NULL; diff --git a/backend/migrations/20250110145628_v2_job_queue_constraints.up.sql b/backend/migrations/20250110145628_v2_job_queue_constraints.up.sql new file mode 100644 index 0000000000000..5eac732ab0a70 --- /dev/null +++ b/backend/migrations/20250110145628_v2_job_queue_constraints.up.sql @@ -0,0 +1,2 @@ +-- Add up migration script here +ALTER TABLE v2_job_queue ALTER COLUMN __created_by DROP NOT NULL; diff --git a/backend/migrations/20250110145629_v2_job_completed_constraints.down.sql b/backend/migrations/20250110145629_v2_job_completed_constraints.down.sql new file mode 100644 index
0000000000000..928a5feb5a44a --- /dev/null +++ b/backend/migrations/20250110145629_v2_job_completed_constraints.down.sql @@ -0,0 +1,8 @@ +-- Add down migration script here +ALTER TABLE v2_job_completed ALTER COLUMN status DROP NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN completed_at DROP DEFAULT; +ALTER TABLE v2_job_completed ALTER COLUMN completed_at DROP NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN started_at SET NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN __created_at SET NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN __created_by SET NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN __success SET NOT NULL; diff --git a/backend/migrations/20250110145629_v2_job_completed_constraints.up.sql b/backend/migrations/20250110145629_v2_job_completed_constraints.up.sql new file mode 100644 index 0000000000000..9c4772ec726ad --- /dev/null +++ b/backend/migrations/20250110145629_v2_job_completed_constraints.up.sql @@ -0,0 +1,8 @@ +-- Add up migration script here +ALTER TABLE v2_job_completed ALTER COLUMN status SET NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN completed_at SET DEFAULT now(); +ALTER TABLE v2_job_completed ALTER COLUMN completed_at SET NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN started_at DROP NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN __created_at DROP NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN __created_by DROP NOT NULL; +ALTER TABLE v2_job_completed ALTER COLUMN __success DROP NOT NULL; diff --git a/backend/migrations/20250110145630_v2_queue_compatibility_view.down.sql b/backend/migrations/20250110145630_v2_queue_compatibility_view.down.sql new file mode 100644 index 0000000000000..27e4d60fa3242 --- /dev/null +++ b/backend/migrations/20250110145630_v2_queue_compatibility_view.down.sql @@ -0,0 +1,6 @@ +-- Add down migration script here +DROP FUNCTION v2_queue_instead_of_update_overlay() CASCADE; +DROP FUNCTION v2_queue_instead_of_update() CASCADE; +DROP FUNCTION v2_queue_instead_of_delete() CASCADE; +DROP FUNCTION v2_queue_update(OLD v2_queue, NEW v2_queue) CASCADE; +DROP VIEW v2_queue; diff --git a/backend/migrations/20250110145630_v2_queue_compatibility_view.up.sql b/backend/migrations/20250110145630_v2_queue_compatibility_view.up.sql new file mode 100644 index 0000000000000..89f6ef7ad0acd --- /dev/null +++ b/backend/migrations/20250110145630_v2_queue_compatibility_view.up.sql @@ -0,0 +1,170 @@ +-- Add up migration script here +CREATE OR REPLACE VIEW v2_queue AS +SELECT + j.id, + j.workspace_id, + j.parent_job, + j.created_by, + j.created_at, + q.started_at, + q.scheduled_for, + q.running, + j.runnable_id AS script_hash, + j.runnable_path AS script_path, + j.args, + j.raw_code, + q.canceled_by IS NOT NULL AS canceled, + q.canceled_by, + q.canceled_reason, + r.ping AS last_ping, + j.kind AS job_kind, + j.schedule_path, + j.permissioned_as, + f.flow_status, + j.raw_flow, + j.flow_step_id IS NOT NULL AS is_flow_step, + j.script_lang AS language, + q.suspend, + q.suspend_until, + j.same_worker, + j.raw_lock, + j.pre_run_error, + j.permissioned_as_email AS email, + j.visible_to_owner, + r.memory_peak AS mem_peak, + j.flow_root_job AS root_job, + f.leaf_jobs, + j.tag, + j.concurrent_limit, + j.concurrency_time_window_s, + j.timeout, + j.flow_step_id, + j.cache_ttl, + j.priority, + NULL::TEXT AS logs, + NULL::BIGINT AS env_id +FROM v2_job_queue q + JOIN v2_job j USING (id) + LEFT JOIN v2_job_runtime r USING (id) + LEFT JOIN v2_job_flow_runtime f USING (id) +; + +-- Dispatch update of `v1` schema to `v2_*` tables. 
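To see what that dispatch means in practice before reading the function: a v1-style cancellation issued against the view (illustrative id) is rewritten by the INSTEAD OF trigger into updates of the underlying tables, since canceled itself is a computed column here:

BEGIN;
UPDATE v2_queue SET canceled = true
WHERE id = '00000000-0000-0000-0000-000000000001';
-- effectively becomes:
--   UPDATE v2_job_queue
--   SET canceled_by = 'unknown', canceled_reason = 'canceled by user', ...
--   WHERE id = '00000000-0000-0000-0000-000000000001';
ROLLBACK;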
+CREATE OR REPLACE FUNCTION v2_queue_update(OLD v2_queue, NEW v2_queue) RETURNS VOID AS $$ BEGIN + -- Unsupported columns: + IF NEW.workspace_id IS DISTINCT FROM OLD.workspace_id + OR NEW.parent_job IS DISTINCT FROM OLD.parent_job + OR NEW.created_by IS DISTINCT FROM OLD.created_by + OR NEW.created_at IS DISTINCT FROM OLD.created_at + OR NEW.script_hash IS DISTINCT FROM OLD.script_hash + OR NEW.script_path IS DISTINCT FROM OLD.script_path + OR NEW.raw_code IS DISTINCT FROM OLD.raw_code + OR NEW.job_kind IS DISTINCT FROM OLD.job_kind + OR NEW.schedule_path IS DISTINCT FROM OLD.schedule_path + OR NEW.permissioned_as IS DISTINCT FROM OLD.permissioned_as + OR NEW.raw_flow::TEXT IS DISTINCT FROM OLD.raw_flow::TEXT + OR NEW.language IS DISTINCT FROM OLD.language + OR NEW.same_worker IS DISTINCT FROM OLD.same_worker + OR NEW.raw_lock IS DISTINCT FROM OLD.raw_lock + OR NEW.pre_run_error IS DISTINCT FROM OLD.pre_run_error + OR NEW.email IS DISTINCT FROM OLD.email + OR NEW.visible_to_owner IS DISTINCT FROM OLD.visible_to_owner + OR NEW.concurrent_limit IS DISTINCT FROM OLD.concurrent_limit + OR NEW.concurrency_time_window_s IS DISTINCT FROM OLD.concurrency_time_window_s + OR NEW.timeout IS DISTINCT FROM OLD.timeout + OR NEW.flow_step_id IS DISTINCT FROM OLD.flow_step_id + OR NEW.cache_ttl IS DISTINCT FROM OLD.cache_ttl + OR NEW.priority IS DISTINCT FROM OLD.priority + THEN + RAISE EXCEPTION 'Updating an immutable column in `v2_queue`'; + END IF; + -- Update the `v2_job` table + IF NEW.args::TEXT IS DISTINCT FROM OLD.args::TEXT THEN + UPDATE v2_job + SET args = NEW.args + WHERE id = OLD.id; + END IF; + -- Update the `v2_job_queue` table + IF NEW.canceled AND NEW.canceled_by IS NULL THEN + NEW.canceled_by := 'unknown'; + NEW.canceled_reason := 'canceled by user'; + END IF; + IF NEW.started_at IS DISTINCT FROM OLD.started_at + OR NEW.scheduled_for IS DISTINCT FROM OLD.scheduled_for + OR NEW.running IS DISTINCT FROM OLD.running + OR NEW.canceled_by IS DISTINCT FROM OLD.canceled_by + OR NEW.canceled_reason IS DISTINCT FROM OLD.canceled_reason + OR NEW.suspend IS DISTINCT FROM OLD.suspend + OR NEW.suspend_until IS DISTINCT FROM OLD.suspend_until + THEN + UPDATE v2_job_queue + SET started_at = NEW.started_at, + scheduled_for = NEW.scheduled_for, + running = NEW.running, + canceled_by = NEW.canceled_by, + canceled_reason = NEW.canceled_reason, + suspend = NEW.suspend, + suspend_until = NEW.suspend_until + WHERE id = OLD.id; + END IF; + -- Update the `v2_job_runtime` table + IF NEW.last_ping IS DISTINCT FROM OLD.last_ping + OR NEW.mem_peak IS DISTINCT FROM OLD.mem_peak + THEN + UPDATE v2_job_runtime + SET ping = NEW.last_ping, + memory_peak = NEW.mem_peak + WHERE id = OLD.id; + END IF; + -- Update the `v2_job_flow_runtime` table + IF NEW.flow_status::TEXT IS DISTINCT FROM OLD.flow_status::TEXT + OR NEW.leaf_jobs::TEXT IS DISTINCT FROM OLD.leaf_jobs::TEXT + THEN + UPDATE v2_job_flow_runtime + SET flow_status = NEW.flow_status, + leaf_jobs = NEW.leaf_jobs + WHERE id = OLD.id; + END IF; +END $$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION v2_queue_instead_of_update() RETURNS TRIGGER AS $$ BEGIN + -- v1 -> v2 sync + PERFORM v2_queue_update(OLD, NEW); + RETURN NEW; +END $$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION v2_queue_instead_of_update_overlay() RETURNS TRIGGER AS $$ BEGIN + -- v1 -> v2 sync + PERFORM v2_queue_update(OLD, NEW); + -- v2 -> v1 sync + IF NEW.args IS DISTINCT FROM OLD.args + OR NEW.last_ping IS DISTINCT FROM OLD.last_ping + OR NEW.mem_peak IS DISTINCT FROM OLD.mem_peak + OR 
NEW.flow_status::TEXT IS DISTINCT FROM OLD.flow_status::TEXT + OR NEW.leaf_jobs::TEXT IS DISTINCT FROM OLD.leaf_jobs::TEXT + THEN + UPDATE v2_job_queue + SET __args = NEW.args, + __last_ping = NEW.last_ping, + __mem_peak = NEW.mem_peak, + __flow_status = NEW.flow_status, + __leaf_jobs = NEW.leaf_jobs + WHERE id = OLD.id; + END IF; + RETURN NEW; +END $$ LANGUAGE plpgsql; + +CREATE TRIGGER v2_queue_instead_of_update_trigger +INSTEAD OF UPDATE ON v2_queue +FOR EACH ROW +EXECUTE PROCEDURE v2_queue_instead_of_update_overlay(); + +CREATE OR REPLACE FUNCTION v2_queue_instead_of_delete() RETURNS TRIGGER AS $$ BEGIN + DELETE FROM v2_job_queue WHERE id = OLD.id; + RETURN OLD; +END $$ LANGUAGE plpgsql; + +CREATE TRIGGER v2_queue_instead_of_delete_trigger + INSTEAD OF DELETE ON v2_queue + FOR EACH ROW +EXECUTE PROCEDURE v2_queue_instead_of_delete(); diff --git a/backend/migrations/20250110145631_v2_completed_job_compatibility_view.down.sql b/backend/migrations/20250110145631_v2_completed_job_compatibility_view.down.sql new file mode 100644 index 0000000000000..3bc436e1fd4d2 --- /dev/null +++ b/backend/migrations/20250110145631_v2_completed_job_compatibility_view.down.sql @@ -0,0 +1,6 @@ +-- Add down migration script here +DROP FUNCTION v2_completed_job_instead_of_update_overlay() CASCADE; +DROP FUNCTION v2_completed_job_instead_of_update() CASCADE; +DROP FUNCTION v2_completed_job_instead_of_delete() CASCADE; +DROP FUNCTION v2_completed_job_update(OLD v2_completed_job, NEW v2_completed_job) CASCADE; +DROP VIEW v2_completed_job; diff --git a/backend/migrations/20250110145631_v2_completed_job_compatibility_view.up.sql b/backend/migrations/20250110145631_v2_completed_job_compatibility_view.up.sql new file mode 100644 index 0000000000000..61f08717f0cc9 --- /dev/null +++ b/backend/migrations/20250110145631_v2_completed_job_compatibility_view.up.sql @@ -0,0 +1,110 @@ +-- Add up migration script here +CREATE OR REPLACE VIEW v2_completed_job AS +SELECT + j.id, + j.workspace_id, + j.parent_job, + j.created_by, + j.created_at, + c.duration_ms, + c.status = 'success' AS success, + j.runnable_id AS script_hash, + j.runnable_path AS script_path, + j.args, + c.result, + c.deleted, + j.raw_code, + c.status = 'canceled' AS canceled, + c.canceled_by, + c.canceled_reason, + j.kind AS job_kind, + j.schedule_path, + j.permissioned_as, + c.flow_status, + j.raw_flow, + j.flow_step_id IS NOT NULL AS is_flow_step, + j.script_lang AS language, + c.started_at, + c.status = 'skipped' AS is_skipped, + j.raw_lock, + j.permissioned_as_email AS email, + j.visible_to_owner, + c.memory_peak AS mem_peak, + j.tag, + j.priority, + NULL::TEXT AS logs, + NULL::BIGINT AS env_id +FROM v2_job_completed c + JOIN v2_job j USING (id) +; + +CREATE OR REPLACE FUNCTION v2_completed_job_update(OLD v2_completed_job, NEW v2_completed_job) RETURNS VOID AS $$ BEGIN + -- Unsupported columns: + IF NEW.workspace_id IS DISTINCT FROM OLD.workspace_id + OR NEW.parent_job IS DISTINCT FROM OLD.parent_job + OR NEW.created_by IS DISTINCT FROM OLD.created_by + OR NEW.created_at IS DISTINCT FROM OLD.created_at + OR NEW.script_hash IS DISTINCT FROM OLD.script_hash + OR NEW.script_path IS DISTINCT FROM OLD.script_path + OR NEW.raw_code IS DISTINCT FROM OLD.raw_code + OR NEW.job_kind IS DISTINCT FROM OLD.job_kind + OR NEW.schedule_path IS DISTINCT FROM OLD.schedule_path + OR NEW.permissioned_as IS DISTINCT FROM OLD.permissioned_as + OR NEW.raw_flow::TEXT IS DISTINCT FROM OLD.raw_flow::TEXT + OR NEW.language IS DISTINCT FROM OLD.language + OR NEW.raw_lock IS DISTINCT
FROM OLD.raw_lock + OR NEW.email IS DISTINCT FROM OLD.email + OR NEW.visible_to_owner IS DISTINCT FROM OLD.visible_to_owner + OR NEW.priority IS DISTINCT FROM OLD.priority + THEN + RAISE EXCEPTION 'Updating an immutable column in `v2_completed_job`'; + END IF; + -- Update the `v2_job` table + IF NEW.args::TEXT IS DISTINCT FROM OLD.args::TEXT THEN + UPDATE v2_job + SET args = NEW.args + WHERE id = OLD.id; + END IF; + -- Update the `v2_job_completed` table + IF NEW.result::TEXT IS DISTINCT FROM OLD.result::TEXT + OR NEW.deleted IS DISTINCT FROM OLD.deleted + THEN + UPDATE v2_job_completed + SET result = NEW.result, + deleted = NEW.deleted + WHERE id = OLD.id; + END IF; +END $$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION v2_completed_job_instead_of_update() RETURNS TRIGGER AS $$ BEGIN + -- v1 -> v2 sync + PERFORM v2_completed_job_update(OLD, NEW); + RETURN NEW; +END $$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION v2_completed_job_instead_of_update_overlay() RETURNS TRIGGER AS $$ BEGIN + -- v1 -> v2 sync + PERFORM v2_completed_job_update(OLD, NEW); + -- v2 -> v1 sync + IF NEW.args::TEXT IS DISTINCT FROM OLD.args::TEXT THEN + UPDATE v2_job_completed + SET __args = NEW.args + WHERE id = OLD.id; + END IF; + RETURN NEW; +END $$ LANGUAGE plpgsql; + +CREATE TRIGGER v2_completed_job_instead_of_update_trigger + INSTEAD OF UPDATE ON v2_completed_job + FOR EACH ROW +EXECUTE PROCEDURE v2_completed_job_instead_of_update_overlay(); + +CREATE OR REPLACE FUNCTION v2_completed_job_instead_of_delete() RETURNS TRIGGER AS $$ BEGIN + DELETE FROM v2_job_completed WHERE id = OLD.id; + RETURN OLD; +END $$ LANGUAGE plpgsql; + +CREATE TRIGGER v2_completed_job_instead_of_delete_trigger + INSTEAD OF DELETE ON v2_completed_job + FOR EACH ROW +EXECUTE PROCEDURE v2_completed_job_instead_of_delete(); diff --git a/backend/src/monitor.rs b/backend/src/monitor.rs index 0aeb29df56ea5..4e763b3d844f2 100644 --- a/backend/src/monitor.rs +++ b/backend/src/monitor.rs @@ -19,6 +19,7 @@ use tokio::{ join, sync::{mpsc, RwLock}, }; +use uuid::Uuid; #[cfg(feature = "embedding")] use windmill_api::embeddings::update_embeddings_db; @@ -36,7 +37,7 @@ use windmill_common::{ auth::JWT_SECRET, ee::CriticalErrorChannel, error, - flow_status::FlowStatusModule, + flow_status::{FlowStatus, FlowStatusModule}, global_settings::{ BASE_URL_SETTING, BUNFIG_INSTALL_SCOPES_SETTING, CRITICAL_ALERT_MUTE_UI_SETTING, CRITICAL_ERROR_CHANNELS_SETTING, DEFAULT_TAGS_PER_WORKSPACE_SETTING, @@ -734,11 +735,11 @@ pub async fn delete_expired_items(db: &DB) -> () { match db.begin().await { Ok(mut tx) => { let deleted_jobs = sqlx::query_scalar!( - "DELETE FROM completed_job WHERE created_at <= now() - ($1::bigint::text || ' s')::interval AND started_at + ((duration_ms/1000 + $1::bigint) || ' s')::interval <= now() RETURNING id", - job_retention_secs - ) - .fetch_all(&mut *tx) - .await; + "DELETE FROM v2_completed_job WHERE created_at <= now() - ($1::bigint::text || ' s')::interval AND started_at + ((duration_ms/1000 + $1::bigint) || ' s')::interval <= now() RETURNING id AS \"id!\"", + job_retention_secs + ) + .fetch_all(&mut *tx) + .await; match deleted_jobs { Ok(deleted_jobs) => { @@ -786,7 +787,7 @@ pub async fn delete_expired_items(db: &DB) -> () { } if let Err(e) = - sqlx::query!("DELETE FROM job WHERE id = ANY($1)", &deleted_jobs) + sqlx::query!("DELETE FROM v2_job WHERE id = ANY($1)", &deleted_jobs) .execute(&mut *tx) .await { @@ -1332,7 +1333,7 @@ pub async fn expose_queue_metrics(db: &Pool<Postgres>) { sqlx::query!( "INSERT INTO metrics (id, value) VALUES ($1,
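On the retention predicate in delete_expired_items above: ($1::bigint::text || ' s')::interval builds an interval from a plain seconds parameter by going through text. For example:

SELECT (3600::bigint::text || ' s')::interval;  -- 01:00:00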
to_jsonb((SELECT EXTRACT(EPOCH FROM now() - scheduled_for) - FROM queue WHERE tag = $2 AND running = false AND scheduled_for <= now() - ('3 seconds')::interval + FROM v2_queue WHERE tag = $2 AND running = false AND scheduled_for <= now() - ('3 seconds')::interval ORDER BY priority DESC NULLS LAST, scheduled_for LIMIT 1)))", format!("queue_delay_{}", tag), tag @@ -1495,15 +1496,15 @@ pub async fn reload_base_url_setting(db: &DB) -> error::Result<()> { async fn handle_zombie_jobs(db: &Pool<Postgres>, base_internal_url: &str, worker_name: &str) { if *RESTART_ZOMBIE_JOBS { let restarted = sqlx::query!( - "UPDATE queue SET running = false, started_at = null - WHERE last_ping < now() - ($1 || ' seconds')::interval - AND running = true AND job_kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow') AND same_worker = false RETURNING id, workspace_id, last_ping", - *ZOMBIE_JOB_TIMEOUT, - ) - .fetch_all(db) - .await - .ok() - .unwrap_or_else(|| vec![]); + "UPDATE v2_queue SET running = false, started_at = null + WHERE last_ping < now() - ($1 || ' seconds')::interval + AND running = true AND job_kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow') AND same_worker = false RETURNING id AS \"id!\", workspace_id AS \"workspace_id!\", last_ping", + *ZOMBIE_JOB_TIMEOUT, + ) + .fetch_all(db) + .await + .ok() + .unwrap_or_else(|| vec![]); #[cfg(feature = "prometheus")] if METRICS_ENABLED.load(std::sync::atomic::Ordering::Relaxed) { @@ -1533,7 +1534,7 @@ async fn handle_zombie_jobs(db: &Pool<Postgres>, base_internal_url: &str, worker } let mut timeout_query = - "SELECT * FROM queue WHERE last_ping < now() - ($1 || ' seconds')::interval + "SELECT * FROM v2_queue WHERE last_ping < now() - ($1 || ' seconds')::interval AND running = true AND job_kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow')" .to_string(); if *RESTART_ZOMBIE_JOBS { @@ -1607,27 +1608,35 @@ async fn handle_zombie_jobs(db: &Pool<Postgres>, base_internal_url: &str, worker worker_name, send_result_never_used, #[cfg(feature = "benchmark")] - &mut windmill_common::bench::BenchmarkIter::new(), + &mut windmill_worker::bench::BenchmarkIter::new(), ) .await; } } async fn handle_zombie_flows(db: &DB) -> error::Result<()> { - let flows = sqlx::query_as::<_, QueuedJob>( + let flows = sqlx::query!( r#" - SELECT * - FROM queue - WHERE running = true AND suspend = 0 AND suspend_until IS null AND scheduled_for <= now() AND (job_kind = 'flow' OR job_kind = 'flowpreview' OR job_kind = 'flownode') - AND last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval AND canceled = false + SELECT + id AS "id!", workspace_id AS "workspace_id!", parent_job, is_flow_step, + flow_status AS "flow_status: Box<str>", last_ping, same_worker + FROM v2_queue + WHERE running = true AND suspend = 0 AND suspend_until IS null AND scheduled_for <= now() + AND (job_kind = 'flow' OR job_kind = 'flowpreview' OR job_kind = 'flownode') + AND last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval + AND canceled = false "#, - ).bind(FLOW_ZOMBIE_TRANSITION_TIMEOUT.as_str()) + FLOW_ZOMBIE_TRANSITION_TIMEOUT.as_str() + ) .fetch_all(db) .await?; for flow in flows { - let status = flow.parse_flow_status(); - if !flow.same_worker + let status = flow + .flow_status + .as_deref() + .and_then(|x| serde_json::from_str::<FlowStatus>(x).ok()); + if !flow.same_worker.unwrap_or(false) && status.is_some_and(|s| { s.modules .get(0) @@ -1642,7 +1651,7 @@ async fn handle_zombie_flows(db: &DB) -> error::Result<()> { report_critical_error(error_message, db.clone(),
Some(&flow.workspace_id), None).await; // if the flow hasn't started and is a zombie, we can simply restart it sqlx::query!( - "UPDATE queue SET running = false, started_at = null WHERE id = $1 AND canceled = false", + "UPDATE v2_queue SET running = false, started_at = null WHERE id = $1 AND canceled = false", flow.id ) .execute(db) @@ -1653,44 +1662,39 @@ async fn handle_zombie_flows(db: &DB) -> error::Result<()> { let now = now_from_db(db).await?; let reason = format!( "{} was hanging in between 2 steps. Last ping: {last_ping:?} (now: {now})", - if flow.is_flow_step && flow.parent_job.is_some() { + if flow.is_flow_step.unwrap_or(false) && flow.parent_job.is_some() { format!("Flow was cancelled because subflow {id}") } else { format!("Flow {id} was cancelled because it") } ); report_critical_error(reason.clone(), db.clone(), Some(&flow.workspace_id), None).await; - cancel_zombie_flow_job(db, flow, reason).await?; + cancel_zombie_flow_job(db, flow.id, &flow.workspace_id, reason).await?; } } let flows2 = sqlx::query!( - " - DELETE - FROM parallel_monitor_lock - WHERE last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval - RETURNING parent_flow_id, job_id, last_ping - ", + r#" + DELETE + FROM parallel_monitor_lock + WHERE last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval + RETURNING parent_flow_id, job_id, last_ping, (SELECT workspace_id FROM v2_queue q + WHERE q.id = parent_flow_id AND q.running = true AND q.canceled = false) AS workspace_id + "#, FLOW_ZOMBIE_TRANSITION_TIMEOUT.as_str() ) .fetch_all(db) .await?; for flow in flows2 { - let in_queue = sqlx::query_as::<_, QueuedJob>( - "SELECT * FROM queue WHERE id = $1 AND running = true AND canceled = false", - ) - .bind(flow.parent_flow_id) - .fetch_optional(db) - .await?; - if let Some(job) = in_queue { + if let Some(parent_flow_workspace_id) = flow.workspace_id { tracing::error!( "parallel Zombie flow detected: {} in workspace {}. Last ping was: {:?}.", - job.id, - job.workspace_id, + flow.parent_flow_id, + parent_flow_workspace_id, flow.last_ping ); - cancel_zombie_flow_job(db, job, + cancel_zombie_flow_job(db, flow.parent_flow_id, &parent_flow_workspace_id, format!("Flow {} cancelled as one of the parallel branch {} was unable to make the last transition ", flow.parent_flow_id, flow.job_id)) .await?; } else { @@ -1702,27 +1706,28 @@ async fn handle_zombie_flows(db: &DB) -> error::Result<()> { async fn cancel_zombie_flow_job( db: &Pool<Postgres>, - flow: QueuedJob, + id: Uuid, + workspace_id: &str, message: String, ) -> Result<(), error::Error> { - let tx = db.begin().await.unwrap(); + let mut tx = db.begin().await?; tracing::error!( "zombie flow detected: {} in workspace {}.
Cancelling it.", - flow.id, - flow.workspace_id + id, + workspace_id ); - let (ntx, _) = cancel_job( + (tx, _) = cancel_job( "monitor", Some(message), - flow.id, - flow.workspace_id.as_str(), + id, + workspace_id, tx, db, true, false, ) .await?; - ntx.commit().await?; + tx.commit().await?; Ok(()) } diff --git a/backend/tests/fixtures/base.sql b/backend/tests/fixtures/base.sql index f406e603d4be3..b0dfd05c082a2 100644 --- a/backend/tests/fixtures/base.sql +++ b/backend/tests/fixtures/base.sql @@ -23,13 +23,13 @@ GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_user; CREATE FUNCTION "notify_insert_on_completed_job" () RETURNS TRIGGER AS $$ BEGIN - PERFORM pg_notify('insert on completed_job', NEW.id::text); + PERFORM pg_notify('completed', NEW.id::text); RETURN NEW; END; $$ LANGUAGE PLPGSQL; CREATE TRIGGER "notify_insert_on_completed_job" - AFTER INSERT ON "completed_job" + AFTER INSERT ON "v2_job_completed" FOR EACH ROW EXECUTE FUNCTION "notify_insert_on_completed_job" (); @@ -37,18 +37,18 @@ EXECUTE FUNCTION "notify_insert_on_completed_job" (); CREATE FUNCTION "notify_queue" () RETURNS TRIGGER AS $$ BEGIN - PERFORM pg_notify('queue', NEW.id::text); + PERFORM pg_notify('queued', NEW.id::text); RETURN NEW; END; $$ LANGUAGE PLPGSQL; CREATE TRIGGER "notify_queue_after_insert" - AFTER INSERT ON "queue" + AFTER INSERT ON "v2_job_queue" FOR EACH ROW EXECUTE FUNCTION "notify_queue" (); CREATE TRIGGER "notify_queue_after_flow_status_update" - AFTER UPDATE ON "queue" + AFTER UPDATE ON "v2_job_flow_runtime" FOR EACH ROW WHEN (NEW.flow_status IS DISTINCT FROM OLD.flow_status) EXECUTE FUNCTION "notify_queue" (); \ No newline at end of file diff --git a/backend/tests/fixtures/result_format.sql b/backend/tests/fixtures/result_format.sql index 1f990e2d860de..0d70e56370d5a 100644 --- a/backend/tests/fixtures/result_format.sql +++ b/backend/tests/fixtures/result_format.sql @@ -1,3 +1,9 @@ +INSERT INTO public.v2_job ( + id, workspace_id, created_by, kind, script_lang +) VALUES ( + '1eecb96a-c8b0-4a3d-b1b6-087878c55e41', 'test-workspace', 'test-user', 'script', 'postgresql' +); + INSERT INTO public.completed_job ( id, workspace_id, created_by, created_at, duration_ms, success, flow_status, result, job_kind, language ) VALUES ( diff --git a/backend/tests/worker.rs b/backend/tests/worker.rs index 7bb71416c7374..1ea74c3a2fd67 100644 --- a/backend/tests/worker.rs +++ b/backend/tests/worker.rs @@ -18,8 +18,6 @@ use tokio::time::{timeout, Duration}; use windmill_api_client::types::{CreateFlowBody, RawScript}; -use sqlx::query; - #[cfg(feature = "enterprise")] use windmill_api_client::types::{EditSchedule, NewSchedule, ScriptArgs}; @@ -191,7 +189,6 @@ async fn set_jwt_secret() -> () { mod suspend_resume { use serde_json::json; - use sqlx::query_scalar; use super::*; @@ -202,11 +199,13 @@ mod suspend_resume { ) { loop { queue.by_ref().find(&flow).await.unwrap(); - if query_scalar("SELECT suspend > 0 FROM queue WHERE id = $1") - .bind(flow) - .fetch_one(db) - .await - .unwrap() + if sqlx::query_scalar!( + "SELECT suspend > 0 AS \"r!\" FROM v2_queue WHERE id = $1", + flow + ) + .fetch_one(db) + .await + .unwrap() { break; } @@ -360,7 +359,7 @@ mod suspend_resume { // ensure resumes are cleaned up through CASCADE when the flow is finished assert_eq!( 0, - query_scalar::<_, i64>("SELECT count(*) FROM resume_job") + sqlx::query_scalar!("SELECT count(*) AS \"count!\" FROM resume_job") .fetch_one(&db) .await .unwrap() @@ -927,7 +926,7 @@ impl RunJob { /* root job */ None, /* job_id */ None, /* is_flow_step */ false, - 
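The renamed notification channels in the test fixture above can be observed directly from psql; per the trigger functions, payloads are job uuids (illustrative session, not part of the PR):

LISTEN queued;
LISTEN completed;
-- psql then prints lines like:
--   Asynchronous notification "queued" with payload "<job uuid>" received from server process with PID <pid>.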
/* running */ false, + /* same_worker */ false, None, true, None, @@ -1049,11 +1048,11 @@ fn spawn_test_worker( } async fn listen_for_completed_jobs(db: &Pool<Postgres>) -> impl Stream<Item = Uuid> + Unpin { - listen_for_uuid_on(db, "insert on completed_job").await + listen_for_uuid_on(db, "completed").await } async fn listen_for_queue(db: &Pool<Postgres>) -> impl Stream<Item = Uuid> + Unpin { - listen_for_uuid_on(db, "queue").await + listen_for_uuid_on(db, "queued").await } async fn listen_for_uuid_on( @@ -1078,7 +1077,7 @@ async fn listen_for_uuid_on( async fn completed_job(uuid: Uuid, db: &Pool<Postgres>) -> CompletedJob { sqlx::query_as::<_, CompletedJob>( - "SELECT *, result->'wm_labels' as labels FROM completed_job WHERE id = $1", + "SELECT *, result->'wm_labels' as labels FROM v2_completed_job WHERE id = $1", ) .bind(uuid) .fetch_one(db) @@ -3190,11 +3189,13 @@ async fn test_script_schedule_handlers(db: Pool<Postgres>) { let uuid = uuid.unwrap().unwrap(); - let completed_job = - query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) - .fetch_one(&db2) - .await - .unwrap(); + let completed_job = sqlx::query!( + "SELECT script_path FROM v2_completed_job WHERE id = $1", + uuid + ) + .fetch_one(&db2) + .await + .unwrap(); if completed_job.script_path.is_none() || completed_job.script_path != Some("f/system/schedule_error_handler".to_string()) @@ -3259,7 +3260,7 @@ async fn test_script_schedule_handlers(db: Pool<Postgres>) { let uuid = uuid.unwrap().unwrap(); let completed_job = - query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) + sqlx::query!("SELECT script_path FROM v2_completed_job WHERE id = $1", uuid) .fetch_one(&db2) .await .unwrap(); @@ -3342,11 +3343,13 @@ async fn test_flow_schedule_handlers(db: Pool<Postgres>) { let uuid = uuid.unwrap().unwrap(); - let completed_job = - query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) - .fetch_one(&db2) - .await - .unwrap(); + let completed_job = sqlx::query!( + "SELECT script_path FROM v2_completed_job WHERE id = $1", + uuid + ) + .fetch_one(&db2) + .await + .unwrap(); if completed_job.script_path.is_none() || completed_job.script_path != Some("f/system/schedule_error_handler".to_string()) @@ -3412,7 +3415,7 @@ let uuid = uuid.unwrap().unwrap(); let completed_job = - query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) + sqlx::query!("SELECT script_path FROM v2_completed_job WHERE id = $1", uuid) .fetch_one(&db2) .await .unwrap(); @@ -3487,7 +3490,7 @@ async fn run_deployed_relative_imports( async move { completed.next().await; // deployed script - let script = query!( + let script = sqlx::query!( "SELECT hash FROM script WHERE path = $1", "f/system/test_import".to_string() ) diff --git a/backend/windmill-api/src/apps.rs b/backend/windmill-api/src/apps.rs index a8cf4a5bdec2c..0740d18f7b47f 100644 --- a/backend/windmill-api/src/apps.rs +++ b/backend/windmill-api/src/apps.rs @@ -1794,7 +1794,7 @@ async fn check_if_allowed_to_access_s3_file_from_app( let allowed = opt_authed.is_some() || sqlx::query_scalar!( r#"SELECT EXISTS ( - SELECT 1 FROM completed_job + SELECT 1 FROM v2_completed_job WHERE workspace_id = $2 AND (job_kind = 'appscript' OR job_kind = 'preview') AND created_by = 'anonymous' diff --git a/backend/windmill-api/src/auth.rs b/backend/windmill-api/src/auth.rs index 64024f1ab12aa..71c62704778be 100644 --- a/backend/windmill-api/src/auth.rs +++ b/backend/windmill-api/src/auth.rs @@ -151,12 +151,16 @@ impl AuthCache { } } _ => { - let user_o = sqlx::query_as::<_, (Option<String>, Option<String>, bool, Option<Vec<String>>, Option<String>)>( -
"UPDATE token SET last_used_at = now() WHERE token = $1 AND (expiration > NOW() \ - OR expiration IS NULL) AND (workspace_id IS NULL OR workspace_id = $2) RETURNING owner, email, super_admin, scopes, label", + let user_o = sqlx::query!( + "UPDATE token SET last_used_at = now() WHERE + token = $1 + AND (expiration > NOW() OR expiration IS NULL) + AND (workspace_id IS NULL OR workspace_id = $2) + RETURNING owner, email, super_admin, scopes, label", + token, + w_id.as_ref(), ) - .bind(token) - .bind(w_id.as_ref()) + .map(|x| (x.owner, x.email, x.super_admin, x.scopes, x.label)) .fetch_optional(&self.db) .await .ok() @@ -251,12 +255,13 @@ impl AuthCache { (_, Some(email), super_admin, scopes, label) => { let username_override = username_override_from_label(label); if w_id.is_some() { - let row_o = sqlx::query_as::<_, (String, bool, bool)>( - "SELECT username, is_admin, operator FROM usr where email = $1 AND \ - workspace_id = $2 AND disabled = false", + let row_o = sqlx::query!( + "SELECT username, is_admin, operator FROM usr WHERE + email = $1 AND workspace_id = $2 AND disabled = false", + &email, + w_id.as_ref().unwrap() ) - .bind(&email) - .bind(&w_id.as_ref().unwrap()) + .map(|x| (x.username, x.is_admin, x.operator)) .fetch_optional(&self.db) .await .unwrap_or(Some(("error".to_string(), false, false))); diff --git a/backend/windmill-api/src/concurrency_groups.rs b/backend/windmill-api/src/concurrency_groups.rs index 945df75573a04..8370dc29be135 100644 --- a/backend/windmill-api/src/concurrency_groups.rs +++ b/backend/windmill-api/src/concurrency_groups.rs @@ -157,22 +157,22 @@ async fn get_concurrent_intervals( let lq = ListCompletedQuery { order_desc: Some(true), ..lq }; let lqc = lq.clone(); let lqq: ListQueueQuery = lqc.into(); - let mut sqlb_q = SqlBuilder::select_from("queue") + let mut sqlb_q = SqlBuilder::select_from("v2_queue") .fields(UnifiedJob::queued_job_fields()) .order_by("created_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) .clone(); - let mut sqlb_c = SqlBuilder::select_from("completed_job") + let mut sqlb_c = SqlBuilder::select_from("v2_completed_job") .fields(UnifiedJob::completed_job_fields()) .order_by("started_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) .clone(); - let mut sqlb_q_user = SqlBuilder::select_from("queue") + let mut sqlb_q_user = SqlBuilder::select_from("v2_queue") .fields(&["id"]) .order_by("created_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) .clone(); - let mut sqlb_c_user = SqlBuilder::select_from("completed_job") + let mut sqlb_c_user = SqlBuilder::select_from("v2_completed_job") .fields(&["id"]) .order_by("started_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) diff --git a/backend/windmill-api/src/db.rs b/backend/windmill-api/src/db.rs index 6186b0ada1a42..53d9d2498c0ac 100644 --- a/backend/windmill-api/src/db.rs +++ b/backend/windmill-api/src/db.rs @@ -6,14 +6,16 @@ * LICENSE-AGPL for a copy of the license. 
*/ -use futures::FutureExt; -use sqlx::Executor; +use std::sync::atomic::Ordering; +use std::time::Duration; +use futures::FutureExt; use sqlx::{ migrate::{Migrate, MigrateError}, pool::PoolConnection, - PgConnection, Pool, Postgres, + Executor, PgConnection, Pool, Postgres, }; + use windmill_audit::audit_ee::{AuditAuthor, AuditAuthorable}; use windmill_common::utils::generate_lock_id; use windmill_common::{ @@ -199,6 +201,24 @@ pub async fn migrate(db: &DB) -> Result<(), Error> { } }); + let db2 = db.clone(); + let _ = tokio::task::spawn(async move { + use windmill_common::worker::MIN_VERSION_IS_LATEST; + loop { + if !MIN_VERSION_IS_LATEST.load(Ordering::Relaxed) { + tokio::time::sleep(Duration::from_secs(30)).await; + continue; + } + if let Err(err) = v2_finalize(&db2).await { + tracing::error!("{err:#}: Could not apply v2 finalize migration, retry in 30s.."); + tokio::time::sleep(Duration::from_secs(30)).await; + continue; + } + tracing::info!("v2 finalization step successfully applied."); + break; + } + }); + Ok(()) } @@ -250,7 +270,7 @@ async fn fix_flow_versioning_migration( } macro_rules! run_windmill_migration { - ($migration_job_name:expr, $db:expr, $code:block) => { + ($migration_job_name:expr, $db:expr, |$tx:ident| $code:block) => { { let migration_job_name = $migration_job_name; let db: &Pool = $db; @@ -264,11 +284,11 @@ macro_rules! run_windmill_migration { .unwrap_or(false); if !has_done_migration { tracing::info!("Applying {migration_job_name} migration"); - let mut tx = db.begin().await?; + let mut $tx = db.begin().await?; let mut r = false; while !r { r = sqlx::query_scalar!("SELECT pg_try_advisory_lock(4242)") - .fetch_one(&mut *tx) + .fetch_one(&mut *$tx) .await .map_err(|e| { tracing::error!("Error acquiring {migration_job_name} lock: {e:#}"); @@ -298,7 +318,7 @@ macro_rules! run_windmill_migration { "INSERT INTO windmill_migrations (name) VALUES ($1) ON CONFLICT DO NOTHING", migration_job_name ) - .execute(&mut *tx) + .execute(&mut *$tx) .await?; tracing::info!("Finished applying {migration_job_name} migration"); } else { @@ -306,9 +326,9 @@ macro_rules! run_windmill_migration { } let _ = sqlx::query("SELECT pg_advisory_unlock(4242)") - .execute(&mut *tx) + .execute(&mut *$tx) .await?; - tx.commit().await?; + $tx.commit().await?; tracing::info!("released lock for {migration_job_name}"); } else { tracing::debug!("migration {migration_job_name} already done"); @@ -318,6 +338,109 @@ macro_rules! 
run_windmill_migration { }; } +async fn v2_finalize(db: &DB) -> Result<(), Error> { + run_windmill_migration!("v2_finalize_disable_sync", db, |tx| { + tx.execute( + r#" + CREATE OR REPLACE TRIGGER v2_queue_instead_of_update_trigger + INSTEAD OF UPDATE ON v2_queue + FOR EACH ROW + EXECUTE PROCEDURE v2_queue_instead_of_update(); + + CREATE OR REPLACE TRIGGER v2_completed_job_instead_of_update_trigger + INSTEAD OF UPDATE ON v2_completed_job + FOR EACH ROW + EXECUTE PROCEDURE v2_completed_job_instead_of_update(); + + DROP FUNCTION v2_queue_instead_of_update_overlay() CASCADE; + DROP FUNCTION v2_completed_job_instead_of_update_overlay() CASCADE; + DROP FUNCTION v2_job_completed_before_insert() CASCADE; + DROP FUNCTION v2_job_flow_runtime_before_insert() CASCADE; + DROP FUNCTION v2_job_flow_runtime_before_update() CASCADE; + DROP FUNCTION v2_job_queue_after_insert() CASCADE; + DROP FUNCTION v2_job_queue_before_insert() CASCADE; + DROP FUNCTION v2_job_queue_before_update() CASCADE; + DROP FUNCTION v2_job_runtime_before_insert() CASCADE; + DROP FUNCTION v2_job_runtime_before_update() CASCADE; + + DROP VIEW completed_job, completed_job_view, job, queue, queue_view CASCADE; + "#, + ) + .await?; + }); + run_windmill_migration!("v2_finalize_job_queue", db, |tx| { + tx.execute( + r#" + ALTER TABLE v2_job_queue + DROP COLUMN __parent_job CASCADE, + DROP COLUMN __created_by CASCADE, + DROP COLUMN __script_hash CASCADE, + DROP COLUMN __script_path CASCADE, + DROP COLUMN __args CASCADE, + DROP COLUMN __logs CASCADE, + DROP COLUMN __raw_code CASCADE, + DROP COLUMN __canceled CASCADE, + DROP COLUMN __last_ping CASCADE, + DROP COLUMN __job_kind CASCADE, + DROP COLUMN __env_id CASCADE, + DROP COLUMN __schedule_path CASCADE, + DROP COLUMN __permissioned_as CASCADE, + DROP COLUMN __flow_status CASCADE, + DROP COLUMN __raw_flow CASCADE, + DROP COLUMN __is_flow_step CASCADE, + DROP COLUMN __language CASCADE, + DROP COLUMN __same_worker CASCADE, + DROP COLUMN __raw_lock CASCADE, + DROP COLUMN __pre_run_error CASCADE, + DROP COLUMN __email CASCADE, + DROP COLUMN __visible_to_owner CASCADE, + DROP COLUMN __mem_peak CASCADE, + DROP COLUMN __root_job CASCADE, + DROP COLUMN __leaf_jobs CASCADE, + DROP COLUMN __concurrent_limit CASCADE, + DROP COLUMN __concurrency_time_window_s CASCADE, + DROP COLUMN __timeout CASCADE, + DROP COLUMN __flow_step_id CASCADE, + DROP COLUMN __cache_ttl CASCADE; + "#, + ) + .await?; + }); + run_windmill_migration!("v2_finalize_job_completed", db, |tx| { + tx.execute( + r#" + ALTER TABLE v2_job_completed + DROP COLUMN __parent_job CASCADE, + DROP COLUMN __created_by CASCADE, + DROP COLUMN __created_at CASCADE, + DROP COLUMN __success CASCADE, + DROP COLUMN __script_hash CASCADE, + DROP COLUMN __script_path CASCADE, + DROP COLUMN __args CASCADE, + DROP COLUMN __logs CASCADE, + DROP COLUMN __deleted CASCADE, + DROP COLUMN __raw_code CASCADE, + DROP COLUMN __canceled CASCADE, + DROP COLUMN __job_kind CASCADE, + DROP COLUMN __env_id CASCADE, + DROP COLUMN __schedule_path CASCADE, + DROP COLUMN __permissioned_as CASCADE, + DROP COLUMN __raw_flow CASCADE, + DROP COLUMN __is_flow_step CASCADE, + DROP COLUMN __language CASCADE, + DROP COLUMN __is_skipped CASCADE, + DROP COLUMN __raw_lock CASCADE, + DROP COLUMN __email CASCADE, + DROP COLUMN __visible_to_owner CASCADE, + DROP COLUMN __tag CASCADE, + DROP COLUMN __priority CASCADE; + "#, + ) + .await?; + }); + Ok(()) +} + async fn fix_job_completed_index(db: &DB) -> Result<(), Error> { // let has_done_migration = sqlx::query_scalar!( // "SELECT 
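`v2_finalize` above is three one-shot migrations expressed with `run_windmill_migration!`, which this diff extends to expose the transaction as `|tx|`. The macro's job is to make a migration run exactly once across a fleet of replicas: check the `windmill_migrations` bookkeeping table, spin on `pg_try_advisory_lock(4242)` so only one node proceeds, run the body, record the name, unlock, and commit. De-sugared into a plain function, the pattern is roughly the following sketch (the DDL string stands in for the macro body):

use sqlx::{Pool, Postgres};
use std::time::Duration;

async fn run_once(db: &Pool<Postgres>, name: &str, ddl: &str) -> sqlx::Result<()> {
    let done: bool = sqlx::query_scalar(
        "SELECT EXISTS (SELECT 1 FROM windmill_migrations WHERE name = $1)",
    )
    .bind(name)
    .fetch_one(db)
    .await?;
    if done {
        return Ok(());
    }
    let mut tx = db.begin().await?;
    // Spin until this node holds the cluster-wide migration lock (4242 is the
    // key used by run_windmill_migration! above).
    while !sqlx::query_scalar::<_, bool>("SELECT pg_try_advisory_lock(4242)")
        .fetch_one(&mut *tx)
        .await?
    {
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
    sqlx::query(ddl).execute(&mut *tx).await?;
    sqlx::query("INSERT INTO windmill_migrations (name) VALUES ($1) ON CONFLICT DO NOTHING")
        .bind(name)
        .execute(&mut *tx)
        .await?;
    sqlx::query("SELECT pg_advisory_unlock(4242)")
        .execute(&mut *tx)
        .await?;
    tx.commit().await?;
    Ok(())
}

Because the advisory lock is held by the migration's own session, a node that crashes mid-migration releases it automatically when its connection dies.
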
EXISTS(SELECT name FROM windmill_migrations WHERE name = 'fix_job_completed_index')" @@ -360,7 +483,7 @@ async fn fix_job_completed_index(db: &DB) -> Result<(), Error> { // tx.commit().await?; // } - run_windmill_migration!("fix_job_completed_index_2", &db, { + run_windmill_migration!("fix_job_completed_index_2", &db, |tx| { // sqlx::query( // "CREATE INDEX CONCURRENTLY IF NOT EXISTS ix_completed_job_workspace_id_created_at_new_2 ON completed_job (workspace_id, job_kind, success, is_skipped, is_flow_step, created_at DESC)" // ).execute(db).await?; @@ -380,7 +503,7 @@ async fn fix_job_completed_index(db: &DB) -> Result<(), Error> { .await?; }); - run_windmill_migration!("fix_job_completed_index_3", &db, { + run_windmill_migration!("fix_job_completed_index_3", &db, |tx| { sqlx::query("DROP INDEX CONCURRENTLY IF EXISTS index_completed_job_on_schedule_path") .execute(db) .await?; @@ -398,60 +521,60 @@ async fn fix_job_completed_index(db: &DB) -> Result<(), Error> { .await?; }); - run_windmill_migration!("fix_job_completed_index_4", &db, { + run_windmill_migration!("fix_job_index_1", &db, |tx| { let migration_job_name = "fix_job_completed_index_4"; let mut i = 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_workspace_id_created_at_new_3 ON completed_job (workspace_id, created_at DESC)") + sqlx::query!("create index concurrently if not exists ix_job_workspace_id_created_at_new_3 ON v2_job (workspace_id, created_at DESC)") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_workspace_id_created_at_new_8 ON completed_job (workspace_id, created_at DESC) where job_kind in ('deploymentcallback') AND parent_job IS NULL") + sqlx::query!("create index concurrently if not exists ix_job_workspace_id_created_at_new_8 ON v2_job (workspace_id, created_at DESC) where kind in ('deploymentcallback') AND parent_job IS NULL") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_workspace_id_created_at_new_9 ON completed_job (workspace_id, created_at DESC) where job_kind in ('dependencies', 'flowdependencies', 'appdependencies') AND parent_job IS NULL") + sqlx::query!("create index concurrently if not exists ix_job_workspace_id_created_at_new_9 ON v2_job (workspace_id, created_at DESC) where kind in ('dependencies', 'flowdependencies', 'appdependencies') AND parent_job IS NULL") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_workspace_id_created_at_new_5 ON completed_job (workspace_id, created_at DESC) where job_kind in ('preview', 'flowpreview') AND parent_job IS NULL") + sqlx::query!("create index concurrently if not exists ix_job_workspace_id_created_at_new_5 ON v2_job (workspace_id, created_at DESC) where kind in ('preview', 'flowpreview') AND parent_job IS NULL") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_workspace_id_created_at_new_6 ON completed_job (workspace_id, created_at DESC) where job_kind in ('script', 'flow') AND parent_job IS NULL") + sqlx::query!("create index concurrently if not exists ix_job_workspace_id_created_at_new_6 ON v2_job (workspace_id, created_at 
DESC) where kind in ('script', 'flow') AND parent_job IS NULL") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_workspace_id_created_at_new_7 ON completed_job (workspace_id, success, created_at DESC) where job_kind in ('script', 'flow') AND parent_job IS NULL") + sqlx::query!("create index concurrently if not exists ix_job_workspace_id_created_at_new_7 ON v2_job (workspace_id, created_at DESC) where kind in ('script', 'flow') AND parent_job IS NULL") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_workspace_id_started_at_new_2 ON completed_job (workspace_id, started_at DESC)") + sqlx::query!("create index concurrently if not exists ix_completed_job_workspace_id_started_at_new_2 ON v2_job_completed (workspace_id, started_at DESC)") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists root_job_index_by_path_2 ON completed_job (workspace_id, script_path, created_at desc) WHERE parent_job IS NULL") + sqlx::query!("create index concurrently if not exists root_job_index_by_path_2 ON v2_job (workspace_id, runnable_path, created_at desc) WHERE parent_job IS NULL") .execute(db) .await?; i += 1; tracing::info!("step {i} of {migration_job_name} migration"); - sqlx::query("create index concurrently if not exists ix_completed_job_created_at ON completed_job (created_at DESC)") + sqlx::query!("create index concurrently if not exists ix_job_created_at ON v2_job (created_at DESC)") .execute(db) .await?; @@ -479,13 +602,13 @@ async fn fix_job_completed_index(db: &DB) -> Result<(), Error> { .await?; }); - run_windmill_migration!("fix_labeled_jobs_index", &db, { + run_windmill_migration!("fix_labeled_jobs_index", &db, |tx| { tracing::info!("Special migration to add index concurrently on job labels 2"); sqlx::query!("DROP INDEX CONCURRENTLY IF EXISTS labeled_jobs_on_jobs") .execute(db) .await?; sqlx::query!( - "CREATE INDEX CONCURRENTLY labeled_jobs_on_jobs ON completed_job USING GIN ((result -> 'wm_labels')) WHERE result ? 'wm_labels'" + "CREATE INDEX CONCURRENTLY labeled_jobs_on_jobs ON v2_job_completed USING GIN ((result -> 'wm_labels')) WHERE result ? 
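The index migration above splits the old catch-all `completed_job` indexes into partial indexes on `v2_job`, one per common jobs-list filter. `CREATE INDEX CONCURRENTLY` cannot run inside a transaction block, which is why these statements execute on the pool (`.execute(db)`) rather than on the migration's `tx`. The planner uses a partial index only when a query's filter implies the index predicate, e.g. (a sketch, with column names as used in this diff):

use sqlx::{Pool, Postgres};
use uuid::Uuid;

// This filter implies the predicate of ix_job_workspace_id_created_at_new_6
// (kind IN ('script', 'flow') AND parent_job IS NULL), so the planner can
// answer it from the small partial index instead of scanning all of v2_job.
async fn recent_root_jobs(db: &Pool<Postgres>, w_id: &str) -> sqlx::Result<Vec<Uuid>> {
    sqlx::query_scalar(
        "SELECT id FROM v2_job \
         WHERE workspace_id = $1 AND kind IN ('script', 'flow') AND parent_job IS NULL \
         ORDER BY created_at DESC LIMIT 30",
    )
    .bind(w_id)
    .fetch_all(db)
    .await
}
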
'wm_labels'" ).execute(db).await?; }); diff --git a/backend/windmill-api/src/flows.rs b/backend/windmill-api/src/flows.rs index 1b283d43f361e..b4ab8a772dcc4 100644 --- a/backend/windmill-api/src/flows.rs +++ b/backend/windmill-api/src/flows.rs @@ -913,7 +913,7 @@ async fn update_flow( })?; if let Some(old_dep_job) = old_dep_job { sqlx::query!( - "UPDATE queue SET canceled = true WHERE id = $1", + "UPDATE v2_queue SET canceled = true WHERE id = $1", old_dep_job ) .execute(&mut *new_tx) diff --git a/backend/windmill-api/src/inputs.rs b/backend/windmill-api/src/inputs.rs index 1271be1620a39..cc7ddc22e27e1 100644 --- a/backend/windmill-api/src/inputs.rs +++ b/backend/windmill-api/src/inputs.rs @@ -133,7 +133,7 @@ async fn get_input_history( let mut tx = user_db.begin(&authed).await?; let sql = &format!( - "select id, created_at, created_by, 'null'::jsonb as args, success from completed_job \ + "select id, created_at, created_by, 'null'::jsonb as args, success from v2_completed_job \ where {} = $1 and job_kind = any($2) and workspace_id = $3 \ order by created_at desc limit $4 offset $5", r.runnable_type.column_name() @@ -213,7 +213,7 @@ async fn get_args_from_history_or_saved_input( .await? } else { sqlx::query_scalar!( - "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM completed_job WHERE id = $1 AND workspace_id = $2", + "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", job_or_input_id, w_id, g.allow_large.unwrap_or(true) @@ -223,7 +223,7 @@ async fn get_args_from_history_or_saved_input( } } else { sqlx::query_scalar!( - "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM completed_job WHERE id = $1 AND workspace_id = $2 UNION ALL SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM input WHERE id = $1 AND workspace_id = $2", + "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_job WHERE id = $1 AND workspace_id = $2", job_or_input_id, w_id, g.allow_large.unwrap_or(true) diff --git a/backend/windmill-api/src/job_metrics.rs b/backend/windmill-api/src/job_metrics.rs index 2af59383a89af..a763908ce9949 100644 --- a/backend/windmill-api/src/job_metrics.rs +++ b/backend/windmill-api/src/job_metrics.rs @@ -171,7 +171,7 @@ async fn set_job_progress( if let Some(flow_job_id) = flow_job_id { // TODO: Return error if trying to set completed job? 
sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', flow_status->>'step', 'progress'], $1) WHERE id = $2", serde_json::json!(percent.clamp(0, 99)), diff --git a/backend/windmill-api/src/jobs.rs b/backend/windmill-api/src/jobs.rs index 6e88f76ca7d3d..b30df055bf722 100644 --- a/backend/windmill-api/src/jobs.rs +++ b/backend/windmill-api/src/jobs.rs @@ -8,12 +8,12 @@ use axum::body::Body; use axum::http::HeaderValue; -use futures::TryFutureExt; use itertools::Itertools; use quick_cache::sync::Cache; use serde_json::value::RawValue; use sqlx::Pool; use std::collections::HashMap; +use std::iter; use std::ops::{Deref, DerefMut}; #[cfg(feature = "prometheus")] use std::sync::atomic::Ordering; @@ -85,8 +85,8 @@ use windmill_common::{METRICS_DEBUG_ENABLED, METRICS_ENABLED}; use windmill_common::{get_latest_deployed_hash_for_path, BASE_URL}; use windmill_queue::{ - cancel_job, get_queued_job, get_result_and_success_by_id_from_flow, job_is_complete, push, - PushArgs, PushArgsOwned, PushIsolationLevel, + cancel_job, get_result_and_success_by_id_from_flow, job_is_complete, push, PushArgs, + PushArgsOwned, PushIsolationLevel, RawJob, }; #[cfg(feature = "prometheus")] @@ -334,16 +334,21 @@ async fn get_root_job( Ok(Json(res)) } -async fn compute_root_job_for_flow(db: &DB, w_id: &str, job_id: Uuid) -> error::Result { - let mut job = get_queued_job(&job_id, w_id, db).await?; - while let Some(j) = job { - if let Some(uuid) = j.parent_job { - job = get_queued_job(&uuid, w_id, db).await?; - } else { - return Ok(j.id.to_string()); +async fn compute_root_job_for_flow(db: &DB, w_id: &str, mut job_id: Uuid) -> error::Result { + // TODO: use `root_job` ? + loop { + job_id = match sqlx::query_scalar!( + "SELECT parent_job FROM v2_queue WHERE id = $1 AND workspace_id = $2", + job_id, + w_id + ) + .fetch_one(db) + .await + { + Ok(Some(job_id)) => job_id, + _ => return Ok(job_id.to_string()), } } - Ok(job_id.to_string()) } async fn get_db_clock(Extension(db): Extension) -> windmill_common::error::JsonResult { @@ -600,7 +605,7 @@ async fn get_flow_job_debug_info( let mut job_ids = vec![]; let jobs_with_root = sqlx::query_scalar!( - "SELECT id FROM queue WHERE workspace_id = $1 and root_job = $2", + "SELECT id AS \"id!\" FROM v2_queue WHERE workspace_id = $1 and root_job = $2", &w_id, &id, ) @@ -670,23 +675,23 @@ async fn get_job( } macro_rules! 
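`compute_root_job_for_flow` above now walks `parent_job` pointers with single-column lookups instead of materializing full `QueuedJob` rows through `get_queued_job`. The same walk in isolation (per the diff's own TODO, the denormalized `root_job` column could eventually replace the loop entirely):

use sqlx::{Pool, Postgres};
use uuid::Uuid;

// Follow parent_job pointers until a job without a parent is reached; that
// job is the root of the flow. Each hop is a single-row indexed lookup.
async fn root_of(db: &Pool<Postgres>, w_id: &str, mut job_id: Uuid) -> sqlx::Result<Uuid> {
    loop {
        let parent: Option<Option<Uuid>> = sqlx::query_scalar(
            "SELECT parent_job FROM v2_queue WHERE id = $1 AND workspace_id = $2",
        )
        .bind(job_id)
        .bind(w_id)
        .fetch_optional(db)
        .await?;
        match parent {
            Some(Some(parent_id)) => job_id = parent_id,
            // Row missing or parent_job NULL: job_id is already the root.
            _ => return Ok(job_id),
        }
    }
}

A recursive CTE could collapse the walk into one round-trip; the loop keeps each statement trivial and is bounded by flow nesting depth.
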
get_job_query { - ("completed_job_view", $($opts:tt)*) => { + ("v2_completed_job", $($opts:tt)*) => { get_job_query!( - @impl "completed_job_view", ($($opts)*), + @impl "v2_completed_job", ($($opts)*), "duration_ms, success, result, deleted, is_skipped, result->'wm_labels' as labels, \ CASE WHEN result is null or pg_column_size(result) < 90000 THEN result ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as result", ) }; - ("queue_view", $($opts:tt)*) => { + ("v2_queue", $($opts:tt)*) => { get_job_query!( - @impl "queue_view", ($($opts)*), + @impl "v2_queue", ($($opts)*), "scheduled_for, running, last_ping, suspend, suspend_until, same_worker, pre_run_error, visible_to_owner, \ root_job, leaf_jobs, concurrent_limit, concurrency_time_window_s, timeout, flow_step_id, cache_ttl", ) }; (@impl $table:literal, (with_logs: $with_logs:expr, $($rest:tt)*), $additional_fields:literal, $($args:tt)*) => { if $with_logs { - get_job_query!(@impl $table, ($($rest)*), $additional_fields, logs = const_format::formatcp!("right({}.logs, 20000)", $table), $($args)*) + get_job_query!(@impl $table, ($($rest)*), $additional_fields, logs = "right(job_logs.logs, 20000)", $($args)*) } else { get_job_query!(@impl $table, ($($rest)*), $additional_fields, logs = "null", $($args)*) } @@ -713,7 +718,7 @@ macro_rules! get_job_query { {logs} as logs, {code} as raw_code, canceled, canceled_by, canceled_reason, job_kind, \ schedule_path, permissioned_as, flow_status, {flow} as raw_flow, is_flow_step, language, \ {lock} as raw_lock, email, visible_to_owner, mem_peak, tag, priority, {additional_fields} \ - FROM {table} \ + FROM {table} LEFT JOIN job_logs ON id = job_id \ WHERE id = $1 AND {table}.workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3)) LIMIT 1", table = $table, additional_fields = $additional_fields, @@ -787,38 +792,26 @@ impl<'a> GetQuery<'a> { async fn resolve_raw_values( &self, db: &DB, - id: Uuid, kind: JobKind, - hash: Option, + runnable_id: Option, job: &mut JobExtended, ) { - let (raw_code, raw_lock, raw_flow) = ( - job.raw_code.take(), - job.raw_lock.take(), - job.raw_flow.take(), - ); - if self.with_flow { - // Try to fetch the flow from the cache, fallback to the preview flow. - // NOTE: This could check for the job kinds instead of the `or_else` but it's not - // necessary as `fetch_flow` return early if the job kind is not a preview one. - cache::job::fetch_flow(db, kind, hash) - .or_else(|_| cache::job::fetch_preview_flow(db, &id, raw_flow)) + job.raw_flow = match job.raw_flow.take() { + _ if !self.with_flow => None, + raw_flow => cache::job::fetch_flow(db, kind, runnable_id, raw_flow) .await .ok() - .inspect(|data| job.raw_flow = Some(sqlx::types::Json(data.raw_flow.clone()))); - } - if self.with_code { - // Try to fetch the code from the cache, fallback to the preview code. - // NOTE: This could check for the job kinds instead of the `or_else` but it's not - // necessary as `fetch_script` return early if the job kind is not a preview one. 
- cache::job::fetch_script(db, kind, hash) - .or_else(|_| cache::job::fetch_preview_script(db, &id, raw_lock, raw_code)) + .map(|data| sqlx::types::Json(data.raw_flow.clone())), + }; + (job.raw_code, job.raw_lock) = match (job.raw_code.take(), job.raw_lock.take()) { + _ if !self.with_code => (None, None), + (Some(raw_code), raw_lock) => (Some(raw_code), raw_lock), + (_, _) => cache::job::fetch_script(db, kind, runnable_id) .await .ok() - .inspect(|data| { - (job.raw_lock, job.raw_code) = (data.lock.clone(), Some(data.code.clone())) - }); - } + .map(|data| (Some(data.code.clone()), data.lock.clone())) + .unwrap_or_default(), + }; } async fn fetch_queued( @@ -827,7 +820,7 @@ impl<'a> GetQuery<'a> { job_id: Uuid, workspace_id: &str, ) -> error::Result>> { - let query = get_job_query!("queue_view", + let query = get_job_query!("v2_queue", with_logs: self.with_logs, with_code: self.with_code, with_flow: self.with_flow, @@ -841,7 +834,7 @@ impl<'a> GetQuery<'a> { self.check_auth(job.as_ref().map(|job| job.created_by.as_str()))?; if let Some(job) = job.as_mut() { - self.resolve_raw_values(db, job.id, job.job_kind, job.script_hash, job) + self.resolve_raw_values(db, job.job_kind, job.script_hash.map(|x| x.0), job) .await; } if self.with_flow { @@ -859,7 +852,7 @@ impl<'a> GetQuery<'a> { job_id: Uuid, workspace_id: &str, ) -> error::Result>> { - let query = get_job_query!("completed_job_view", + let query = get_job_query!("v2_completed_job", with_logs: self.with_logs, with_code: self.with_code, with_flow: self.with_flow, @@ -873,7 +866,7 @@ impl<'a> GetQuery<'a> { self.check_auth(cjob.as_ref().map(|job| job.created_by.as_str()))?; if let Some(job) = cjob.as_mut() { - self.resolve_raw_values(db, job.id, job.job_kind, job.script_hash, job) + self.resolve_raw_values(db, job.job_kind, job.script_hash.map(|x| x.0), job) .await; } if self.with_flow { @@ -1007,10 +1000,10 @@ async fn get_job_logs( .flatten(); let record = sqlx::query!( - "SELECT created_by, CONCAT(coalesce(completed_job.logs, ''), coalesce(job_logs.logs, '')) as logs, job_logs.log_offset, job_logs.log_file_index - FROM completed_job - LEFT JOIN job_logs ON job_logs.job_id = completed_job.id - WHERE completed_job.id = $1 AND completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR completed_job.tag = ANY($3))", + "SELECT created_by AS \"created_by!\", CONCAT(coalesce(v2_completed_job.logs, ''), coalesce(job_logs.logs, '')) as logs, job_logs.log_offset, job_logs.log_file_index + FROM v2_completed_job + LEFT JOIN job_logs ON job_logs.job_id = v2_completed_job.id + WHERE v2_completed_job.id = $1 AND v2_completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR v2_completed_job.tag = ANY($3))", id, w_id, tags.as_ref().map(|v| v.as_slice()) @@ -1044,10 +1037,10 @@ async fn get_job_logs( Ok(content_plain(Body::from(logs))) } else { let text = sqlx::query!( - "SELECT created_by, CONCAT(coalesce(queue.logs, ''), coalesce(job_logs.logs, '')) as logs, coalesce(job_logs.log_offset, 0) as log_offset, job_logs.log_file_index - FROM queue - LEFT JOIN job_logs ON job_logs.job_id = queue.id - WHERE queue.id = $1 AND queue.workspace_id = $2 AND ($3::text[] IS NULL OR queue.tag = ANY($3))", + "SELECT created_by AS \"created_by!\", CONCAT(coalesce(v2_queue.logs, ''), coalesce(job_logs.logs, '')) as logs, coalesce(job_logs.log_offset, 0) as log_offset, job_logs.log_file_index + FROM v2_queue + LEFT JOIN job_logs ON job_logs.job_id = v2_queue.id + WHERE v2_queue.id = $1 AND v2_queue.workspace_id = $2 AND ($3::text[] IS NULL OR v2_queue.tag = ANY($3))", id, 
w_id, tags.as_ref().map(|v| v.as_slice()) @@ -1085,29 +1078,23 @@ async fn get_job_logs( } } -#[derive(FromRow)] -pub struct RawArgs { - pub args: Option>>, - pub created_by: String, -} - async fn get_args( OptAuthed(opt_authed): OptAuthed, Extension(db): Extension, Path((w_id, id)): Path<(String, Uuid)>, -) -> error::JsonResult> { +) -> JsonResult> { let tags = opt_authed .as_ref() .map(|authed| get_scope_tags(authed)) .flatten(); - let record = sqlx::query_as::<_, RawArgs>( - "SELECT created_by, args - FROM completed_job - WHERE completed_job.id = $1 AND completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR completed_job.tag = ANY($3))", + let record = sqlx::query!( + "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\" + FROM v2_completed_job + WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + id, + &w_id, + tags.as_ref().map(|v| v.as_slice()) as Option<&[&str]>, ) - .bind(&id) - .bind(&w_id) - .bind(tags.as_ref().map(|v| v.as_slice())) .fetch_optional(&db) .await?; @@ -1122,14 +1109,14 @@ async fn get_args( Ok(Json(record.args.map(|x| x.0).unwrap_or_default())) } else { - let record = sqlx::query_as::<_, RawArgs>( - "SELECT created_by, args - FROM queue - WHERE queue.id = $1 AND queue.workspace_id = $2 AND ($3::text[] IS NULL OR queue.tag = ANY($3))", + let record = sqlx::query!( + "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\" + FROM v2_queue + WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + id, + &w_id, + tags.as_ref().map(|v| v.as_slice()) as Option<&[&str]>, ) - .bind(&id) - .bind(&w_id) - .bind(tags.as_ref().map(|v| v.as_slice())) .fetch_optional(&db) .await?; let record = not_found_if_none(record, "Job Args", id.to_string())?; @@ -1397,7 +1384,7 @@ pub fn list_queue_jobs_query( tags: Option>, ) -> SqlBuilder { let (limit, offset) = paginate_without_limits(pagination); - let mut sqlb = SqlBuilder::select_from("queue") + let mut sqlb = SqlBuilder::select_from("v2_queue") .fields(fields) .order_by("created_at", lq.order_desc.unwrap_or(true)) .limit(limit) @@ -1485,82 +1472,46 @@ async fn cancel_jobs( w_id: &str, ) -> error::JsonResult> { let mut uuids = vec![]; - let mut tx = db.begin().await?; - let trivial_jobs = sqlx::query!("INSERT INTO completed_job AS cj - ( workspace_id - , id - , parent_job - , created_by - , created_at - , started_at - , duration_ms - , success - , script_hash - , script_path - , args - , result - , raw_code - , raw_lock - , canceled - , canceled_by - , canceled_reason - , job_kind - , schedule_path - , permissioned_as - , flow_status - , raw_flow - , is_flow_step - , is_skipped - , language - , email - , visible_to_owner - , mem_peak - , tag - , priority - ) - SELECT workspace_id - , id - , parent_job - , created_by - , created_at - , now() - , 0 - , false - , script_hash - , script_path - , args - , $4 - , raw_code - , raw_lock - , true - , $1 - , canceled_reason - , job_kind - , schedule_path - , permissioned_as - , flow_status - , raw_flow - , is_flow_step - , false - , language - , email - , visible_to_owner - , mem_peak - , tag - , priority FROM queue - WHERE id = any($2) AND running = false AND parent_job IS NULL AND workspace_id = $3 AND schedule_path IS NULL FOR UPDATE SKIP LOCKED - ON CONFLICT (id) DO NOTHING RETURNING id", username, &jobs, w_id, serde_json::json!({"error": { "message": format!("Job canceled: cancel all by {username}"), "name": "Canceled", "reason": "cancel all", "canceler": username}})) - .fetch_all(&mut *tx) 
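The `get_args` rewrite above keeps the visibility guard that recurs across this diff, `($3::text[] IS NULL OR tag = ANY($3))`: one prepared statement serves both unrestricted callers (bind NULL, the guard collapses to TRUE) and tag-scoped tokens (only jobs whose tag is in the allowed set are visible). A sketch of the pattern with a hypothetical helper:

use sqlx::{Pool, Postgres};
use uuid::Uuid;

// allowed_tags = None means the caller has no tag restriction; Some(tags)
// limits visibility to jobs carrying one of those tags.
async fn visible_created_by(
    db: &Pool<Postgres>,
    id: Uuid,
    w_id: &str,
    allowed_tags: Option<&[&str]>,
) -> sqlx::Result<Option<String>> {
    sqlx::query_scalar(
        "SELECT created_by FROM v2_completed_job \
         WHERE id = $1 AND workspace_id = $2 \
         AND ($3::text[] IS NULL OR tag = ANY($3))",
    )
    .bind(id)
    .bind(w_id)
    .bind(allowed_tags)
    .fetch_optional(db)
    .await
}
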
- .await?.into_iter().map(|x| x.id).collect::>(); - sqlx::query!( - "DELETE FROM queue WHERE id = any($1) AND workspace_id = $2", - &trivial_jobs, - w_id + let trivial_jobs = sqlx::query_scalar!( + "WITH queued AS ( + DELETE FROM v2_job_queue q + USING v2_job j + WHERE + q.id = ANY ($2) AND q.id = j.id + AND q.running = false AND j.parent_job IS NULL AND q.workspace_id = $3 + AND j.schedule_path IS NULL + RETURNING + q.id, q.workspace_id, q.started_at, q.worker + ), queued_and_runtime AS ( + SELECT queued.*, memory_peak, flow_status + FROM queued + JOIN v2_job_runtime USING (id) + LEFT JOIN v2_job_flow_runtime USING (id) + ) INSERT INTO v2_job_completed ( + id, workspace_id, + duration_ms, result, canceled_by, canceled_reason, status, + flow_status, started_at, memory_peak, worker + ) SELECT + id, workspace_id, + 0, $4, $1, 'cancel all', 'canceled'::job_status, + flow_status, started_at, memory_peak, worker + FROM queued_and_runtime + ON CONFLICT (id) DO NOTHING RETURNING id", + username, + &jobs, + w_id, + serde_json::json!({ + "error": { + "message": format!("Job canceled: cancel all by {username}"), + "name": "Canceled", + "reason": "cancel all", + "canceler": username + } + }) ) - .execute(&mut *tx) + .fetch_all(db) .await?; - tx.commit().await?; // sqlx::query!( // "UPDATE queue SET canceled = true, canceled_by = $1, canceled_reason = 'cancelled all by user' WHERE id IN (SELECT id FROM queue where id = any($2) AND workspace_id = $3 AND schedule_path IS NULL FOR UPDATE SKIP LOCKED) RETURNING id", @@ -1622,7 +1573,7 @@ async fn cancel_selection( let mut tx = user_db.begin(&authed).await?; let tags = get_scope_tags(&authed).map(|v| v.iter().map(|s| s.to_string()).collect_vec()); let jobs_to_cancel = sqlx::query_scalar!( - "SELECT id FROM queue WHERE id = ANY($1) AND schedule_path IS NULL AND ($2::text[] IS NULL OR tag = ANY($2))", + "SELECT id AS \"id!\" FROM v2_queue WHERE id = ANY($1) AND schedule_path IS NULL AND ($2::text[] IS NULL OR tag = ANY($2))", &jobs, tags.as_ref().map(|v| v.as_slice()) ) @@ -1642,7 +1593,7 @@ async fn list_filtered_uuids( ) -> error::JsonResult> { require_admin(authed.is_admin, &authed.username)?; - let mut sqlb = SqlBuilder::select_from("queue").fields(&["id"]).clone(); + let mut sqlb = SqlBuilder::select_from("v2_queue").fields(&["id"]).clone(); sqlb = join_concurrency_key(lq.concurrency_key.as_ref(), sqlb); @@ -1679,7 +1630,7 @@ async fn count_queue_jobs( Ok(Json( sqlx::query_as!( QueueStats, - "SELECT coalesce(COUNT(*) FILTER(WHERE suspend = 0 AND running = false), 0) as \"database_length!\", coalesce(COUNT(*) FILTER(WHERE suspend > 0), 0) as \"suspended!\" FROM queue WHERE (workspace_id = $1 OR $2) AND scheduled_for <= now()", + "SELECT coalesce(COUNT(*) FILTER(WHERE suspend = 0 AND running = false), 0) as \"database_length!\", coalesce(COUNT(*) FILTER(WHERE suspend > 0), 0) as \"suspended!\" FROM v2_queue WHERE (workspace_id = $1 OR $2) AND scheduled_for <= now()", w_id, w_id == "admins" && cq.all_workspaces.unwrap_or(false), ) @@ -1701,7 +1652,7 @@ async fn count_completed_jobs_detail( Path(w_id): Path, Query(query): Query, ) -> error::JsonResult { - let mut sqlb = SqlBuilder::select_from("completed_job"); + let mut sqlb = SqlBuilder::select_from("v2_completed_job"); sqlb.field("COUNT(*) as count"); if !query.all_workspaces.unwrap_or(false) { @@ -1743,7 +1694,7 @@ async fn count_completed_jobs( Ok(Json( sqlx::query_as!( QueueStats, - "SELECT coalesce(COUNT(*), 0) as \"database_length!\", null::bigint as suspended FROM completed_job WHERE workspace_id = 
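The rewritten `cancel_jobs` above replaces the old copy-then-delete two-step (and its explicit transaction) with a single statement: a CTE deletes the still-unstarted rows from `v2_job_queue`, and its RETURNING set, joined with the runtime tables, feeds the `INSERT INTO v2_job_completed`. A simplified sketch of the delete-and-move shape — the real statement also pulls `memory_peak` and `flow_status` from `v2_job_runtime`/`v2_job_flow_runtime` and carries the canceled reason and worker:

use sqlx::{Pool, Postgres};
use uuid::Uuid;

// Atomically move queued-but-not-running jobs into the completed table as
// 'canceled'. One statement means no explicit transaction is needed, and a
// job can never end up in both tables or in neither.
async fn cancel_unstarted(
    db: &Pool<Postgres>,
    job_ids: &[Uuid],
    w_id: &str,
    canceler: &str,
) -> sqlx::Result<Vec<Uuid>> {
    sqlx::query_scalar(
        "WITH dequeued AS ( \
            DELETE FROM v2_job_queue \
            WHERE id = ANY($1) AND workspace_id = $2 AND running = false \
            RETURNING id, workspace_id, started_at \
         ) \
         INSERT INTO v2_job_completed (id, workspace_id, started_at, duration_ms, result, canceled_by, status) \
         SELECT id, workspace_id, started_at, 0, $3, $4, 'canceled'::job_status \
         FROM dequeued \
         ON CONFLICT (id) DO NOTHING \
         RETURNING id",
    )
    .bind(job_ids)
    .bind(w_id)
    .bind(serde_json::json!({"error": {"name": "Canceled", "canceler": canceler}}))
    .bind(canceler)
    .fetch_all(db)
    .await
}
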
$1", + "SELECT coalesce(COUNT(*), 0) as \"database_length!\", null::bigint as suspended FROM v2_completed_job WHERE workspace_id = $1", w_id ) .fetch_one(&db) @@ -1957,7 +1908,7 @@ async fn resume_suspended_job_internal( if !approved { sqlx::query!( - "UPDATE queue SET suspend = 0 WHERE id = $1", + "UPDATE v2_queue SET suspend = 0 WHERE id = $1", parent_flow_info.id ) .execute(&mut *tx) @@ -2029,7 +1980,7 @@ async fn resume_immediately_if_relevant<'c>( if matches!(status.current_step(), Some(FlowStatusModule::WaitingForEvents { job, .. }) if job == &job_id) { sqlx::query!( - "UPDATE queue SET suspend = $1 WHERE id = $2", + "UPDATE v2_queue SET suspend = $1 WHERE id = $2", suspend, flow.id, ) @@ -2080,9 +2031,11 @@ async fn get_suspended_parent_flow_info(job_id: Uuid, db: &DB) -> error::Result< let flow = sqlx::query_as!( FlowInfo, r#" - SELECT id, flow_status, suspend, script_path - FROM queue - WHERE id = ( SELECT parent_job FROM queue WHERE id = $1 UNION ALL SELECT parent_job FROM completed_job WHERE id = $1) + SELECT q.id, f.flow_status, q.suspend, j.runnable_path AS script_path + FROM v2_job_queue q + JOIN v2_job j USING (id) + JOIN v2_job_flow_runtime f USING (id) + WHERE id = ( SELECT parent_job FROM v2_job WHERE id = $1 ) FOR UPDATE "#, job_id, @@ -2100,8 +2053,8 @@ async fn get_suspended_flow_info<'c>( let flow = sqlx::query_as!( FlowInfo, r#" - SELECT id, flow_status, suspend, script_path - FROM queue + SELECT id AS "id!", flow_status, suspend AS "suspend!", script_path + FROM v2_queue WHERE id = $1 "#, job_id, @@ -2160,11 +2113,7 @@ pub async fn get_suspended_job_flow( let flow_id = sqlx::query_scalar!( r#" SELECT parent_job - FROM queue - WHERE id = $1 AND workspace_id = $2 - UNION ALL - SELECT parent_job - FROM completed_job + FROM v2_job WHERE id = $1 AND workspace_id = $2 "#, job, @@ -2301,7 +2250,7 @@ pub async fn get_flow_user_state( let r = sqlx::query_scalar!( r#" SELECT flow_status->'user_states'->$1 - FROM queue + FROM v2_queue WHERE id = $2 AND workspace_id = $3 "#, key, @@ -2323,7 +2272,7 @@ pub async fn set_flow_user_state( let mut tx = user_db.begin(&authed).await?; let r = sqlx::query_scalar!( r#" - UPDATE queue SET flow_status = JSONB_SET(flow_status, ARRAY['user_states'], JSONB_SET(COALESCE(flow_status->'user_states', '{}'::jsonb), ARRAY[$1], $2)) + UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['user_states'], JSONB_SET(COALESCE(flow_status->'user_states', '{}'::jsonb), ARRAY[$1], $2)) WHERE id = $3 AND workspace_id = $4 AND job_kind IN ('flow', 'flowpreview', 'flownode') RETURNING 1 "#, key, @@ -2762,7 +2711,7 @@ impl<'a> From for Job { parent_job: uj.parent_job, created_by: uj.created_by, created_at: uj.created_at, - started_at: uj.started_at.unwrap_or(uj.created_at), + started_at: uj.started_at, duration_ms: uj.duration_ms.unwrap(), success: uj.success.unwrap(), script_hash: uj.script_hash, @@ -3134,11 +3083,15 @@ pub async fn restart_flow( check_license_key_valid().await?; let mut tx = user_db.clone().begin(&authed).await?; - let completed_job = sqlx::query_as::<_, CompletedJob>( - "SELECT *, result->'wm_labels' as labels from completed_job WHERE id = $1 and workspace_id = $2", + let completed_job = sqlx::query!( + "SELECT + script_path, args AS \"args: sqlx::types::Json>>\", + tag AS \"tag!\", priority + FROM v2_completed_job + WHERE id = $1 and workspace_id = $2", + job_id, + &w_id, ) - .bind(job_id) - .bind(&w_id) .fetch_optional(&mut *tx) .await? 
.with_context(|| "Unable to find completed job with the given job UUID")?; @@ -3164,11 +3117,7 @@ pub async fn restart_flow( &db, tx, &w_id, - JobPayload::RestartedFlow { - completed_job_id: job_id, - step_id: step_id, - branch_or_iteration_n: branch_or_iteration_n, - }, + JobPayload::RestartedFlow { completed_job_id: job_id, step_id, branch_or_iteration_n }, push_args, &authed.username, &authed.email, @@ -3438,7 +3387,7 @@ pub async fn run_workflow_as_code( if !wkflow_query.skip_update.unwrap_or(false) { sqlx::query!( - "UPDATE queue SET flow_status = jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], jsonb_set(jsonb_set('{}'::jsonb, '{scheduled_for}', to_jsonb(now()::text)), '{name}', to_jsonb($4::text))) WHERE id = $2 AND workspace_id = $3", + "UPDATE v2_queue SET flow_status = jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], jsonb_set(jsonb_set('{}'::jsonb, '{scheduled_for}', to_jsonb(now()::text)), '{name}', to_jsonb($4::text))) WHERE id = $2 AND workspace_id = $3", uuid.to_string(), job_id, w_id, @@ -3569,11 +3518,17 @@ pub async fn run_wait_result( } if result.is_none() { - let row = sqlx::query_as::<_, RawResultWithSuccess>( - "SELECT '' as created_by, result, language, flow_status, success FROM completed_job WHERE id = $1 AND workspace_id = $2", + let row = sqlx::query!( + "SELECT + result AS \"result: sqlx::types::Json>\", + language AS \"language: ScriptLang\", + flow_status AS \"flow_status: sqlx::types::Json>\", + success AS \"success!\" + FROM v2_completed_job + WHERE id = $1 AND workspace_id = $2", + uuid, + &w_id ) - .bind(uuid) - .bind(&w_id) .fetch_optional(db) .await?; if let Some(mut raw_result) = row { @@ -3682,7 +3637,7 @@ pub async fn run_wait_result( async fn delete_job_metadata_after_use(db: &DB, job_uuid: Uuid) -> Result<(), Error> { sqlx::query!( - "UPDATE completed_job + "UPDATE v2_completed_job SET logs = '##DELETED##', args = '{}'::jsonb, result = '{}'::jsonb WHERE id = $1", job_uuid, @@ -3703,7 +3658,7 @@ async fn delete_job_metadata_after_use(db: &DB, job_uuid: Uuid) -> Result<(), Er pub async fn check_queue_too_long(db: &DB, queue_limit: Option) -> error::Result<()> { if let Some(limit) = queue_limit { let count = sqlx::query_scalar!( - "SELECT COUNT(*) FROM queue WHERE canceled = false AND (scheduled_for <= now() + "SELECT COUNT(*) FROM v2_queue WHERE canceled = false AND (scheduled_for <= now() OR (suspend_until IS NOT NULL AND ( suspend <= 0 OR suspend_until <= now())))", @@ -4657,7 +4612,7 @@ async fn add_batch_jobs( dedicated_worker, custom_concurrency_key, concurrent_limit, - concurrent_time_window_s, + concurrency_time_window_s, timeout, raw_code, raw_lock, @@ -4810,51 +4765,32 @@ async fn add_batch_jobs( }; let mut tx = user_db.begin(&authed).await?; - - let uuids = sqlx::query_scalar!( - r#"WITH uuid_table as ( - select gen_random_uuid() as uuid from generate_series(1, $5) - ) - INSERT INTO job - (id, workspace_id, raw_code, raw_lock, raw_flow) - (SELECT uuid, $1, $2, $3, $4 FROM uuid_table) - RETURNING id"#, - w_id, - raw_code, - raw_lock, - raw_flow.map(sqlx::types::Json) as Option>, - n - ) - .fetch_all(&mut *tx) + let uuids = Vec::from_iter(iter::repeat_with(|| ulid::Ulid::new().into()).take(n as usize)); + let args = uuids + .iter() + .map(|uuid| sqlx::types::Json(to_raw_value(&serde_json::json!({ "uuid": uuid })))) + .collect::>(); + tx = RawJob { + created_by: &authed.username, + permissioned_as: &username_to_permissioned_as(&authed.username), + permissioned_as_email: &authed.email, + kind: job_kind, + runnable_id: hash.map(|h| 
h.0), + runnable_path: path.as_deref(), + script_lang: Some(language), + tag: &tag, + concurrent_limit, + concurrency_time_window_s, + timeout, + raw_code: raw_code.as_deref(), + raw_lock: raw_lock.as_deref(), + raw_flow: raw_flow.as_ref(), + flow_status: flow_status.as_ref(), + ..RawJob::default() + } + .push_many(tx, &uuids, &w_id, &args) .await?; - let uuids = sqlx::query_scalar!( - r#"WITH uuid_table as ( - select unnest($11::uuid[]) as uuid - ) - INSERT INTO queue - (id, script_hash, script_path, job_kind, language, args, tag, created_by, permissioned_as, email, scheduled_for, workspace_id, concurrent_limit, concurrency_time_window_s, timeout, flow_status) - (SELECT uuid, $1, $2, $3, $4, ('{ "uuid": "' || uuid || '" }')::jsonb, $5, $6, $7, $8, $9, $10, $12, $13, $14, $15 FROM uuid_table) - RETURNING id"#, - hash.map(|h| h.0), - path, - job_kind.clone() as JobKind, - language as ScriptLang, - tag, - authed.username, - username_to_permissioned_as(&authed.username), - authed.email, - Utc::now(), - w_id, - &uuids, - concurrent_limit, - concurrent_time_window_s, - timeout, - flow_status.map(sqlx::types::Json) as Option> - ) - .fetch_all(&mut *tx) - .await?; - if let Some(custom_concurrency_key) = custom_concurrency_key { sqlx::query!( "INSERT INTO concurrency_key (job_id, key) SELECT id, $1 FROM unnest($2::uuid[]) as id", @@ -5130,29 +5066,33 @@ async fn get_job_update( Path((w_id, job_id)): Path<(String, Uuid)>, Query(JobUpdateQuery { running, log_offset, get_progress }): Query, ) -> error::JsonResult { - let record = sqlx::query_as::<_, JobUpdateRow>( - "SELECT running, substr(concat(coalesce(queue.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) as logs, mem_peak, - CASE WHEN is_flow_step is true then NULL else flow_status END as flow_status, - job_logs.log_offset + char_length(job_logs.logs) + 1 as log_offset, created_by - FROM queue - LEFT JOIN job_logs ON job_logs.job_id = queue.id - WHERE queue.workspace_id = $2 AND queue.id = $3", + let record = sqlx::query!( + "SELECT + running AS \"running!\", + substr(concat(coalesce(v2_queue.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs, + mem_peak, + CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\", + job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset, + created_by AS \"created_by!\" + FROM v2_queue + LEFT JOIN job_logs ON job_logs.job_id = v2_queue.id + WHERE v2_queue.workspace_id = $2 AND v2_queue.id = $3", + log_offset, + &w_id, + job_id ) - .bind(log_offset) - .bind(&w_id) - .bind(&job_id) .fetch_optional(&db) .await?; let progress: Option = if get_progress == Some(true) { sqlx::query_scalar!( - "SELECT scalar_int FROM job_stats WHERE workspace_id = $1 AND job_id = $2 AND metric_id = $3", - &w_id, - job_id, - "progress_perc" - ) - .fetch_optional(&db) - .await?.and_then(|inner| inner) + "SELECT scalar_int FROM job_stats WHERE workspace_id = $1 AND job_id = $2 AND metric_id = $3", + &w_id, + job_id, + "progress_perc" + ) + .fetch_optional(&db) + .await?.and_then(|inner| inner) } else { None }; @@ -5180,17 +5120,20 @@ async fn get_job_update( .map(|x: sqlx::types::Json>| x.0), })) } else { - let record = sqlx::query_as::<_, JobUpdateRow>( - "SELECT false as running, substr(concat(coalesce(completed_job.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) as logs, mem_peak, - CASE WHEN is_flow_step is true then NULL else flow_status END as flow_status, - job_logs.log_offset + char_length(job_logs.logs) + 1 as 
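`add_batch_jobs` above stops asking Postgres for `gen_random_uuid()` and instead mints job ids client-side from ULIDs before handing everything to `RawJob::push_many` (an API introduced elsewhere in this PR). Generating time-ordered ids up front lets each job's `args` embed its own uuid without a second round-trip. A sketch of the id/args preparation, assuming the `ulid` crate's `uuid` feature (for `Ulid: Into<Uuid>`) and the `uuid` crate's `serde` feature:

use serde_json::json;
use uuid::Uuid;

// Mint n ULID-based job ids (time-ordered, so they index well) and build the
// per-job args payload that embeds each job's own uuid.
fn batch_ids_and_args(n: usize) -> (Vec<Uuid>, Vec<serde_json::Value>) {
    let uuids: Vec<Uuid> = std::iter::repeat_with(|| ulid::Ulid::new().into())
        .take(n)
        .collect();
    let args: Vec<serde_json::Value> =
        uuids.iter().map(|id| json!({ "uuid": id })).collect();
    (uuids, args)
}
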
log_offset, created_by - FROM completed_job - LEFT JOIN job_logs ON job_logs.job_id = completed_job.id - WHERE completed_job.workspace_id = $2 AND id = $3", + let record = sqlx::query!( + "SELECT + substr(concat(coalesce(v2_completed_job.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs, + mem_peak, + CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\", + job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset, + created_by AS \"created_by!\" + FROM v2_completed_job + LEFT JOIN job_logs ON job_logs.job_id = v2_completed_job.id + WHERE v2_completed_job.workspace_id = $2 AND v2_completed_job.id = $3", + log_offset, + &w_id, + job_id ) - .bind(log_offset) - .bind(&w_id) - .bind(&job_id) .fetch_optional(&db) .await?; if let Some(record) = record { @@ -5333,7 +5276,7 @@ pub fn list_completed_jobs_query( join_outstanding_wait_times: bool, tags: Option>, ) -> SqlBuilder { - let mut sqlb = SqlBuilder::select_from("completed_job") + let mut sqlb = SqlBuilder::select_from("v2_completed_job") .fields(fields) .order_by("created_at", lq.order_desc.unwrap_or(true)) .offset(offset) @@ -5481,15 +5424,6 @@ pub struct RawResult { pub created_by: Option, } -#[derive(FromRow)] -pub struct RawResultWithSuccess { - pub result: Option>>, - pub flow_status: Option>>, - pub language: Option, - pub success: bool, - pub created_by: String, -} - async fn get_completed_job_result( OptAuthed(opt_authed): OptAuthed, Extension(db): Extension, @@ -5501,27 +5435,38 @@ async fn get_completed_job_result( .map(|authed| get_scope_tags(authed)) .flatten(); let result_o = if let Some(json_path) = json_path { - sqlx::query_as::<_, RawResult>( - "SELECT result #> $3 as result, flow_status, language, created_by FROM completed_job WHERE id = $1 AND workspace_id = $2 AND ($4::text[] IS NULL OR tag = ANY($4))", - ) - .bind(id) - .bind(&w_id) - .bind( - json_path - .split(".") - .map(|x| x.to_string()) - .collect::>(), + sqlx::query_as!( + RawResult, + "SELECT + result #> $3 AS \"result: sqlx::types::Json>\", + flow_status AS \"flow_status: sqlx::types::Json>\", + language AS \"language: ScriptLang\", + created_by AS \"created_by!\" + FROM v2_completed_job + WHERE id = $1 AND workspace_id = $2 AND ($4::text[] IS NULL OR tag = ANY($4))", + id, + &w_id, + json_path.split(".").collect::>() as Vec<&str>, + tags.as_ref().map(|v| v.as_slice()) as Option<&[&str]>, ) - .bind(tags.as_ref().map(|v| v.as_slice())) .fetch_optional(&db) .await? } else { - sqlx::query_as::<_, RawResult>("SELECT result, flow_status, language, created_by FROM completed_job WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))") - .bind(id) - .bind(&w_id) - .bind(tags.as_ref().map(|v| v.as_slice())) - .fetch_optional(&db) - .await? + sqlx::query_as!( + RawResult, + "SELECT + result AS \"result: sqlx::types::Json>\", + flow_status AS \"flow_status: sqlx::types::Json>\", + language AS \"language: ScriptLang\", + created_by AS \"created_by!\" + FROM v2_completed_job + WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + id, + &w_id, + tags.as_ref().map(|v| v.as_slice()) as Option<&[&str]>, + ) + .fetch_optional(&db) + .await? 
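The `get_job_update` queries above page through logs by character offset: the server returns `substr(concat(...), greatest($1 - log_offset, 0))` plus a new `log_offset`, and the client passes that offset back on the next poll so only fresh output crosses the wire (a first poll passes offset 0 and receives everything). A conceptual simplification of that contract over a single in-memory string — the real SQL additionally splits the log between the job row and `job_logs`:

// Return everything past the client's offset plus the offset to send next
// time. Offsets are 1-based character positions, matching the SQL above;
// this sketch assumes ASCII logs so byte and character offsets coincide.
fn page_logs(full_logs: &str, client_offset: usize) -> (&str, usize) {
    let start = client_offset.saturating_sub(1).min(full_logs.len());
    (&full_logs[start..], full_logs.len() + 1)
}
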
}; let mut raw_result = not_found_if_none(result_o, "Completed Job", id.to_string())?; @@ -5532,7 +5477,7 @@ async fn get_completed_job_result( let mut parent_job = id; while parent_job != suspended_job { let p_job = sqlx::query_scalar!( - "SELECT parent_job FROM queue WHERE id = $1 AND workspace_id = $2", + "SELECT parent_job FROM v2_queue WHERE id = $1 AND workspace_id = $2", parent_job, &w_id ) @@ -5599,7 +5544,7 @@ async fn count_by_tag( TagCount, r#" SELECT tag as "tag!", COUNT(*) as "count!" - FROM completed_job + FROM v2_completed_job WHERE started_at > NOW() - make_interval(secs => $1) AND ($2::text IS NULL OR workspace_id = $2) GROUP BY tag ORDER BY "count!" DESC @@ -5636,12 +5581,18 @@ async fn get_completed_job_result_maybe( .as_ref() .map(|authed| get_scope_tags(authed)) .flatten(); - let result_o = sqlx::query_as::<_, RawResultWithSuccess>( - "SELECT result, success, language, flow_status, created_by FROM completed_job WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + let result_o = sqlx::query!( + "SELECT + result AS \"result: sqlx::types::Json>\", success AS \"success!\", + language AS \"language: ScriptLang\", + flow_status AS \"flow_status: sqlx::types::Json>\", + created_by AS \"created_by!\" + FROM v2_completed_job + WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + id, + &w_id, + tags.as_ref().map(|v| v.as_slice()) as Option<&[&str]>, ) - .bind(id) - .bind(&w_id) - .bind(tags.as_ref().map(|v| v.as_slice())) .fetch_optional(&db) .await?; @@ -5668,7 +5619,7 @@ async fn get_completed_job_result_maybe( .into_response()) } else if get_started.is_some_and(|x| x) { let started = sqlx::query_scalar!( - "SELECT running FROM queue WHERE id = $1 AND workspace_id = $2", + "SELECT running AS \"running!\" FROM v2_queue WHERE id = $1 AND workspace_id = $2", id, w_id ) @@ -5705,7 +5656,7 @@ async fn delete_completed_job<'a>( require_admin(authed.is_admin, &authed.username)?; let tags = get_scope_tags(&authed); let job_o = sqlx::query_as::<_, CompletedJob>( - "UPDATE completed_job SET args = null, logs = '', result = null, deleted = true WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3)) \ + "UPDATE v2_completed_job SET args = null, logs = '', result = null, deleted = true WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3)) \ RETURNING *, null as labels", ) .bind(id) diff --git a/backend/windmill-api/src/resources.rs b/backend/windmill-api/src/resources.rs index 88f8d79c29e46..7ce92d00fdb04 100644 --- a/backend/windmill-api/src/resources.rs +++ b/backend/windmill-api/src/resources.rs @@ -31,7 +31,6 @@ use windmill_audit::ActionKind; use windmill_common::{ db::UserDB, error::{Error, JsonResult, Result}, - jobs::QueuedJob, utils::{not_found_if_none, paginate, require_admin, Pagination, StripPath}, variables, }; @@ -537,21 +536,28 @@ pub async fn transform_json_value<'c>( } Value::String(y) if y.starts_with("$") && job_id.is_some() => { let mut tx = authed_transaction_or_default(authed, user_db.clone(), db).await?; - let job = sqlx::query_as::<_, QueuedJob>( - "SELECT * FROM queue WHERE id = $1 AND workspace_id = $2", + let job_id = job_id.unwrap(); + let job = sqlx::query!( + "SELECT + email AS \"email!\", + created_by AS \"created_by!\", + parent_job, permissioned_as AS \"permissioned_as!\", + script_path, schedule_path, flow_step_id, root_job, + scheduled_for AS \"scheduled_for!: chrono::DateTime\" + FROM v2_queue WHERE id = $1 AND workspace_id = $2", + job_id, + 
workspace ) - .bind(job_id.unwrap()) - .bind(workspace) .fetch_optional(&mut *tx) .await?; tx.commit().await?; - let job = not_found_if_none(job, "Job", job_id.unwrap().to_string())?; + let job = not_found_if_none(job, "Job", job_id.to_string())?; let flow_path = if let Some(uuid) = job.parent_job { let mut tx: Transaction<'_, Postgres> = authed_transaction_or_default(authed, user_db.clone(), db).await?; - let p = sqlx::query_scalar!("SELECT script_path FROM queue WHERE id = $1", uuid) + let p = sqlx::query_scalar!("SELECT script_path FROM v2_queue WHERE id = $1", uuid) .fetch_optional(&mut *tx) .await? .flatten(); @@ -563,11 +569,11 @@ pub async fn transform_json_value<'c>( let variables = variables::get_reserved_variables( db, - &job.workspace_id, + workspace, token, &job.email, &job.created_by, - &job.id.to_string(), + &job_id.to_string(), &job.permissioned_as, job.script_path.clone(), job.parent_job.map(|x| x.to_string()), diff --git a/backend/windmill-api/src/schedule.rs b/backend/windmill-api/src/schedule.rs index 79e8c4dcb49db..d58996ddc461c 100644 --- a/backend/windmill-api/src/schedule.rs +++ b/backend/windmill-api/src/schedule.rs @@ -407,8 +407,8 @@ async fn list_schedule_with_jobs( let mut tx = user_db.begin(&authed).await?; let (per_page, offset) = paginate(pagination); let rows = sqlx::query_as!(ScheduleWJobs, - "SELECT schedule.*, t.jobs FROM schedule, LATERAL ( SELECT ARRAY (SELECT json_build_object('id', id, 'success', success, 'duration_ms', duration_ms) FROM completed_job WHERE - completed_job.schedule_path = schedule.path AND completed_job.workspace_id = $1 AND parent_job IS NULL AND is_skipped = False ORDER BY started_at DESC LIMIT 20) AS jobs ) t + "SELECT schedule.*, t.jobs FROM schedule, LATERAL ( SELECT ARRAY (SELECT json_build_object('id', id, 'success', success, 'duration_ms', duration_ms) FROM v2_completed_job WHERE + v2_completed_job.schedule_path = schedule.path AND v2_completed_job.workspace_id = $1 AND parent_job IS NULL AND is_skipped = False ORDER BY started_at DESC LIMIT 20) AS jobs ) t WHERE schedule.workspace_id = $1 ORDER BY schedule.edited_at desc LIMIT $2 OFFSET $3", w_id, per_page as i64, @@ -838,7 +838,7 @@ pub async fn clear_schedule<'c>( ) -> Result<()> { tracing::info!("Clearing schedule {}", path); sqlx::query!( - "DELETE FROM queue WHERE schedule_path = $1 AND running = false AND workspace_id = $2 AND is_flow_step = false", + "DELETE FROM v2_queue WHERE schedule_path = $1 AND running = false AND workspace_id = $2 AND is_flow_step = false", path, w_id ) diff --git a/backend/windmill-api/src/slack_approvals.rs b/backend/windmill-api/src/slack_approvals.rs index 1f48bd07dc4e9..3343fa2328d18 100644 --- a/backend/windmill-api/src/slack_approvals.rs +++ b/backend/windmill-api/src/slack_approvals.rs @@ -22,7 +22,6 @@ use windmill_common::{ cache, error::{self, Error}, jobs::JobKind, - scripts::ScriptHash, variables::{build_crypt, decrypt_value_with_mc}, }; @@ -448,10 +447,16 @@ async fn transform_schemas( let is_required = required.unwrap().contains(key); let default_value = default_args_json.and_then(|json| json.get(key).cloned()); - let dynamic_enums_value = dynamic_enums_json.and_then(|json| json.get(key).cloned()); - - let input_block = - create_input_block(key, schema, is_required, default_value, dynamic_enums_value); + let dynamic_enums_value = + dynamic_enums_json.and_then(|json| json.get(key).cloned()); + + let input_block = create_input_block( + key, + schema, + is_required, + default_value, + dynamic_enums_value, + ); match input_block { 
serde_json::Value::Array(arr) => blocks.extend(arr), _ => blocks.push(input_block), @@ -536,7 +541,7 @@ fn create_input_block( // Handle date-time format if let FieldType::String = schema.r#type { - if schema.format.as_deref() == Some("date-time") { + if schema.format.as_deref() == Some("date-time") { tracing::debug!("Date-time type"); let now = chrono::Local::now(); let current_date = now.format("%Y-%m-%d").to_string(); @@ -971,17 +976,17 @@ async fn get_modal_blocks( let (job_kind, script_hash, raw_flow, parent_job_id, created_at, created_by, script_path, args) = sqlx::query!( "SELECT - queue.job_kind AS \"job_kind: JobKind\", - queue.script_hash AS \"script_hash: ScriptHash\", - queue.raw_flow AS \"raw_flow: sqlx::types::Json>\", - completed_job.parent_job AS \"parent_job: Uuid\", - completed_job.created_at AS \"created_at: chrono::NaiveDateTime\", - completed_job.created_by AS \"created_by!\", - queue.script_path, - queue.args AS \"args: sqlx::types::Json>\" - FROM queue - JOIN completed_job ON completed_job.parent_job = queue.id - WHERE completed_job.id = $1 AND completed_job.workspace_id = $2 + v2_queue.job_kind AS \"job_kind!: JobKind\", + v2_queue.script_hash, + v2_queue.raw_flow AS \"raw_flow: sqlx::types::Json>\", + v2_completed_job.parent_job AS \"parent_job: Uuid\", + v2_completed_job.created_at AS \"created_at!: chrono::NaiveDateTime\", + v2_completed_job.created_by AS \"created_by!\", + v2_queue.script_path, + v2_queue.args AS \"args: sqlx::types::Json>\" + FROM v2_queue + JOIN v2_completed_job ON v2_completed_job.parent_job = v2_queue.id + WHERE v2_completed_job.id = $1 AND v2_completed_job.workspace_id = $2 LIMIT 1", job_id, &w_id @@ -992,17 +997,11 @@ async fn get_modal_blocks( .ok_or_else(|| error::Error::BadRequest("This workflow is no longer running and has either already timed out or been cancelled or completed.".to_string())) .map(|r| (r.job_kind, r.script_hash, r.raw_flow, r.parent_job, r.created_at, r.created_by, r.script_path, r.args))?; - let flow_data = match cache::job::fetch_flow(&db, job_kind, script_hash).await { + let flow_data = match cache::job::fetch_flow(&db, job_kind, script_hash, raw_flow).await { Ok(data) => data, - Err(_) => { - if let Some(parent_job_id) = parent_job_id.as_ref() { - cache::job::fetch_preview_flow(&db, parent_job_id, raw_flow).await? 
- } else { - return Err(error::Error::BadRequest( - "This workflow is no longer running and has either already timed out or been cancelled or completed.".to_string(), - )); - } - } + Err(_) => return Err(error::Error::BadRequest( + "This workflow is no longer running and has either already timed out or been cancelled or completed.".to_string(), + )) }; let flow_value = &flow_data.flow; diff --git a/backend/windmill-api/src/users.rs b/backend/windmill-api/src/users.rs index fdb40d97c5c8b..78901d2d665e5 100644 --- a/backend/windmill-api/src/users.rs +++ b/backend/windmill-api/src/users.rs @@ -471,7 +471,7 @@ async fn list_user_usage( FROM usr , LATERAL ( SELECT COALESCE(SUM(duration_ms + 1000)/1000 , 0)::BIGINT executions - FROM completed_job + FROM v2_completed_job WHERE workspace_id = $1 AND job_kind NOT IN ('flow', 'flowpreview', 'flownode') AND email = usr.email @@ -2309,25 +2309,9 @@ async fn update_username_in_workpsace<'c>( .execute(&mut **tx) .await?; - // ---- queue ---- + // ---- v2_job ---- sqlx::query!( - r#"UPDATE queue SET script_path = REGEXP_REPLACE(script_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE script_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) - .await?; - - sqlx::query!( - r#"UPDATE queue SET schedule_path = REGEXP_REPLACE(schedule_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE schedule_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) - .await?; - - sqlx::query!( - "UPDATE queue SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", + r#"UPDATE v2_job SET runnable_path = REGEXP_REPLACE(runnable_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE runnable_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, new_username, old_username, w_id @@ -2336,44 +2320,25 @@ async fn update_username_in_workpsace<'c>( .await?; sqlx::query!( - "UPDATE queue SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + r#"UPDATE v2_job SET schedule_path = REGEXP_REPLACE(schedule_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE schedule_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); + .await?; sqlx::query!( - "UPDATE queue SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", + "UPDATE v2_job SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); - - // ---- completed_job ---- - sqlx::query!( - r#"UPDATE completed_job SET script_path = REGEXP_REPLACE(script_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE script_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) .await?; sqlx::query!( - r#"UPDATE completed_job SET schedule_path = REGEXP_REPLACE(schedule_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE schedule_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) - .await?; - - sqlx::query!( - "UPDATE completed_job SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", + "UPDATE v2_job SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", new_username, old_username, w_id @@ -2381,25 +2346,25 @@ async fn update_username_in_workpsace<'c>( .execute(&mut **tx) .await?; + 
// ---- v2_job_queue ---- sqlx::query!( - "UPDATE completed_job SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", + "UPDATE v2_job_queue SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); + .await?; + // ---- v2_job_completed job ---- sqlx::query!( - "UPDATE completed_job SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + "UPDATE v2_job_completed SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); + .await?; // ---- resources---- sqlx::query!( diff --git a/backend/windmill-api/src/websocket_triggers.rs b/backend/windmill-api/src/websocket_triggers.rs index 2f00a1f5a4291..f14c2b5f41ffd 100644 --- a/backend/windmill-api/src/websocket_triggers.rs +++ b/backend/windmill-api/src/websocket_triggers.rs @@ -558,17 +558,12 @@ async fn wait_runnable_result( .into()); } - #[derive(sqlx::FromRow)] - struct RawResult { - result: Option>>, - success: bool, - } - - let result = sqlx::query_as::<_, RawResult>( - "SELECT result, success FROM completed_job WHERE id = $1 AND workspace_id = $2", + let result = sqlx::query!( + "SELECT result AS \"result: SqlxJson>\", success AS \"success!\" + FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + Uuid::parse_str(&job_id)?, + workspace_id ) - .bind(Uuid::parse_str(&job_id).unwrap()) - .bind(workspace_id) .fetch_optional(db) .await; diff --git a/backend/windmill-api/src/workspaces_extra.rs b/backend/windmill-api/src/workspaces_extra.rs index a8f0c8721dcb5..2378e85c0271d 100644 --- a/backend/windmill-api/src/workspaces_extra.rs +++ b/backend/windmill-api/src/workspaces_extra.rs @@ -144,7 +144,7 @@ pub(crate) async fn change_workspace_id( .await?; sqlx::query!( - "UPDATE completed_job SET workspace_id = $1 WHERE workspace_id = $2", + "UPDATE v2_job_completed SET workspace_id = $1 WHERE workspace_id = $2", &rw.new_id, &old_id ) @@ -269,7 +269,7 @@ pub(crate) async fn change_workspace_id( .await?; sqlx::query!( - "UPDATE queue SET workspace_id = $1 WHERE workspace_id = $2", + "UPDATE v2_job_queue SET workspace_id = $1 WHERE workspace_id = $2", &rw.new_id, &old_id ) @@ -277,7 +277,7 @@ pub(crate) async fn change_workspace_id( .await?; sqlx::query!( - "UPDATE job SET workspace_id = $1 WHERE workspace_id = $2", + "UPDATE v2_job SET workspace_id = $1 WHERE workspace_id = $2", &rw.new_id, &old_id ) @@ -430,7 +430,10 @@ pub(crate) async fn delete_workspace( sqlx::query!("DELETE FROM dependency_map WHERE workspace_id = $1", &w_id) .execute(&mut *tx) .await?; - sqlx::query!("DELETE FROM queue WHERE workspace_id = $1", &w_id) + sqlx::query!("DELETE FROM v2_job_queue WHERE workspace_id = $1", &w_id) + .execute(&mut *tx) + .await?; + sqlx::query!("DELETE FROM v2_job WHERE workspace_id = $1", &w_id) .execute(&mut *tx) .await?; sqlx::query!("DELETE FROM capture WHERE workspace_id = $1", &w_id) @@ -468,9 +471,12 @@ pub(crate) async fn delete_workspace( .execute(&mut *tx) .await?; - sqlx::query!("DELETE FROM completed_job WHERE workspace_id = $1", &w_id) - .execute(&mut *tx) - .await?; + sqlx::query!( + "DELETE FROM v2_job_completed WHERE workspace_id = $1", + &w_id + ) + .execute(&mut *tx) + .await?; sqlx::query!("DELETE FROM job_stats WHERE workspace_id = $1", &w_id) .execute(&mut *tx) diff --git a/backend/windmill-common/src/bench.rs b/backend/windmill-common/src/bench.rs deleted file mode 100644 index 0e89af571c2f2..0000000000000 --- 
a/backend/windmill-common/src/bench.rs +++ /dev/null @@ -1,213 +0,0 @@ -use crate::{ - worker::{write_file, TMP_DIR}, - DB, -}; -use serde::Serialize; -use tokio::time::Instant; - -#[derive(Serialize)] -pub struct BenchmarkInfo { - #[serde(skip)] - pub start: Instant, - #[serde(skip)] - pub iters: u64, - timings: Vec, - pub iter_durations: Vec, - pub total_duration: Option, -} - -impl BenchmarkInfo { - pub fn new() -> Self { - BenchmarkInfo { - iters: 0, - timings: vec![], - start: Instant::now(), - iter_durations: vec![], - total_duration: None, - } - } - - pub fn add_iter(&mut self, bench: BenchmarkIter, inc_iters: bool) { - if inc_iters { - self.iters += 1; - } - let elapsed_total = bench.start.elapsed().as_nanos() as u64; - self.timings.push(bench); - self.iter_durations.push(elapsed_total); - } - - pub fn write_to_file(&mut self, path: &str) -> anyhow::Result<()> { - let total_duration = self.start.elapsed().as_millis() as u64; - self.total_duration = Some(total_duration as u64); - - println!( - "Writing benchmark {path}, duration of benchmark: {total_duration}s and RPS: {}", - self.iters as f64 / total_duration as f64 - ); - write_file(TMP_DIR, path, &serde_json::to_string(&self).unwrap()).expect("write profiling"); - Ok(()) - } -} - -#[derive(Serialize)] -pub struct BenchmarkIter { - #[serde(skip)] - pub start: Instant, - #[serde(skip)] - last_instant: Instant, - last_step: String, - timings: Vec<(String, u32)>, -} - -impl BenchmarkIter { - pub fn new() -> Self { - BenchmarkIter { - last_instant: Instant::now(), - timings: vec![], - start: Instant::now(), - last_step: String::new(), - } - } - - pub fn add_timing(&mut self, name: &str) { - let elapsed = self.last_instant.elapsed().as_nanos() as u32; - self.timings - .push((format!("{}->{}", self.last_step, name), elapsed)); - self.last_instant = Instant::now(); - self.last_step = name.to_string(); - } -} - -pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { - use crate::{jobs::JobKind, scripts::ScriptLang}; - - let benchmark_kind = std::env::var("BENCHMARK_KIND").unwrap_or("noop".to_string()); - - if benchmark_jobs > 0 { - match benchmark_kind.as_str() { - "dedicated" => { - // you need to create the script first, check https://github.com/windmill-labs/windmill/blob/b76a92cfe454c686f005c65f534e29e039f3c706/benchmarks/lib.ts#L47 - let hash = sqlx::query_scalar!( - "SELECT hash FROM script WHERE path = $1 AND workspace_id = $2", - "f/benchmarks/dedicated", - "admins" - ) - .fetch_one(db) - .await - .unwrap_or_else(|_e| panic!("failed to insert dedicated jobs")); - sqlx::query!("INSERT INTO queue (id, script_hash, script_path, job_kind, language, tag, created_by, permissioned_as, email, scheduled_for, workspace_id) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 FROM generate_series(1, $11))", - hash, - "f/benchmarks/dedicated", - JobKind::Script as JobKind, - ScriptLang::Bun as ScriptLang, - "admins:f/benchmarks/dedicated", - "admin", - "u/admin", - "admin@windmill.dev", - chrono::Utc::now(), - "admins", - benchmark_jobs - ) - .execute(db) - .await.unwrap_or_else(|_e| panic!("failed to insert dedicated jobs")); - } - "parallelflow" => { - //create dedicated script - sqlx::query!("INSERT INTO script (summary, description, dedicated_worker, content, workspace_id, path, hash, language, tag, created_by, lock) VALUES ('', '', true, $1, $2, $3, $4, $5, $6, $7, '') ON CONFLICT (workspace_id, hash) DO NOTHING", - "export async function main() { - console.log('hello world'); - }", - "admins", - 
"u/admin/parallelflow", - 1234567890, - ScriptLang::Deno as ScriptLang, - "flow", - "admin", - ) - .execute(db) - .await.unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs {_e:#}")); - sqlx::query!("INSERT INTO queue (id, script_hash, script_path, job_kind, language, tag, created_by, permissioned_as, email, scheduled_for, workspace_id, raw_flow, flow_status) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12 FROM generate_series(1, 1))", - None::, - None::, - JobKind::FlowPreview as JobKind, - ScriptLang::Deno as ScriptLang, - "flow", - "admin", - "u/admin", - "admin@windmill.dev", - chrono::Utc::now(), - "admins", - serde_json::from_str::(r#" -{ - "modules": [ - { - "id": "a", - "value": { - "type": "forloopflow", - "modules": [ - { - "id": "b", - "value": { - "path": "u/admin/parallelflow", - "type": "script", - "tag_override": "", - "input_transforms": {} - }, - "summary": "calctest" - } - ], - "iterator": { - "expr": "[...new Array(300)]", - "type": "javascript" - }, - "parallel": true, - "parallelism": 10, - "skip_failures": true - } - } - ], - "preprocessor_module": null -} - "#).unwrap(), - serde_json::from_str::(r#" -{ - "step": 0, - "modules": [ - { - "id": "a", - "type": "WaitingForPriorSteps" - } - ], - "cleanup_module": {}, - "failure_module": { - "id": "failure", - "type": "WaitingForPriorSteps" - }, - "preprocessor_module": null - } - - "#).unwrap() - ) - .execute(db) - .await.unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs")); - } - _ => { - sqlx::query!("INSERT INTO queue (id, script_hash, script_path, job_kind, language, tag, created_by, permissioned_as, email, scheduled_for, workspace_id) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 FROM generate_series(1, $11))", - None::, - None::, - JobKind::Noop as JobKind, - ScriptLang::Deno as ScriptLang, - "deno", - "admin", - "u/admin", - "admin@windmill.dev", - chrono::Utc::now(), - "admins", - benchmark_jobs - ) - .execute(db) - .await.unwrap_or_else(|_e| panic!("failed to insert noop jobs")); - } - } - } -} diff --git a/backend/windmill-common/src/cache.rs b/backend/windmill-common/src/cache.rs index 13d71dd4028eb..a6c78394a4edc 100644 --- a/backend/windmill-common/src/cache.rs +++ b/backend/windmill-common/src/cache.rs @@ -302,6 +302,26 @@ pub enum RawData { Script(Arc), } +impl RawData { + pub fn from_raw( + raw_code: Option, + raw_lock: Option, + raw_flow: Option>>, + ) -> error::Result> { + match (raw_flow, raw_code, raw_lock) { + (Some(Json(raw_flow)), _, _) => FlowData::from_raw(raw_flow) + .map(Arc::new) + .map(Self::Flow) + .map(Some), + (_, Some(code), lock) => Ok(ScriptData { code, lock }) + .map(Arc::new) + .map(Self::Script) + .map(Some), + _ => Ok(None), + } + } +} + #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ScriptMetadata { pub language: Option, @@ -353,7 +373,6 @@ pub fn clear() { flow::clear(); script::clear(); app::clear(); - job::clear(); } pub mod flow { @@ -601,114 +620,19 @@ pub mod job { use super::*; use crate::jobs::JobKind; - #[cfg(not(feature = "scoped_cache"))] - lazy_static! { - /// Very small in-memory cache for "preview" jobs raw data. - static ref PREVIEWS: Cache = Cache::new(50); - } - - #[cfg(feature = "scoped_cache")] - lazy_static! { - /// Very small in-memory cache for "preview" jobs raw data. - static ref PREVIEWS: Cache<(ThreadId, Uuid), RawData> = Cache::new(50); - } - - /// Clear the job cache. 
- pub fn clear() { - PREVIEWS.clear(); - } - - #[track_caller] - pub fn fetch_preview_flow<'a, 'c>( - e: impl PgExecutor<'c> + 'a, - job: &'a Uuid, - // original raw values from `queue` or `completed_job` tables: - // kept for backward compatibility. - raw_flow: Option>>, - ) -> impl Future>> + 'a { - let fetch_preview = fetch_preview(e, job, None, None, raw_flow); - async move { - fetch_preview.await.and_then(|data| match data { - RawData::Flow(data) => Ok(data), - RawData::Script(_) => Err(error::Error::InternalErr(format!( - "Job ({job}) isn't a flow job." - ))), - }) - } - } - - #[track_caller] - pub fn fetch_preview_script<'a, 'c>( - e: impl PgExecutor<'c> + 'a, - job: &'a Uuid, - // original raw values from `queue` or `completed_job` tables: - // kept for backward compatibility. - raw_lock: Option, - raw_code: Option, - ) -> impl Future>> + 'a { - let fetch_preview = fetch_preview(e, job, raw_lock, raw_code, None); - async move { - fetch_preview.await.and_then(|data| match data { - RawData::Script(data) => Ok(data), - RawData::Flow(_) => Err(error::Error::InternalErr(format!( - "Job ({job}) isn't a script job." - ))), - }) - } - } - - #[track_caller] - pub fn fetch_preview<'a, 'c>( - e: impl PgExecutor<'c> + 'a, - job: &'a Uuid, - // original raw values from `queue` or `completed_job` tables: - // kept for backward compatibility. - raw_lock: Option, - raw_code: Option, - raw_flow: Option>>, - ) -> impl Future> + 'a { - let loc = Location::caller(); - let fetch = async move { - match (raw_lock, raw_code, raw_flow) { - (None, None, None) => sqlx::query!( - "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" \ - FROM job WHERE id = $1 LIMIT 1", - job - ) - .fetch_optional(e) - .await - .map_err(Into::into) - .and_then(unwrap_or_error(&loc, "Preview", job)) - .map(|r| (r.raw_lock, r.raw_code, r.raw_flow)), - (lock, code, flow) => Ok((lock, code, flow)), - } - .and_then(|(lock, code, flow)| match flow { - Some(Json(flow)) => FlowData::from_raw(flow).map(Arc::new).map(RawData::Flow), - _ => Ok(RawData::Script(Arc::new(ScriptData { - code: code.unwrap_or_default(), - lock, - }))), - }) - }; - #[cfg(not(feature = "scoped_cache"))] - return PREVIEWS.get_or_insert_async(job, fetch); - #[cfg(feature = "scoped_cache")] - async move { - let job = &(std::thread::current().id(), job.clone()); - PREVIEWS.get_or_insert_async(job, fetch).await - } - } + // TODO(uael): new PREVIEWS cache #[track_caller] pub fn fetch_script<'c>( e: impl PgExecutor<'c>, kind: JobKind, - hash: Option, + runnable_id: Option, + // TODO(uael): add raw values here ) -> impl Future>> { use JobKind::*; let loc = Location::caller(); async move { - match (kind, hash.map(|ScriptHash(id)| id)) { + match (kind, runnable_id) { (FlowScript, Some(id)) => flow::fetch_script(e, FlowNodeId(id)).await, (Script | Dependencies, Some(hash)) => script::fetch(e, ScriptHash(hash)) .await @@ -727,16 +651,20 @@ pub mod job { pub fn fetch_flow<'c>( e: impl PgExecutor<'c> + Copy, kind: JobKind, - hash: Option, + runnable_id: Option, + raw_flow: Option>>, ) -> impl Future>> { use JobKind::*; let loc = Location::caller(); async move { - match (kind, hash.map(|ScriptHash(id)| id)) { + if let Some(Json(raw_flow)) = raw_flow { + return FlowData::from_raw(raw_flow).map(Arc::new); + } + match (kind, runnable_id) { (FlowDependencies, Some(id)) => flow::fetch_version(e, id).await, (FlowNode, Some(id)) => flow::fetch_flow(e, FlowNodeId(id)).await, (Flow, Some(id)) => match flow::fetch_version_lite(e, id).await { - Ok(raw_flow) => Ok(raw_flow), + 
Ok(data) => Ok(data), Err(_) => flow::fetch_version(e, id).await, }, _ => Err(error::Error::InternalErr(format!( diff --git a/backend/windmill-common/src/jobs.rs b/backend/windmill-common/src/jobs.rs index 13a8dd7e8ae6d..282851975b1d9 100644 --- a/backend/windmill-common/src/jobs.rs +++ b/backend/windmill-common/src/jobs.rs @@ -197,7 +197,7 @@ pub struct CompletedJob { pub parent_job: Option<Uuid>, pub created_by: String, pub created_at: chrono::DateTime<chrono::Utc>, - pub started_at: chrono::DateTime<chrono::Utc>, + pub started_at: Option<chrono::DateTime<chrono::Utc>>, pub duration_ms: i64, pub success: bool, #[serde(skip_serializing_if = "Option::is_none")] diff --git a/backend/windmill-common/src/lib.rs b/backend/windmill-common/src/lib.rs index 9fcd14111ff4d..b039faec63078 100644 --- a/backend/windmill-common/src/lib.rs +++ b/backend/windmill-common/src/lib.rs @@ -18,8 +18,6 @@ use sqlx::{Pool, Postgres}; pub mod apps; pub mod auth; -#[cfg(feature = "benchmark")] -pub mod bench; pub mod cache; pub mod db; pub mod ee; diff --git a/backend/windmill-common/src/queue.rs b/backend/windmill-common/src/queue.rs index 6cbb5611c55ea..93c26e7d7db6a 100644 --- a/backend/windmill-common/src/queue.rs +++ b/backend/windmill-common/src/queue.rs @@ -3,14 +3,14 @@ use std::collections::HashMap; use sqlx::{Pool, Postgres}; pub async fn get_queue_counts(db: &Pool<Postgres>) -> HashMap<String, u32> { - sqlx::query_as::<_, (String, i64)>( - "SELECT tag, count(*) as count FROM queue WHERE + sqlx::query!( + "SELECT tag AS \"tag!\", count(*) AS \"count!\" FROM v2_queue WHERE scheduled_for <= now() - ('3 seconds')::interval AND running = false GROUP BY tag", ) .fetch_all(db) .await .ok() - .map(|v| v.into_iter().map(|(k, v)| (k, v as u32)).collect()) + .map(|v| v.into_iter().map(|x| (x.tag, x.count as u32)).collect()) .unwrap_or_else(|| HashMap::new()) } diff --git a/backend/windmill-common/src/worker.rs b/backend/windmill-common/src/worker.rs index c64184b4849f5..e2aeca2d20ce6 100644 --- a/backend/windmill-common/src/worker.rs +++ b/backend/windmill-common/src/worker.rs @@ -12,7 +12,10 @@ use std::{ io::Write, path::{Component, Path, PathBuf}, str::FromStr, - sync::{atomic::AtomicBool, Arc}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, }; use tokio::sync::RwLock; use windmill_macros::annotations; @@ -103,20 +106,22 @@ lazy_static::lazy_static!
{ pub static ref DISABLE_FLOW_SCRIPT: bool = std::env::var("DISABLE_FLOW_SCRIPT").ok().is_some_and(|x| x == "1" || x == "true"); } +pub static MIN_VERSION_IS_LATEST: AtomicBool = AtomicBool::new(false); + pub async fn make_suspended_pull_query(wc: &WorkerConfig) { if wc.worker_tags.len() == 0 { tracing::error!("Empty tags in worker tags, skipping"); return; } let query = format!( - "UPDATE queue + "UPDATE v2_queue SET running = true , started_at = coalesce(started_at, now()) , last_ping = now() , suspend_until = null WHERE id = ( SELECT id - FROM queue + FROM v2_job_queue WHERE suspend_until IS NOT NULL AND (suspend <= 0 OR suspend_until <= now()) AND tag IN ({}) ORDER BY priority DESC NULLS LAST, created_at FOR UPDATE SKIP LOCKED @@ -141,14 +146,14 @@ pub async fn make_pull_query(wc: &WorkerConfig) { tracing::error!("Empty tags in priority tags, skipping"); continue; } - let query = format!("UPDATE queue + let query = format!("UPDATE v2_queue SET running = true , started_at = coalesce(started_at, now()) , last_ping = now() , suspend_until = null WHERE id = ( SELECT id - FROM queue + FROM v2_job_queue WHERE running = false AND tag IN ({}) AND scheduled_for <= now() ORDER BY priority DESC NULLS LAST, scheduled_for FOR UPDATE SKIP LOCKED @@ -609,6 +614,8 @@ pub async fn update_min_version<'c, E: sqlx::Executor<'c, Database = sqlx::Postg tracing::info!("Minimal worker version: {min_version}"); } + MIN_VERSION_IS_LATEST.store(min_version == cur_version, Ordering::Relaxed); + *MIN_VERSION_IS_AT_LEAST_1_427.write().await = min_version >= Version::new(1, 427, 0); *MIN_VERSION_IS_AT_LEAST_1_432.write().await = min_version >= Version::new(1, 432, 0); *MIN_VERSION_IS_AT_LEAST_1_440.write().await = min_version >= Version::new(1, 440, 0); diff --git a/backend/windmill-queue/src/jobs.rs b/backend/windmill-queue/src/jobs.rs index 5b2df07a3915f..17692ef93b38b 100644 --- a/backend/windmill-queue/src/jobs.rs +++ b/backend/windmill-queue/src/jobs.rs @@ -29,18 +29,18 @@ use windmill_audit::ActionKind; use windmill_common::utils::now_from_db; use windmill_common::{ auth::{fetch_authed_from_permissioned_as, permissioned_as_to_username}, - cache::{self, FlowData}, + cache, db::{Authed, UserDB}, error::{self, to_anyhow, Error}, flow_status::{ - BranchAllStatus, FlowCleanupModule, FlowStatus, FlowStatusModule, FlowStatusModuleWParent, - Iterator, JobResult, RestartedFrom, RetryStatus, MAX_RETRY_ATTEMPTS, MAX_RETRY_INTERVAL, + BranchAllStatus, FlowStatus, FlowStatusModule, FlowStatusModuleWParent, Iterator, + JobResult, RestartedFrom, RetryStatus, MAX_RETRY_ATTEMPTS, MAX_RETRY_INTERVAL, }, flows::{ add_virtual_items_if_necessary, FlowModule, FlowModuleValue, FlowValue, InputTransform, }, jobs::{ - get_payload_tag_from_prefixed_path, CompletedJob, JobKind, JobPayload, QueuedJob, RawCode, + get_payload_tag_from_prefixed_path, JobKind, JobPayload, QueuedJob, RawCode, ENTRYPOINT_OVERRIDE, PREPROCESSOR_FAKE_ENTRYPOINT, }, schedule::Schedule, @@ -49,8 +49,8 @@ use windmill_common::{ utils::{not_found_if_none, report_critical_error, StripPath, WarnAfterExt}, worker::{ to_raw_value, CLOUD_HOSTED, DEFAULT_TAGS_PER_WORKSPACE, DEFAULT_TAGS_WORKSPACES, - DISABLE_FLOW_SCRIPT, MIN_VERSION_IS_AT_LEAST_1_427, MIN_VERSION_IS_AT_LEAST_1_432, - MIN_VERSION_IS_AT_LEAST_1_440, NO_LOGS, WORKER_PULL_QUERIES, WORKER_SUSPENDED_PULL_QUERY, + DISABLE_FLOW_SCRIPT, MIN_VERSION_IS_AT_LEAST_1_432, MIN_VERSION_IS_AT_LEAST_1_440, NO_LOGS, + WORKER_PULL_QUERIES, WORKER_SUSPENDED_PULL_QUERY, }, DB, METRICS_ENABLED, }; @@ -173,7 +173,7 @@ pub async 
fn cancel_single_job<'c>( }); } else { let id: Option = sqlx::query_scalar!( - "UPDATE queue SET canceled = true, canceled_by = $1, canceled_reason = $2, scheduled_for = now(), suspend = 0 WHERE id = $3 AND workspace_id = $4 AND (canceled = false OR canceled_reason != $2) RETURNING id", + "UPDATE v2_queue SET canceled = true, canceled_by = $1, canceled_reason = $2, scheduled_for = now(), suspend = 0 WHERE id = $3 AND workspace_id = $4 AND (canceled = false OR canceled_reason != $2) RETURNING id AS \"id!\"", username, reason, job_running.id, @@ -249,7 +249,7 @@ pub async fn cancel_job<'c>( while !jobs.is_empty() { let p_job = jobs.pop(); let new_jobs = sqlx::query_scalar!( - "SELECT id FROM queue WHERE parent_job = $1 AND workspace_id = $2", + "SELECT id AS \"id!\" FROM v2_queue WHERE parent_job = $1 AND workspace_id = $2", p_job, w_id ) @@ -361,7 +361,7 @@ async fn cancel_persistent_script_jobs_internal<'c>( // we could have retrieved the job IDs in the first query where we retrieve the hashes, but just in case a job was inserted in the queue right in-between the two above query, we re-do the fetch here let jobs_to_cancel = sqlx::query_scalar::<_, Uuid>( - "SELECT id FROM queue WHERE workspace_id = $1 AND script_path = $2 AND canceled = false", + "SELECT id FROM v2_queue WHERE workspace_id = $1 AND script_path = $2 AND canceled = false", ) .bind(w_id) .bind(script_path) @@ -533,11 +533,12 @@ pub async fn add_completed_job( )); } - let _job_id = queued_job.id; - let (opt_uuid, _duration, _skip_downstream_error_handlers) = (|| async { + let job_id = queued_job.id; + let (canceled_by, canceled_reason) = + canceled_by.map_or((None, None), |c| (c.username, c.reason)); + let (opt_uuid, canceled, _duration, _skip_downstream_error_handlers) = (|| async { let mut tx = db.begin().await?; - let job_id = queued_job.id; // tracing::error!("1 {:?}", start.elapsed()); tracing::debug!( @@ -546,139 +547,81 @@ pub async fn add_completed_job( serde_json::to_string(&result).unwrap_or_else(|_| "".to_string()) ); - let (raw_code, raw_lock, raw_flow) = if !*MIN_VERSION_IS_AT_LEAST_1_427.read().await { - sqlx::query!( - "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" - FROM job WHERE id = $1 AND workspace_id = $2 LIMIT 1", - &job_id, - &queued_job.workspace_id - ) - .fetch_one(db) - .map_ok(|record| (record.raw_code, record.raw_lock, record.raw_flow)) - .or_else(|_| { - sqlx::query!( - "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" - FROM queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", - &job_id, - &queued_job.workspace_id - ) - .fetch_one(db) - .map_ok(|record| (record.raw_code, record.raw_lock, record.raw_flow)) - }) - .await - .unwrap_or_default() - } else { - (None, None, None) - }; - - let mem_peak = mem_peak.max(queued_job.mem_peak.unwrap_or(0)); // add_time!(bench, "add_completed_job query START"); - let _duration = sqlx::query_scalar!( - "INSERT INTO completed_job AS cj - ( workspace_id - , id - , parent_job - , created_by - , created_at - , started_at - , duration_ms - , success - , script_hash - , script_path - , args - , result - , raw_code - , raw_lock - , canceled - , canceled_by - , canceled_reason - , job_kind - , schedule_path - , permissioned_as - , flow_status - , raw_flow - , is_flow_step - , is_skipped - , language - , email - , visible_to_owner - , mem_peak - , tag - , priority - ) - VALUES ($1, $2, $3, $4, $5, COALESCE($6, now()), COALESCE($30::bigint, (EXTRACT('epoch' FROM (now())) - EXTRACT('epoch' FROM (COALESCE($6, now()))))*1000), $7, $8, $9,\ - $10, $11, 
$12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29) - ON CONFLICT (id) DO UPDATE SET success = $7, result = $11 RETURNING duration_ms", - queued_job.workspace_id, - queued_job.id, - queued_job.parent_job, - queued_job.created_by, - queued_job.created_at, - queued_job.started_at, - success, - queued_job.script_hash.map(|x| x.0), - queued_job.script_path, - &queued_job.args as &Option>>>, - result as Json<&T>, - raw_code, - raw_lock, - canceled_by.is_some(), - canceled_by.clone().map(|cb| cb.username).flatten(), - canceled_by.clone().map(|cb| cb.reason).flatten(), - queued_job.job_kind.clone() as JobKind, - queued_job.schedule_path, - queued_job.permissioned_as, - &queued_job.flow_status as &Option>>, - &raw_flow as &Option>>, - queued_job.is_flow_step, - skipped, - queued_job.language.clone() as Option, - queued_job.email, - queued_job.visible_to_owner, - if mem_peak > 0 { Some(mem_peak) } else { None }, - queued_job.tag, - queued_job.priority, - duration, + let (canceled, _duration) = sqlx::query!( + "WITH runtime AS ( + SELECT GREATEST($8, r.memory_peak) AS memory_peak, flow_status + FROM v2_job_runtime r + LEFT JOIN v2_job_flow_runtime f USING (id) + WHERE r.id = $1 + ), queued AS ( + DELETE FROM v2_job_queue q + WHERE q.id = $1 + RETURNING + q.id, q.workspace_id, q.started_at, q.worker, + COALESCE($5, q.canceled_by) AS canceled_by, + COALESCE($6, q.canceled_reason) AS canceled_reason + ) INSERT INTO v2_job_completed ( + id, workspace_id, started_at, worker, memory_peak, flow_status, result, + canceled_by, canceled_reason, + duration_ms, + status + ) SELECT + id, workspace_id, started_at, worker, memory_peak, flow_status, $7, + canceled_by, canceled_reason, + COALESCE($2::bigint, CASE + WHEN started_at IS NULL THEN 0 + ELSE (EXTRACT('epoch' FROM NOW()) - EXTRACT('epoch' FROM started_at)) * 1000 + END) AS duration_ms, + CASE + WHEN $4::BOOLEAN THEN 'skipped'::job_status + WHEN canceled_by IS NOT NULL THEN 'canceled'::job_status + WHEN $3::BOOLEAN THEN 'success'::job_status + ELSE 'failure'::job_status + END AS status + FROM queued, runtime + ON CONFLICT (id) DO UPDATE SET status = EXCLUDED.status, result = EXCLUDED.result + RETURNING status = 'canceled' AS \"canceled!\", duration_ms", + /* $1 */ queued_job.id, + /* $2 */ duration, + /* $3 */ success, + /* $4 */ skipped, + /* $5 */ canceled_by, + /* $6 */ canceled_reason, + /* $7 */ result as Json<&T>, + /* $8 */ if mem_peak > 0 { Some(mem_peak) } else { None }, ) + .map(|record| (record.canceled, record.duration_ms)) .fetch_one(&mut *tx) .await .map_err(|e| Error::InternalErr(format!("Could not add completed job {job_id}: {e:#}")))?; - + // Hacky trick used by `workflow_as_code` if !queued_job.is_flow_step { - if _duration > 500 - && (queued_job.job_kind == JobKind::Script - || queued_job.job_kind == JobKind::Preview) - { - if let Err(e) = sqlx::query!( - "UPDATE completed_job SET flow_status = q.flow_status FROM queue q WHERE completed_job.id = $1 AND q.id = $1 AND q.workspace_id = $2 AND completed_job.workspace_id = $2 AND q.flow_status IS NOT NULL", - &queued_job.id, - &queued_job.workspace_id - ) - .execute(&mut *tx) - .await { - tracing::error!("Could not update job duration: {}", e); - } - } if let Some(parent_job) = queued_job.parent_job { - if let Err(e) = sqlx::query_scalar!( - "UPDATE queue SET flow_status = jsonb_set(jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], COALESCE(flow_status->$1, '{}'::jsonb)), array[$1, 'duration_ms'], to_jsonb($2::bigint)) WHERE id = $3 AND workspace_id 
= $4", - &queued_job.id.to_string(), - _duration, - parent_job, - &queued_job.workspace_id - ) - .execute(&mut *tx) - .await { - tracing::error!("Could not update parent job flow_status: {}", e); - } + let _ = sqlx::query_scalar!( + "UPDATE v2_job_flow_runtime SET flow_status = jsonb_set( + jsonb_set( + COALESCE(flow_status, '{}'::jsonb), + array[$1], + COALESCE(flow_status->$1, '{}'::jsonb) + ), + array[$1, 'duration_ms'], + to_jsonb($2::bigint) + ) WHERE id = $3", + &queued_job.id.to_string(), + _duration, + parent_job + ) + .execute(&mut *tx) + .await + .inspect_err(|e| tracing::error!("Could not update parent job flow_status: {}", e)); } } // tracing::error!("Added completed job {:#?}", queued_job); let mut _skip_downstream_error_handlers = false; - tx = delete_job(tx, &queued_job.workspace_id, job_id).await?; // tracing::error!("3 {:?}", start.elapsed()); if queued_job.is_flow_step { @@ -689,7 +632,7 @@ pub async fn add_completed_job( parent_job ); sqlx::query!( - "UPDATE queue SET last_ping = now() WHERE id = $1 AND workspace_id = $2 AND canceled = false", + "UPDATE v2_queue SET last_ping = now() WHERE id = $1 AND workspace_id = $2 AND canceled = false", parent_job, &queued_job.workspace_id ) @@ -738,7 +681,7 @@ pub async fn add_completed_job( AND flow_status->'modules'->0->>'job' = $1 ) ) - FROM completed_job WHERE id = $2 AND workspace_id = $3", + FROM v2_completed_job WHERE id = $2 AND workspace_id = $3", Uuid::nil().to_string(), &queued_job.id, &queued_job.workspace_id @@ -757,7 +700,7 @@ pub async fn add_completed_job( match err { Error::QuotaExceeded(_) => (), // scheduling next job failed and could not disable schedule => make zombie job to retry - _ => return Ok((Some(job_id), 0, true)), + _ => return Ok((Some(job_id), canceled, 0, true)), } }; } @@ -868,7 +811,7 @@ pub async fn add_completed_job( "inserted completed job: {} (success: {success})", queued_job.id ); - Ok((None, _duration, _skip_downstream_error_handlers)) as windmill_common::error::Result<(Option, i64, bool)> + Result::Ok((None, canceled, _duration, _skip_downstream_error_handlers)) }) .retry( ConstantBuilder::default() @@ -925,18 +868,19 @@ pub async fn add_completed_job( #[cfg(feature = "enterprise")] if !success { async fn has_failure_module(db: &Pool, job: &QueuedJob) -> bool { - if let Ok(flow) = cache::job::fetch_flow(db, job.job_kind, job.script_hash).await { + if let Ok(flow) = + cache::job::fetch_flow(db, job.job_kind, job.script_hash.map(|x| x.0), None).await + { return flow.value().failure_module.is_some(); } - sqlx::query_scalar!("SELECT raw_flow->'failure_module' != 'null'::jsonb FROM job WHERE id = $1", job.id) - .fetch_one(db) - .or_else(|_| - sqlx::query_scalar!("SELECT raw_flow->'failure_module' != 'null'::jsonb FROM completed_job WHERE id = $1", job.id) - .fetch_one(db) - ) - .await - .unwrap_or(Some(false)) - .unwrap_or(false) + sqlx::query_scalar!( + "SELECT raw_flow->'failure_module' != 'null'::jsonb FROM v2_job WHERE id = $1", + job.id + ) + .fetch_one(db) + .await + .unwrap_or(Some(false)) + .unwrap_or(false) } if queued_job.email == ERROR_HANDLER_USER_EMAIL { @@ -1003,13 +947,8 @@ pub async fn add_completed_job( ); } - if let Err(err) = send_error_to_workspace_handler( - &queued_job, - canceled_by.is_some(), - db, - Json(&result), - ) - .await + if let Err(err) = + send_error_to_workspace_handler(&queued_job, canceled, db, Json(&result)).await { match err { Error::QuotaExceeded(_) => {} @@ -1028,7 +967,7 @@ pub async fn add_completed_job( } } - if !queued_job.is_flow_step && 
queued_job.job_kind == JobKind::Script && canceled_by.is_none() { + if !queued_job.is_flow_step && queued_job.job_kind == JobKind::Script && !canceled { if let Some(hash) = queued_job.script_hash { let p = sqlx::query_scalar!( "SELECT restart_unless_cancelled FROM script WHERE hash = $1 AND workspace_id = $2", @@ -1381,15 +1320,23 @@ async fn apply_schedule_handlers<'a, 'c, T: Serialize + Send + Sync>( let times = schedule.on_failure_times.unwrap_or(1).max(1); let exact = schedule.on_failure_exact.unwrap_or(false); if times > 1 || exact { - let past_jobs = sqlx::query_as::<_, CompletedJobSubset>( - "SELECT success, result, started_at FROM completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 ORDER BY created_at DESC LIMIT $5", + let past_jobs = sqlx::query!( + "SELECT + success AS \"success!\", + result AS \"result: Json>\", + started_at AS \"started_at!\" + FROM v2_completed_job + WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 + ORDER BY created_at DESC + LIMIT $5", + &schedule.workspace_id, + &schedule.path, + script_path, + job_id, + if exact { times } else { times - 1 } as i64 ) - .bind(&schedule.workspace_id) - .bind(&schedule.path) - .bind(script_path) - .bind(job_id) - .bind(if exact { times } else { times - 1 } as i64,) - .fetch_all(db).await?; + .fetch_all(db) + .await?; let match_times = if exact { past_jobs.len() == times as usize @@ -1444,15 +1391,22 @@ async fn apply_schedule_handlers<'a, 'c, T: Serialize + Send + Sync>( if let Some(ref on_recovery_path) = schedule.on_recovery.clone() { let tx = db.begin().await?; let times = schedule.on_recovery_times.unwrap_or(1).max(1); - let past_jobs = sqlx::query_as::<_, CompletedJobSubset>( - "SELECT success, result, started_at FROM completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 ORDER BY created_at DESC LIMIT $5", + let past_jobs = sqlx::query!( + "SELECT + success AS \"success!\", + result AS \"result: Json>\", + started_at AS \"started_at!\"\ + FROM v2_completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 + ORDER BY created_at DESC + LIMIT $5", + &schedule.workspace_id, + &schedule.path, + script_path, + job_id, + times as i64 ) - .bind(&schedule.workspace_id) - .bind(&schedule.path) - .bind(script_path) - .bind(job_id) - .bind(times as i64) - .fetch_all(db).await?; + .fetch_all(db) + .await?; if past_jobs.len() < times as usize { return Ok(()); @@ -1464,7 +1418,7 @@ async fn apply_schedule_handlers<'a, 'c, T: Serialize + Send + Sync>( return Ok(()); } - let failed_job = past_jobs[past_jobs.len() - 1].clone(); + let failed_job = &past_jobs[past_jobs.len() - 1]; if !failed_job.success { handle_recovered_schedule( @@ -1476,7 +1430,8 @@ async fn apply_schedule_handlers<'a, 'c, T: Serialize + Send + Sync>( schedule.is_flow, w_id, &on_recovery_path, - failed_job, + failed_job.result.as_ref().map(AsRef::as_ref), + failed_job.started_at, result, times, started_at, @@ -1622,7 +1577,8 @@ async fn handle_recovered_schedule<'a, 'c, T: Serialize + Send + Sync>( is_flow: bool, w_id: &str, on_recovery_path: &str, - error_job: CompletedJobSubset, + result: Option<&Box>, + started_at: DateTime, successful_job_result: Json<&'a T>, successful_times: i32, successful_job_started_at: DateTime, @@ -1632,10 +1588,7 @@ async fn handle_recovered_schedule<'a, 'c, T: Serialize + Send + Sync>( get_payload_tag_from_prefixed_path(on_recovery_path, db, w_id).await?; let mut extra = HashMap::new(); - 
extra.insert( - "error_started_at".to_string(), - to_raw_value(&error_job.started_at), - ); + extra.insert("error_started_at".to_string(), to_raw_value(&started_at)); extra.insert("schedule_path".to_string(), to_raw_value(&schedule_path)); extra.insert("path".to_string(), to_raw_value(&script_path)); extra.insert("is_flow".to_string(), to_raw_value(&is_flow)); @@ -1661,9 +1614,8 @@ async fn handle_recovered_schedule<'a, 'c, T: Serialize + Send + Sync>( } } - let args = error_job - .result - .and_then(|x| serde_json::from_str::>>(x.0.get()).ok()) + let args = result + .and_then(|x| serde_json::from_str::>>(x.get()).ok()) .unwrap_or_else(HashMap::new); let (email, permissioned_as) = if let Some(on_behalf_of) = on_behalf_of.as_ref() { @@ -1878,7 +1830,7 @@ pub async fn pull( ); sqlx::query_scalar!( - "SELECT null FROM queue WHERE id = $1 FOR UPDATE", + "SELECT null FROM v2_job_queue WHERE id = $1 FOR UPDATE", pulled_job.id ) .fetch_one(&mut *tx) @@ -1919,7 +1871,7 @@ pub async fn pull( let min_started_at = sqlx::query!( "SELECT COALESCE((SELECT MIN(started_at) as min_started_at - FROM queue + FROM v2_queue WHERE script_path = $1 AND job_kind != 'dependencies' AND running = true AND workspace_id = $2 AND canceled = false AND concurrent_limit > 0), $3) as min_started_at, now() AS now", job_script_path, &pulled_job.workspace_id, @@ -1966,7 +1918,7 @@ pub async fn pull( let job_uuid: Uuid = pulled_job.id; let avg_script_duration: Option = sqlx::query_scalar!( "SELECT CAST(ROUND(AVG(duration_ms), 0) AS BIGINT) AS avg_duration_s FROM - (SELECT duration_ms FROM concurrency_key LEFT JOIN completed_job ON completed_job.id = concurrency_key.job_id WHERE key = $1 AND ended_at IS NOT NULL + (SELECT duration_ms FROM concurrency_key LEFT JOIN v2_completed_job ON v2_completed_job.id = concurrency_key.job_id WHERE key = $1 AND ended_at IS NOT NULL ORDER BY ended_at DESC LIMIT 10) AS t", job_concurrency_key @@ -1998,7 +1950,7 @@ pub async fn pull( loop { let nestimated = estimated_next_schedule_timestamp + inc; let jobs_in_window = sqlx::query_scalar!( - "SELECT COUNT(*) FROM queue LEFT JOIN concurrency_key ON concurrency_key.job_id = queue.id + "SELECT COUNT(*) FROM v2_queue LEFT JOIN concurrency_key ON concurrency_key.job_id = v2_queue.id WHERE key = $1 AND running = false AND canceled = false AND scheduled_for >= $2 AND scheduled_for < $3", job_concurrency_key, estimated_next_schedule_timestamp, @@ -2022,7 +1974,7 @@ pub async fn pull( // if using posgtres, then we're able to re-queue the entire batch of scheduled job for this script_path, so we do it sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET running = false , started_at = null , scheduled_for = $1 @@ -2193,17 +2145,6 @@ fn fullpath_with_workspace( ) } -#[derive(FromRow)] -pub struct ResultR { - result: Option>>, -} - -#[derive(FromRow)] -pub struct ResultWithId { - result: Option>>, - id: Uuid, -} - pub async fn get_result_by_id( db: Pool, w_id: String, @@ -2222,25 +2163,29 @@ pub async fn get_result_by_id( { Ok(res) => Ok(res), Err(_) => { - let running_flow_job =sqlx::query_as::<_, QueuedJob>( - "SELECT * FROM queue WHERE COALESCE((SELECT root_job FROM queue WHERE id = $1), $1) = id AND workspace_id = $2" - ).bind(flow_id) - .bind(&w_id) - .fetch_optional(&db).await?; - match running_flow_job { - Some(job) => { - let restarted_from = windmill_common::utils::not_found_if_none( - job.parse_flow_status() - .map(|status| status.restarted_from) - .flatten(), + let root = sqlx::query!( + "SELECT + id As \"id!\", + 
flow_status->'restarted_from'->'flow_job_id' AS \"restarted_from: Json\" + FROM v2_queue + WHERE COALESCE((SELECT root_job FROM v2_queue WHERE id = $1), $1) = id AND workspace_id = $2", + flow_id, + &w_id + ) + .fetch_optional(&db) + .await?; + match root { + Some(root) => { + let restarted_from_id = not_found_if_none( + root.restarted_from, "Id not found in the result's mapping of the root job and root job had no restarted from information", - format!("parent: {}, root: {}, id: {}", flow_id, job.id, node_id), + format!("parent: {}, root: {}, id: {}", flow_id, root.id, node_id), )?; get_result_by_id_from_original_flow( &db, w_id.as_str(), - &restarted_from.flow_job_id, + &restarted_from_id, node_id.as_str(), json_path.clone(), ) @@ -2261,12 +2206,6 @@ pub async fn get_result_by_id( } } -#[derive(FromRow)] -struct FlowJobResult { - leaf_jobs: Option>>, - parent_job: Option, -} - pub async fn get_result_and_success_by_id_from_flow( db: &Pool, w_id: &str, @@ -2307,7 +2246,8 @@ pub async fn get_result_and_success_by_id_from_flow( let success = match &job_result { JobResult::SingleJob(job_id) => { sqlx::query_scalar!( - "SELECT success FROM completed_job WHERE id = $1 AND workspace_id = $2", + "SELECT success AS \"success!\" + FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", job_id, w_id ) @@ -2324,7 +2264,11 @@ pub async fn get_result_and_success_by_id_from_flow( SELECT module->>'type' = 'Success' FROM modules WHERE module->>'id' = $3"#, - if completed { "completed_job" } else { "queue" } + if completed { + "v2_completed_job" + } else { + "v2_queue" + } ); sqlx::query_scalar(&query) .bind(flow_id) @@ -2363,11 +2307,14 @@ pub async fn get_result_by_id_from_running_flow_inner( flow_id: &Uuid, node_id: &str, ) -> error::Result { - let flow_job_result = sqlx::query_as::<_, FlowJobResult>( - "SELECT leaf_jobs->$1::text as leaf_jobs, parent_job FROM queue WHERE COALESCE((SELECT root_job FROM queue WHERE id = $2), $2) = id AND workspace_id = $3") - .bind(node_id) - .bind(flow_id) - .bind(w_id) + let flow_job_result = sqlx::query!( + "SELECT leaf_jobs->$1::text AS \"leaf_jobs: Json>\", parent_job + FROM v2_queue + WHERE COALESCE((SELECT root_job FROM v2_queue WHERE id = $2), $2) = id AND workspace_id = $3", + node_id, + flow_id, + w_id, + ) .fetch_optional(db) .await?; @@ -2384,11 +2331,12 @@ pub async fn get_result_by_id_from_running_flow_inner( if job_result.is_none() && flow_job_result.parent_job.is_some() { let parent_job = flow_job_result.parent_job.unwrap(); - let root_job = sqlx::query_scalar!("SELECT root_job FROM queue WHERE id = $1", parent_job) - .fetch_optional(db) - .await? - .flatten() - .unwrap_or(parent_job); + let root_job = + sqlx::query_scalar!("SELECT root_job FROM v2_queue WHERE id = $1", parent_job) + .fetch_optional(db) + .await? 
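// Sketch of the typed-JSONB pattern used above, assuming `flow_status` is
// shaped like {"restarted_from": {"flow_job_id": "<uuid>"}}: sqlx's
// `AS "name: T"` override decodes a jsonb sub-field straight into a Rust
// type, so reading one id no longer requires deserializing a full FlowStatus.
//
//     let restarted_from: Option<Json<Uuid>> = sqlx::query_scalar!(
//         "SELECT flow_status->'restarted_from'->'flow_job_id' AS \"id: Json<Uuid>\"
//          FROM v2_queue WHERE id = $1",
//         flow_id
//     )
//     .fetch_one(&db)
//     .await?;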
+ .flatten() + .unwrap_or(parent_job); return get_result_by_id_from_running_flow_inner(db, w_id, &root_job, node_id).await; } @@ -2401,18 +2349,13 @@ pub async fn get_result_by_id_from_running_flow_inner( Ok(result_id) } -#[async_recursion] async fn get_completed_flow_node_result_rec( db: &Pool, w_id: &str, - subflows: Vec, + subflows: impl std::iter::Iterator, node_id: &str, ) -> error::Result> { - for subflow in subflows { - let flow_status = subflow.parse_flow_status().ok_or_else(|| { - error::Error::InternalErr(format!("Could not parse flow status of {}", subflow.id)) - })?; - + for (id, flow_status) in subflows { if let Some(node_status) = flow_status .modules .iter() @@ -2423,15 +2366,27 @@ async fn get_completed_flow_node_result_rec( (Some(_), Some(jobs)) => Ok(Some(JobResult::ListJob(jobs))), _ => Err(error::Error::NotFound(format!( "Flow result by id not found going top-down in subflows (currently: {}), (id: {})", - subflow.id, + id, node_id, ))), }; } else { - let subflows = sqlx::query_as::<_, CompletedJob>( - "SELECT *, null as labels FROM completed_job WHERE parent_job = $1 AND workspace_id = $2 AND flow_status IS NOT NULL", - ).bind(subflow.id).bind(w_id).fetch_all(db).await?; - match get_completed_flow_node_result_rec(db, w_id, subflows, node_id).await? { + let subflows = sqlx::query!( + "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\" + FROM v2_completed_job + WHERE parent_job = $1 AND workspace_id = $2 AND flow_status IS NOT NULL", + id, + w_id + ) + .map(|record| (record.id, record.flow_status.0)) + .fetch_all(db) + .await? + .into_iter(); + match Box::pin(get_completed_flow_node_result_rec( + db, w_id, subflows, node_id, + )) + .await? + { Some(res) => return Ok(Some(res)), None => continue, }; @@ -2447,23 +2402,25 @@ async fn get_result_by_id_from_original_flow_inner( completed_flow_id: &Uuid, node_id: &str, ) -> error::Result { - let flow_job = sqlx::query_as::<_, CompletedJob>( - "SELECT *, null as labels FROM completed_job WHERE id = $1 AND workspace_id = $2", + let flow_job = sqlx::query!( + "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\" + FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + completed_flow_id, + w_id ) - .bind(completed_flow_id) - .bind(w_id) + .map(|record| (record.id, record.flow_status.0)) .fetch_optional(db) .await?; - let flow_job = windmill_common::utils::not_found_if_none( + let flow_job = not_found_if_none( flow_job, "Root completed job", format!("root: {}, id: {}", completed_flow_id, node_id), )?; - match get_completed_flow_node_result_rec(db, w_id, vec![flow_job], node_id).await? { + match get_completed_flow_node_result_rec(db, w_id, [flow_job].into_iter(), node_id).await? { Some(res) => Ok(res), - None => Err(error::Error::NotFound(format!( + None => Err(Error::NotFound(format!( "Flow result by id not found going top-down from {}, (id: {})", completed_flow_id, node_id ))), @@ -2499,26 +2456,26 @@ async fn extract_result_from_job_result( let Some(job_id) = job_ids.get(idx).cloned() else { return Ok(to_raw_value(&serde_json::Value::Null)); }; - Ok(sqlx::query_as::<_, ResultR>( - "SELECT result #> $3 as result FROM completed_job WHERE id = $1 AND workspace_id = $2", - ) - .bind(job_id) - .bind(w_id) - .bind( - parts.map(|x| x.to_string()).collect::>() + Ok(sqlx::query_scalar!( + "SELECT result #> $3 AS \"result: Json>\" + FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + job_id, + w_id, + parts.collect::>() as Vec<&str> ) .fetch_optional(db) .await? 
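// General pattern sketch (not from this changeset): the refactor above drops
// `#[async_recursion]` by boxing the recursive call; the indirection gives the
// future a known size, which is all the attribute macro provided. Since Rust
// 1.77 this compiles as-is:
//
//     async fn walk(db: &sqlx::PgPool, depth: u32) -> anyhow::Result<()> {
//         if depth == 0 {
//             return Ok(());
//         }
//         Box::pin(walk(db, depth - 1)).await // indirection breaks the infinite type
//     }
//
// Passing the subflows as an `Iterator<Item = (Uuid, FlowStatus)>` also avoids
// cloning whole CompletedJob rows just to read their id and flow_status.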
- .map(|r| r.result.map(|x| x.0)) .flatten() + .map(|x| x.0) .unwrap_or_else(|| to_raw_value(&serde_json::Value::Null))) } None => { - let rows = sqlx::query_as::<_, ResultWithId>( - "SELECT id, result FROM completed_job WHERE id = ANY($1) AND workspace_id = $2", + let rows = sqlx::query!( + "SELECT id AS \"id!\", result AS \"result: Json>\" + FROM v2_completed_job WHERE id = ANY($1) AND workspace_id = $2", + job_ids.as_slice(), + w_id ) - .bind(job_ids.as_slice()) - .bind(w_id) .fetch_all(db) .await? .into_iter() @@ -2535,15 +2492,15 @@ async fn extract_result_from_job_result( Ok(to_raw_value(&result)) } }, - JobResult::SingleJob(x) => Ok(sqlx::query_as::<_, ResultR>( - "SELECT result #> $3 as result FROM completed_job WHERE id = $1 AND workspace_id = $2", - ) - .bind(x) - .bind(w_id) - .bind( + JobResult::SingleJob(x) => Ok(sqlx::query!( + "SELECT result #> $3 AS \"result: Json>\" + FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + x, + w_id, json_path - .map(|x| x.split(".").map(|x| x.to_string()).collect::>()) - .unwrap_or_default(), + .as_ref() + .map(|x| x.split(".").collect::>()) + .unwrap_or_default() as Vec<&str>, ) .fetch_optional(db) .await? @@ -2564,7 +2521,7 @@ pub async fn delete_job<'c>( } let job_removed = sqlx::query_scalar!( - "DELETE FROM queue WHERE workspace_id = $1 AND id = $2 RETURNING 1", + "DELETE FROM v2_queue WHERE workspace_id = $1 AND id = $2 RETURNING 1", w_id, job_id ) @@ -2588,7 +2545,7 @@ pub async fn delete_job<'c>( pub async fn job_is_complete(db: &DB, id: Uuid, w_id: &str) -> error::Result { Ok(sqlx::query_scalar!( - "SELECT EXISTS(SELECT 1 FROM completed_job WHERE id = $1 AND workspace_id = $2)", + "SELECT EXISTS(SELECT 1 FROM v2_completed_job WHERE id = $1 AND workspace_id = $2)", id, w_id ) @@ -2604,7 +2561,7 @@ async fn get_queued_job_tx<'c>( ) -> error::Result> { sqlx::query_as::<_, QueuedJob>( "SELECT * - FROM queue WHERE id = $1 AND workspace_id = $2", + FROM v2_queue WHERE id = $1 AND workspace_id = $2", ) .bind(id) .bind(w_id) @@ -2616,7 +2573,7 @@ async fn get_queued_job_tx<'c>( pub async fn get_queued_job(id: &Uuid, w_id: &str, db: &DB) -> error::Result> { sqlx::query_as::<_, QueuedJob>( "SELECT * - FROM queue WHERE id = $1 AND workspace_id = $2", + FROM v2_queue WHERE id = $1 AND workspace_id = $2", ) .bind(id) .bind(w_id) @@ -2729,6 +2686,181 @@ lazy_static::lazy_static! 
{ pub static ref RE_ARG_TAG: Regex = Regex::new(r#"\$args\[(\w+)\]"#).unwrap(); } +pub struct RawJob<'a> { + pub created_by: &'a str, + pub permissioned_as: &'a str, + pub permissioned_as_email: &'a str, + pub kind: JobKind, + pub runnable_id: Option, + pub runnable_path: Option<&'a str>, + pub parent_job: Option, + pub script_lang: Option, + pub flow_step_id: Option<&'a str>, + pub flow_root_job: Option, + pub schedule_path: Option<&'a str>, + pub tag: &'a str, + pub same_worker: bool, + pub visible_to_owner: bool, + pub concurrent_limit: Option, + pub concurrency_time_window_s: Option, + pub cache_ttl: Option, + pub timeout: Option, + pub priority: Option, + pub pre_run_error: Option<&'a str>, + pub raw_code: Option<&'a str>, + pub raw_lock: Option<&'a str>, + pub raw_flow: Option<&'a FlowValue>, + pub flow_status: Option<&'a FlowStatus>, + pub scheduled_for: Option>, + pub running: bool, +} + +impl<'a> RawJob<'a> { + pub const fn default() -> Self { + Self { + created_by: "missing", + permissioned_as: "g/all", + permissioned_as_email: "missing@email.xyz", + kind: JobKind::Script, + runnable_id: None, + runnable_path: None, + parent_job: None, + script_lang: None, + flow_step_id: None, + flow_root_job: None, + schedule_path: None, + tag: "other", + same_worker: false, + visible_to_owner: false, + concurrent_limit: None, + concurrency_time_window_s: None, + cache_ttl: None, + timeout: None, + priority: None, + pre_run_error: None, + raw_code: None, + raw_lock: None, + raw_flow: None, + flow_status: None, + scheduled_for: None, + running: false, + } + } + + pub async fn push<'c>( + &self, + tx: sqlx::PgTransaction<'c>, + id: Uuid, + workspace_id: &str, + args: Json>, + ) -> error::Result> { + self.push_many(tx, &[id], workspace_id, &[args]).await + } + + pub async fn push_many<'c>( + &self, + mut tx: sqlx::PgTransaction<'c>, + id: &[Uuid], + workspace_id: &str, + args: &[Json>], + ) -> error::Result> { + if id.len() > args.len() { + return Err(Error::InternalErr( + "args must be at least as long as id".to_string(), + )); + } + let created_at = Utc::now(); + sqlx::query!( + "INSERT INTO v2_job ( + id, workspace_id, + created_at, created_by, permissioned_as, permissioned_as_email, + kind, runnable_id, runnable_path, parent_job, script_lang, + flow_step_id, flow_root_job, + schedule_path, + tag, same_worker, visible_to_owner, concurrent_limit, concurrency_time_window_s, + cache_ttl, timeout, priority, + args, pre_run_error, + raw_code, raw_lock, raw_flow + ) SELECT + unnest($1::uuid[]), $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, + $16, $17, $18, $19, $20, $21, $22, unnest($23::jsonb[]), $24, $25, $26, $27 + ", + id, + workspace_id, + created_at, + self.created_by, + self.permissioned_as, + self.permissioned_as_email, + self.kind as JobKind, + self.runnable_id, + self.runnable_path, + self.parent_job, + self.script_lang as Option, + self.flow_step_id, + self.flow_root_job, + self.schedule_path, + self.tag, + self.same_worker, + self.visible_to_owner, + self.concurrent_limit, + self.concurrency_time_window_s, + self.cache_ttl, + self.timeout, + self.priority, + args as &[Json>], + self.pre_run_error, + self.raw_code, + self.raw_lock, + self.raw_flow.map(Json) as Option>, + ) + .execute(&mut *tx) + .warn_after_seconds(1) + .await?; + sqlx::query!( + "INSERT INTO v2_job_queue ( + id, workspace_id, started_at, scheduled_for, running, created_at, tag, priority + ) SELECT unnest($1::uuid[]), $2, $3, $4, $5, $6, $7, $8", + id, + workspace_id, + self.running.then(|| created_at), + 
self.scheduled_for.unwrap_or(created_at), + self.running, + created_at, + self.tag, + self.priority, + ) + .execute(&mut *tx) + .warn_after_seconds(1) + .await?; + sqlx::query!( + "INSERT INTO v2_job_runtime (id) SELECT unnest($1::uuid[])", + id + ) + .execute(&mut *tx) + .warn_after_seconds(1) + .await?; + if let Some(flow_status) = self.flow_status { + sqlx::query!( + "INSERT INTO v2_job_flow_runtime (id, flow_status) SELECT unnest($1::uuid[]), $2", + id, + Json(flow_status) as Json<&FlowStatus>, + ) + .execute(&mut *tx) + .warn_after_seconds(1) + .await?; + } + + #[cfg(feature = "prometheus")] + if METRICS_ENABLED.load(std::sync::atomic::Ordering::Relaxed) { + // TODO: technically the job isn't queued yet, as the transaction can be rolled back. + // Should be solved when moving these metrics to the queue abstraction. + QUEUE_PUSH_COUNT.inc_by(id.len() as u64); + } + + Ok(tx) + } +} + // #[instrument(level = "trace", skip_all)] pub async fn push<'c, 'd>( _db: &Pool, @@ -2744,7 +2876,7 @@ pub async fn push<'c, 'd>( parent_job: Option, root_job: Option, job_id: Option, - is_flow_step: bool, + _is_flow_step: bool, mut same_worker: bool, pre_run_error: Option<&windmill_common::error::Error>, visible_to_owner: bool, @@ -2843,11 +2975,13 @@ pub async fn push<'c, 'd>( ))); } - let in_queue = - sqlx::query_scalar!("SELECT COUNT(id) FROM queue WHERE email = $1", email) - .fetch_one(_db) - .await? - .unwrap_or(0); + let in_queue = sqlx::query_scalar!( + "SELECT COUNT(id) FROM v2_queue WHERE email = $1", + email + ) + .fetch_one(_db) + .await? + .unwrap_or(0); if in_queue > MAX_FREE_EXECS.into() { return Err(error::Error::QuotaExceeded(format!( @@ -2856,7 +2990,7 @@ pub async fn push<'c, 'd>( } let concurrent_runs = sqlx::query_scalar!( - "SELECT COUNT(id) FROM queue WHERE running = true AND email = $1", + "SELECT COUNT(id) FROM v2_queue WHERE running = true AND email = $1", email ) .fetch_one(_db) @@ -2898,7 +3032,7 @@ pub async fn push<'c, 'd>( } let in_queue_workspace = sqlx::query_scalar!( - "SELECT COUNT(id) FROM queue WHERE workspace_id = $1", + "SELECT COUNT(id) FROM v2_queue WHERE workspace_id = $1", workspace_id ) .fetch_one(_db) @@ -2912,7 +3046,7 @@ pub async fn push<'c, 'd>( } let concurrent_runs_workspace = sqlx::query_scalar!( - "SELECT COUNT(id) FROM queue WHERE running = true AND workspace_id = $1", + "SELECT COUNT(id) FROM v2_queue WHERE running = true AND workspace_id = $1", workspace_id ) .fetch_one(_db) @@ -3203,41 +3337,20 @@ pub async fn push<'c, 'd>( JobPayload::RawFlow { mut value, path, restarted_from } => { add_virtual_items_if_necessary(&mut value.modules); - let flow_status: FlowStatus = match restarted_from { - Some(restarted_from_val) => { - let (_, _, _, step_n, truncated_modules, user_states, cleanup_module) = - restarted_flows_resolution( - _db, - workspace_id, - restarted_from_val.flow_job_id, - restarted_from_val.step_id.as_str(), - restarted_from_val.branch_or_iteration_n, - ) - .await?; - FlowStatus { - step: step_n, - modules: truncated_modules, - // failure_module is reset - failure_module: Box::new(FlowStatusModuleWParent { - parent_module: None, - module_status: FlowStatusModule::WaitingForPriorSteps { - id: "failure".to_string(), - }, - }), - cleanup_module, - // retry status is reset - retry: RetryStatus { fail_count: 0, failed_jobs: vec![] }, - // TODO: for now, flows with approval conditions aren't supported for restart - approval_conditions: None, - restarted_from: Some(RestartedFrom { - flow_job_id: restarted_from_val.flow_job_id, - step_id: 
restarted_from_val.step_id, - branch_or_iteration_n: restarted_from_val.branch_or_iteration_n, - }), - user_states, - preprocessor_module: None, - } - } + let flow_status = match restarted_from { + Some(restarted_from) => restarted_flows_resolution( + workspace_id, + &value, + sqlx::query_scalar!( + "SELECT flow_status AS \"flow_status: Json>\" + FROM v2_job_completed WHERE id = $1", + restarted_from.flow_job_id, + ) + .fetch_optional(_db) + .await? + .flatten(), + restarted_from, + )?, _ => { value.preprocessor_module = None; FlowStatus::new(&value) @@ -3416,45 +3529,26 @@ pub async fn push<'c, 'd>( ) } JobPayload::RestartedFlow { completed_job_id, step_id, branch_or_iteration_n } => { - let ( - version, - flow_path, - flow_data, - step_n, - truncated_modules, - user_states, - cleanup_module, - ) = restarted_flows_resolution( - _db, - workspace_id, - completed_job_id, - step_id.as_str(), - branch_or_iteration_n, + let (job_kind, runnable_id, runnable_path, flow_status) = sqlx::query!( + "SELECT kind AS \"kind: JobKind\", runnable_id, runnable_path, + flow_status AS \"flow_status: Json>\" + FROM v2_job JOIN v2_job_completed USING (id) + WHERE id = $1", + completed_job_id ) - .await?; - let restarted_flow_status = FlowStatus { - step: step_n, - modules: truncated_modules, - // failure_module is reset - failure_module: Box::new(FlowStatusModuleWParent { - parent_module: None, - module_status: FlowStatusModule::WaitingForPriorSteps { - id: "failure".to_string(), - }, - }), - cleanup_module, - // retry status is reset - retry: RetryStatus { fail_count: 0, failed_jobs: vec![] }, - // TODO: for now, flows with approval conditions aren't supported for restart - approval_conditions: None, - restarted_from: Some(RestartedFrom { - flow_job_id: completed_job_id, - step_id, - branch_or_iteration_n, - }), - user_states, - preprocessor_module: None, - }; + .map(|r| (r.kind, r.runnable_id, r.runnable_path, r.flow_status)) + .fetch_optional(_db) + .await? + .ok_or_else(|| { + Error::InternalErr(format!("{:?}: completed job not found", completed_job_id)) + })?; + let flow_data = cache::job::fetch_flow(_db, job_kind, runnable_id, None).await?; + let flow_status = restarted_flows_resolution( + workspace_id, + flow_data.value(), + flow_status, + RestartedFrom { flow_job_id: completed_job_id, step_id, branch_or_iteration_n }, + )?; let value = flow_data.value(); let priority = value.priority; let concurrency_key = value.concurrency_key.clone(); @@ -3463,19 +3557,19 @@ pub async fn push<'c, 'd>( let cache_ttl = value.cache_ttl.map(|x| x as i32); // Keep inserting `value` if not all workers are updated. // Starting at `v1.440`, the value is fetched on pull from the version id. - let value_o = if version.is_none() || !*MIN_VERSION_IS_AT_LEAST_1_440.read().await { + let value_o = if runnable_id.is_none() || !*MIN_VERSION_IS_AT_LEAST_1_440.read().await { Some(value.clone()) } else { // `raw_flow` is fetched on pull. 
None }; ( - version, - flow_path, + runnable_id, + runnable_path, None, JobKind::Flow, value_o, - Some(restarted_flow_status), + Some(flow_status), None, concurrency_key, concurrent_limit, @@ -3644,12 +3738,9 @@ pub async fn push<'c, 'd>( let mut tx = tx.into_tx().await?; let job_id: Uuid = if let Some(job_id) = job_id { - let conflicting_id = sqlx::query_scalar!( - "SELECT 1 FROM queue WHERE id = $1 UNION ALL select 1 FROM completed_job WHERE id = $1", - job_id - ) - .fetch_optional(&mut *tx) - .await?; + let conflicting_id = sqlx::query_scalar!("SELECT 1 FROM v2_job WHERE id = $1", job_id) + .fetch_optional(&mut *tx) + .await?; if conflicting_id.is_some() { return Err(Error::BadRequest(format!( @@ -3690,80 +3781,52 @@ pub async fn push<'c, 'd>( None }; - let raw_flow = raw_flow.map(Json); - - sqlx::query!( - "INSERT INTO job (id, workspace_id, raw_code, raw_lock, raw_flow, tag) - VALUES ($1, $2, $3, $4, $5, $6)", - job_id, - workspace_id, - raw_code, - raw_lock, - raw_flow.as_ref() as Option<&Json>, - tag, - ) - .execute(&mut *tx) - .warn_after_seconds(1) - .await?; - - let (raw_code, raw_lock, raw_flow) = if !*MIN_VERSION_IS_AT_LEAST_1_427.read().await { - (raw_code, raw_lock, raw_flow) - } else { - (None, None, None) - }; - - tracing::debug!("Pushing job {job_id} with tag {tag}, schedule_path {schedule_path:?}, script_path: {script_path:?}, email {email}, workspace_id {workspace_id}"); - let uuid = sqlx::query_scalar!( - "INSERT INTO queue - (workspace_id, id, running, parent_job, created_by, permissioned_as, scheduled_for, - script_hash, script_path, raw_code, raw_lock, args, job_kind, schedule_path, raw_flow, \ - flow_status, is_flow_step, language, started_at, same_worker, pre_run_error, email, \ - visible_to_owner, root_job, tag, concurrent_limit, concurrency_time_window_s, timeout, \ - flow_step_id, cache_ttl, priority, last_ping) - VALUES ($1, $2, $3, $4, $5, $6, COALESCE($7, now()), $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, CASE WHEN $3 THEN now() END, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, NULL) \ - RETURNING id", - workspace_id, - job_id, - is_running, + let pre_run_error = pre_run_error.map(Error::to_string); + let args = serde_json::value::to_raw_value(&args).map_err(|e| { + Error::InternalErr(format!("Could not serialize args for job {job_id}: {e:#}")) + })?; + tracing::debug!( + "Pushing job {job_id} with tag {tag}, schedule_path {schedule_path:?}, \ + script_path: {script_path:?}, email {email}, workspace_id {workspace_id}" + ); + tx = RawJob { + created_by: &user, + permissioned_as: &permissioned_as, + permissioned_as_email: &email, + kind: job_kind, + runnable_id: script_hash, + runnable_path: script_path.as_deref(), parent_job, - user, - permissioned_as, - scheduled_for_o, - script_hash, - script_path.clone(), - raw_code, - raw_lock, - Json(args) as Json, - job_kind.clone() as JobKind, - schedule_path, - raw_flow.as_ref() as Option<&Json>, - flow_status.map(Json) as Option>, - is_flow_step, - language as Option, + script_lang: language, + flow_step_id: flow_step_id.as_deref(), + flow_root_job: root_job, + schedule_path: schedule_path.as_deref(), + tag: &tag, same_worker, - pre_run_error.map(|e| e.to_string()), - email, visible_to_owner, - root_job, - tag, concurrent_limit, - if concurrent_limit.is_some() { concurrency_time_window_s } else { None }, - custom_timeout, - flow_step_id, + concurrency_time_window_s, cache_ttl, - final_priority, - ) - .fetch_one(&mut *tx) - .warn_after_seconds(1) + timeout: custom_timeout, + priority: 
final_priority, + pre_run_error: pre_run_error.as_deref(), + raw_code: raw_code.as_deref(), + raw_lock: raw_lock.as_deref(), + raw_flow: raw_flow.as_ref(), + flow_status: flow_status.as_ref(), + scheduled_for: scheduled_for_o, + running: is_running, + } + .push(tx, job_id, workspace_id, Json(args)) .await - .map_err(|e| Error::InternalErr(format!("Could not insert into queue {job_id} with tag {tag}, schedule_path {schedule_path:?}, script_path: {script_path:?}, email {email}, workspace_id {workspace_id}: {e:#}")))?; - + .map_err(|e| { + Error::InternalErr(format!( + "Could not insert into queue {job_id} with tag {tag}, \ + schedule_path {schedule_path:?}, script_path: {script_path:?}, \ + email {email}, workspace_id {workspace_id}: {e:#}" + )) + })?; tracing::debug!("Pushed {job_id}"); - // TODO: technically the job isn't queued yet, as the transaction can be rolled back. Should be solved when moving these metrics to the queue abstraction. - #[cfg(feature = "prometheus")] - if METRICS_ENABLED.load(std::sync::atomic::Ordering::Relaxed) { - QUEUE_PUSH_COUNT.inc(); - } if JOB_TOKEN.is_none() { let job_authed = match authed { @@ -3873,7 +3936,7 @@ pub async fn push<'c, 'd>( .await?; } - Ok((uuid, tx)) + Ok((job_id, tx)) } pub fn canceled_job_to_result(job: &QueuedJob) -> serde_json::Value { @@ -3885,50 +3948,19 @@ pub fn canceled_job_to_result(job: &QueuedJob) -> serde_json::Value { serde_json::json!({"message": format!("Job canceled: {reason} by {canceler}"), "name": "Canceled", "reason": reason, "canceler": canceler}) } -async fn restarted_flows_resolution( - db: &Pool, +fn restarted_flows_resolution( workspace_id: &str, - completed_flow_id: Uuid, - restart_step_id: &str, - branch_or_iteration_n: Option, -) -> Result< - ( - Option, - Option, - Arc, - i32, - Vec, - HashMap, - FlowCleanupModule, - ), - Error, -> { - let row = sqlx::query!( - "SELECT - script_path, script_hash AS \"script_hash: ScriptHash\", - job_kind AS \"job_kind: JobKind\", - flow_status AS \"flow_status: Json>\", - raw_flow AS \"raw_flow: Json>\" - FROM completed_job WHERE id = $1 and workspace_id = $2", - completed_flow_id, - workspace_id, - ) - .fetch_one(db) // TODO: should we try to use the passed-in `tx` here? 
- .await - .map_err(|err| { - Error::InternalErr(format!( - "completed job not found for UUID {} in workspace {}: {}", - completed_flow_id, workspace_id, err - )) - })?; - - let flow_data = cache::job::fetch_flow(db, row.job_kind, row.script_hash) - .or_else(|_| cache::job::fetch_preview_flow(db, &completed_flow_id, row.raw_flow)) - .await?; - let flow_value = flow_data.value(); - let flow_status = row - .flow_status - .as_ref() + flow_value: &FlowValue, + flow_status: Option<Json<Box<RawValue>>>, + restart_from: RestartedFrom, +) -> error::Result<FlowStatus> { + let RestartedFrom { + flow_job_id: completed_flow_id, + step_id: restart_step_id, + branch_or_iteration_n, + } = &restart_from; + + let mut flow_status = flow_status .and_then(|v| serde_json::from_str::<FlowStatus>(v.get()).ok()) .ok_or(Error::InternalErr(format!( "Unable to parse flow status for job {} in workspace {}", @@ -3939,20 +3971,21 @@ async fn restarted_flows_resolution( let mut dependent_module = false; let mut truncated_modules: Vec<FlowStatusModule> = vec![]; for module in flow_status.modules { + let id = module.id(); let Some(module_definition) = flow_value .modules .iter() - .find(|flow_value_module| flow_value_module.id == module.id()) + .find(|flow_value_module| &flow_value_module.id == &id) else { // skip module as it doesn't appear in the flow_value anymore continue; }; - if module.id() == restart_step_id { + if &id == restart_step_id { // if the module ID is the one we want to restart the flow at, or if it's past it in the flow, // set the module as WaitingForPriorSteps as it needs to be re-run if branch_or_iteration_n.is_none() || branch_or_iteration_n.unwrap() == 0 { // Set the module to WaitingForPriorSteps, as the entire module (i.e. all the branches) needs to be re-run - truncated_modules.push(FlowStatusModule::WaitingForPriorSteps { id: module.id() }); + truncated_modules.push(FlowStatusModule::WaitingForPriorSteps { id }); } else { // expect the module to be a branchall (resp. a loop), and resume the flow from this branch (resp.
iteration) let branch_or_iteration_n = branch_or_iteration_n.unwrap(); @@ -3981,7 +4014,7 @@ async fn restarted_flows_resolution( new_flow_jobs_success.truncate(branch_or_iteration_n); } truncated_modules.push(FlowStatusModule::InProgress { - id: module.id(), + id, job: new_flow_jobs[new_flow_jobs.len() - 1], // set to last finished job from completed flow iterator: None, flow_jobs: Some(new_flow_jobs), @@ -4019,7 +4052,7 @@ async fn restarted_flows_resolution( new_flow_jobs_success.truncate(branch_or_iteration_n); } truncated_modules.push(FlowStatusModule::InProgress { - id: module.id(), + id, job: new_flow_jobs[new_flow_jobs.len() - 1], // set to last finished job from completed flow iterator: Some(Iterator { index: branch_or_iteration_n - 1, // same deal as above, this refers to the last finished job @@ -4045,7 +4078,7 @@ async fn restarted_flows_resolution( } dependent_module = true; } else if dependent_module { - truncated_modules.push(FlowStatusModule::WaitingForPriorSteps { id: module.id() }); + truncated_modules.push(FlowStatusModule::WaitingForPriorSteps { id }); } else { // else we simply "transfer" the module from the completed flow to the new one if it's a success step_n = step_n + 1; @@ -4066,13 +4099,19 @@ async fn restarted_flows_resolution( ))); } - Ok(( - row.script_hash.map(|x| x.0), - row.script_path, - flow_data, - step_n, - truncated_modules, - flow_status.user_states, - flow_status.cleanup_module, - )) + flow_status.modules = truncated_modules; + flow_status.step = step_n; + flow_status.restarted_from = Some(restart_from); + // failure_module is reset + flow_status.failure_module = Box::new(FlowStatusModuleWParent { + parent_module: None, + module_status: FlowStatusModule::WaitingForPriorSteps { id: "failure".to_string() }, + }); + // retry status is reset + flow_status.retry = RetryStatus { fail_count: 0, failed_jobs: vec![] }; + // no preprocessor module + flow_status.preprocessor_module = None; + // TODO: for now, flows with approval conditions aren't supported for restart + flow_status.approval_conditions = None; + Ok(flow_status) } diff --git a/backend/windmill-queue/src/schedule.rs b/backend/windmill-queue/src/schedule.rs index ea9f476ab6538..dfb8170df92c4 100644 --- a/backend/windmill-queue/src/schedule.rs +++ b/backend/windmill-queue/src/schedule.rs @@ -71,7 +71,7 @@ pub async fn push_scheduled_job<'c>( let next = next.with_timezone(&chrono::Utc); let already_exists: bool = query_scalar!( - "SELECT EXISTS (SELECT 1 FROM queue WHERE workspace_id = $1 AND schedule_path = $2 AND scheduled_for = $3)", + "SELECT EXISTS (SELECT 1 FROM v2_queue WHERE workspace_id = $1 AND schedule_path = $2 AND scheduled_for = $3)", &schedule.workspace_id, &schedule.path, next diff --git a/backend/windmill-worker/Cargo.toml b/backend/windmill-worker/Cargo.toml index 7bec64455070f..ca00a3185f5dd 100644 --- a/backend/windmill-worker/Cargo.toml +++ b/backend/windmill-worker/Cargo.toml @@ -102,6 +102,7 @@ object_store = { workspace = true, optional = true} convert_case.workspace = true yaml-rust.workspace = true backon.workspace = true +ulid.workspace = true opentelemetry = { workspace = true, optional = true } bollard = { workspace = true, optional = true } diff --git a/backend/windmill-worker/src/bench.rs b/backend/windmill-worker/src/bench.rs new file mode 100644 index 0000000000000..a22ff71ea79c0 --- /dev/null +++ b/backend/windmill-worker/src/bench.rs @@ -0,0 +1,226 @@ +use serde::Serialize; +use serde_json::json; +use tokio::time::Instant; +use windmill_common::{ + 
worker::{write_file, TMP_DIR}, + DB, +}; + +#[derive(Serialize)] +pub struct BenchmarkInfo { + #[serde(skip)] + pub start: Instant, + #[serde(skip)] + pub iters: u64, + timings: Vec<BenchmarkIter>, + pub iter_durations: Vec<u64>, + pub total_duration: Option<u64>, +} + +impl BenchmarkInfo { + pub fn new() -> Self { + BenchmarkInfo { + iters: 0, + timings: vec![], + start: Instant::now(), + iter_durations: vec![], + total_duration: None, + } + } + + pub fn add_iter(&mut self, bench: BenchmarkIter, inc_iters: bool) { + if inc_iters { + self.iters += 1; + } + let elapsed_total = bench.start.elapsed().as_nanos() as u64; + self.timings.push(bench); + self.iter_durations.push(elapsed_total); + } + + pub fn write_to_file(&mut self, path: &str) -> anyhow::Result<()> { + let total_duration = self.start.elapsed().as_millis() as u64; + self.total_duration = Some(total_duration); + + println!( + "Writing benchmark {path}, duration of benchmark: {total_duration}ms and RPS: {}", + self.iters as f64 * 1000.0 / total_duration as f64 + ); + write_file(TMP_DIR, path, &serde_json::to_string(&self).unwrap()).expect("write profiling"); + Ok(()) + } +} + +#[derive(Serialize)] +pub struct BenchmarkIter { + #[serde(skip)] + pub start: Instant, + #[serde(skip)] + last_instant: Instant, + last_step: String, + timings: Vec<(String, u32)>, +} + +impl BenchmarkIter { + pub fn new() -> Self { + BenchmarkIter { + last_instant: Instant::now(), + timings: vec![], + start: Instant::now(), + last_step: String::new(), + } + } + + pub fn add_timing(&mut self, name: &str) { + let elapsed = self.last_instant.elapsed().as_nanos() as u32; + self.timings + .push((format!("{}->{}", self.last_step, name), elapsed)); + self.last_instant = Instant::now(); + self.last_step = name.to_string(); + } +} + +pub async fn benchmark_init(benchmark_jobs: usize, db: &DB) { + use std::iter; + + use windmill_common::{jobs::JobKind, scripts::ScriptLang}; + use windmill_queue::RawJob; + + let benchmark_kind = std::env::var("BENCHMARK_KIND").unwrap_or("noop".to_string()); + let uuids = Vec::from_iter(iter::repeat_with(|| ulid::Ulid::new().into()).take(benchmark_jobs)); + + if !uuids.is_empty() { + match benchmark_kind.as_str() { + "dedicated" => { + // you need to create the script first, check https://github.com/windmill-labs/windmill/blob/b76a92cfe454c686f005c65f534e29e039f3c706/benchmarks/lib.ts#L47 + let hash = sqlx::query_scalar!( + "SELECT hash FROM script WHERE path = $1 AND workspace_id = $2", + "f/benchmarks/dedicated", + "admins" + ) + .fetch_one(db) + .await + .unwrap_or_else(|_e| panic!("failed to fetch hash of the dedicated benchmark script")); + RawJob { + runnable_id: Some(hash), + runnable_path: Some("f/benchmarks/dedicated"), + kind: JobKind::Script, + script_lang: Some(ScriptLang::Bun), + tag: "admins:f/benchmarks/dedicated", + created_by: "admin", + permissioned_as: "u/admin", + permissioned_as_email: "admin@windmill.dev", + ..RawJob::default() + } + .push_many(db.begin().await.unwrap(), uuids.as_slice(), "admins", &[]) + .await + .unwrap_or_else(|_e| panic!("failed to insert dedicated jobs")) + .commit() + .await + .unwrap_or_else(|_e| panic!("failed to commit insert of dedicated jobs")); + } + "parallelflow" => { + // create the dedicated helper script invoked by the parallel flow + sqlx::query!( + "INSERT INTO script ( + summary, description, dedicated_worker, content, workspace_id, path, hash, + language, tag, created_by, lock + ) VALUES ('', '', true, $1, $2, $3, $4, $5, $6, $7, '') + ON CONFLICT (workspace_id, hash) DO NOTHING", + "export async function main() { + console.log('hello world'); + }", + "admins",
"u/admin/parallelflow", + 1234567890, + ScriptLang::Deno as ScriptLang, + "flow", + "admin", + ) + .execute(db) + .await + .unwrap_or_else(|e| panic!("failed to insert parallelflow script {e:#}")); + RawJob { + kind: JobKind::FlowPreview, + tag: "flow", + created_by: "admin", + permissioned_as: "u/admin", + permissioned_as_email: "admin@windmill.dev", + raw_flow: Some( + &serde_json::from_value(json!({ + "modules": [ + { + "id": "a", + "value": { + "type": "forloopflow", + "modules": [ + { + "id": "b", + "value": { + "path": "u/admin/parallelflow", + "type": "script", + "tag_override": "", + "input_transforms": {} + }, + "summary": "calctest" + } + ], + "iterator": { + "expr": "[...new Array(300)]", + "type": "javascript" + }, + "parallel": true, + "parallelism": 10, + "skip_failures": true + } + } + ], + "preprocessor_module": null + })) + .unwrap(), + ), + flow_status: Some( + &serde_json::from_value(json!({ + "step": 0, + "modules": [ + { + "id": "a", + "type": "WaitingForPriorSteps" + } + ], + "cleanup_module": {}, + "failure_module": { + "id": "failure", + "type": "WaitingForPriorSteps" + }, + "preprocessor_module": null + })) + .unwrap(), + ), + ..RawJob::default() + } + .push_many(db.begin().await.unwrap(), uuids.as_slice(), "admins", &[]) + .await + .unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs")) + .commit() + .await + .unwrap_or_else(|_e| panic!("failed to commit insert of parallelflow jobs")); + } + _ => { + RawJob { + kind: JobKind::Noop, + tag: "deno", + created_by: "admin", + permissioned_as: "u/admin", + permissioned_as_email: "admin@windmill.dev", + ..RawJob::default() + } + .push_many(db.begin().await.unwrap(), uuids.as_slice(), "admins", &[]) + .await + .unwrap_or_else(|_e| panic!("failed to insert noop jobs")) + .commit() + .await + .unwrap_or_else(|_e| panic!("failed to commit insert of noop jobs")); + } + } + } +} diff --git a/backend/windmill-worker/src/common.rs b/backend/windmill-worker/src/common.rs index 61ec9cee6a33c..1eb50f0fe6204 100644 --- a/backend/windmill-worker/src/common.rs +++ b/backend/windmill-worker/src/common.rs @@ -265,7 +265,7 @@ pub async fn transform_json_value( } Value::String(y) if y.starts_with("$") => { let flow_path = if let Some(uuid) = job.parent_job { - sqlx::query_scalar!("SELECT script_path FROM queue WHERE id = $1", uuid) + sqlx::query_scalar!("SELECT script_path FROM v2_queue WHERE id = $1", uuid) .fetch_optional(db) .await? .flatten() @@ -399,7 +399,7 @@ pub async fn get_reserved_variables( db: &sqlx::Pool, ) -> Result, Error> { let flow_path = if let Some(uuid) = job.parent_job { - sqlx::query_scalar!("SELECT script_path FROM queue WHERE id = $1", uuid) + sqlx::query_scalar!("SELECT script_path FROM v2_queue WHERE id = $1", uuid) .fetch_optional(db) .await? .flatten() diff --git a/backend/windmill-worker/src/handle_child.rs b/backend/windmill-worker/src/handle_child.rs index 5dcdbfa28763e..6d824ea0d8e4b 100644 --- a/backend/windmill-worker/src/handle_child.rs +++ b/backend/windmill-worker/src/handle_child.rs @@ -206,17 +206,17 @@ pub async fn handle_child( let set_reason = async { if matches!(kill_reason, KillReason::Timeout { .. 
}) { - if let Err(err) = sqlx::query( + if let Err(err) = sqlx::query!( r#" - UPDATE queue + UPDATE v2_queue SET canceled = true , canceled_by = 'timeout' , canceled_reason = $1 WHERE id = $2 "#, + format!("duration > {}", timeout_duration.as_secs()), + job_id ) - .bind(format!("duration > {}", timeout_duration.as_secs())) - .bind(job_id) .execute(&db) .await { @@ -644,9 +644,14 @@ where } } if job_id != Uuid::nil() { - let (canceled, canceled_by, canceled_reason, already_completed) = sqlx::query_as::<_, (bool, Option, Option, bool)>("UPDATE queue SET mem_peak = $1, last_ping = now() WHERE id = $2 RETURNING canceled, canceled_by, canceled_reason, false") - .bind(*mem_peak) - .bind(job_id) + let (canceled, canceled_by, canceled_reason, already_completed) = sqlx::query!( + "UPDATE v2_queue SET mem_peak = $1, last_ping = now() + WHERE id = $2 + RETURNING canceled AS \"canceled!\", canceled_by, canceled_reason", + *mem_peak, + job_id + ) + .map(|x| (x.canceled, x.canceled_by, x.canceled_reason, false)) .fetch_optional(&db) .await .unwrap_or_else(|e| { diff --git a/backend/windmill-worker/src/lib.rs b/backend/windmill-worker/src/lib.rs index d887f14d81a1f..9a0587fd73d88 100644 --- a/backend/windmill-worker/src/lib.rs +++ b/backend/windmill-worker/src/lib.rs @@ -38,6 +38,9 @@ mod worker_flow; mod worker_lockfiles; #[cfg(feature = "oracledb")] mod oracledb_executor; +#[cfg(feature = "benchmark")] +pub mod bench; + pub use worker::*; pub use result_processor::handle_job_error; diff --git a/backend/windmill-worker/src/python_executor.rs b/backend/windmill-worker/src/python_executor.rs index b8265b71f7350..508381f1baed4 100644 --- a/backend/windmill-worker/src/python_executor.rs +++ b/backend/windmill-worker/src/python_executor.rs @@ -191,8 +191,8 @@ pub async fn uv_pip_compile( "SELECT lockfile FROM pip_resolution_cache WHERE hash = $1", req_hash ) - .fetch_optional(db) - .await? + .fetch_optional(db) + .await? { logs.push_str(&format!("\nfound cached resolution: {req_hash}")); return Ok(cached); @@ -269,8 +269,8 @@ pub async fn uv_pip_compile( false, occupancy_metrics, ) - .await - .map_err(|e| Error::ExecutionErr(format!("Lock file generation failed: {e:?}")))?; + .await + .map_err(|e| Error::ExecutionErr(format!("Lock file generation failed: {e:?}")))?; } else { let mut args = vec![ "pip", @@ -368,13 +368,13 @@ pub async fn uv_pip_compile( false, occupancy_metrics, ) - .await - .map_err(|e| { - Error::ExecutionErr(format!( - "Lock file generation failed.\n\ncommand: {uv_cmd} {}\n\n{e:?}", - args.join(" ") - )) - })?; + .await + .map_err(|e| { + Error::ExecutionErr(format!( + "Lock file generation failed.\n\ncommand: {uv_cmd} {}\n\n{e:?}", + args.join(" ") + )) + })?; } let path_lock = format!("{job_dir}/requirements.txt"); @@ -396,30 +396,30 @@ pub async fn uv_pip_compile( } /** - Iterate over all python paths and if same folder has same name multiple times, - then merge the content and put to /site-packages +Iterate over all python paths and if same folder has same name multiple times, +then merge the content and put to /site-packages - Solves problem with imports for some dependencies. +Solves problem with imports for some dependencies. - Default layout (/windmill/cache/): +Default layout (/windmill/cache/): - dep==x.y.z - └── X - └── A - dep-ext==x.y.z - └── X - └── B +dep==x.y.z +└── X + └── A +dep-ext==x.y.z +└── X + └── B - In this case python would be confused with finding B module. +In this case python would be confused with finding B module. 
- This function will convert it to (/): +This function will convert it to (/): - site-packages - └── X - ├── A - └── B +site-packages +└── X + ├── A + └── B - This way python has no problems with finding correct module +This way python has no problems with finding correct module */ #[tracing::instrument(level = "trace", skip_all)] async fn postinstall( @@ -489,7 +489,7 @@ async fn postinstall( "\n\nCopying some packages from cache to job_dir...\n".to_string(), db, ) - .await; + .await; // Remove PATHs we just moved additional_python_paths.retain(|e| !paths_to_remove.contains(e)); // Instead add shared path @@ -555,7 +555,7 @@ pub async fn handle_python_job( canceled_by, &mut Some(occupancy_metrics), ) - .await?; + .await?; tracing::debug!("Finished handling python dependencies"); @@ -572,7 +572,7 @@ pub async fn handle_python_job( "\n\n--- PYTHON CODE EXECUTION ---\n".to_string(), db, ) - .await; + .await; let ( import_loader, @@ -592,7 +592,7 @@ pub async fn handle_python_job( job.args.as_ref(), false, ) - .await?; + .await?; tracing::debug!("Finished preparing wrapper"); @@ -808,7 +808,7 @@ mount {{ false, &mut Some(occupancy_metrics), ) - .await?; + .await?; if apply_preprocessor { let args = read_file(&format!("{job_dir}/args.json")) @@ -1101,8 +1101,8 @@ async fn handle_python_deps( db, &mut already_visited, ) - .await? - .join("\n"); + .await? + .join("\n"); if !requirements.is_empty() { requirements = uv_pip_compile( job_id, @@ -1117,10 +1117,10 @@ async fn handle_python_deps( annotations.no_uv || annotations.no_uv_compile, annotations.no_cache, ) - .await - .map_err(|e| { - Error::ExecutionErr(format!("pip compile failed: {}", e.to_string())) - })?; + .await + .map_err(|e| { + Error::ExecutionErr(format!("pip compile failed: {}", e.to_string())) + })?; } &requirements } @@ -1144,7 +1144,7 @@ async fn handle_python_deps( annotations.no_uv || annotations.no_uv_install, false, ) - .await?; + .await?; additional_python_paths.append(&mut venv_path); } Ok(additional_python_paths) @@ -1402,7 +1402,7 @@ pub async fn handle_python_reqs( ), db, ) - .await; + .await; // Drop lock, so next print success can fire } no_uv_install |= *USE_PIP_INSTALL; @@ -1450,16 +1450,16 @@ pub async fn handle_python_reqs( } else { NSJAIL_CONFIG_DOWNLOAD_PY_CONTENT }) - .replace("{WORKER_DIR}", &worker_dir) - .replace( - "{CACHE_DIR}", - if no_uv_install { - PIP_CACHE_DIR - } else { - PY311_CACHE_DIR - }, - ) - .replace("{CLONE_NEWUSER}", &(!*DISABLE_NUSER).to_string()), + .replace("{WORKER_DIR}", &worker_dir) + .replace( + "{CACHE_DIR}", + if no_uv_install { + PIP_CACHE_DIR + } else { + PY311_CACHE_DIR + }, + ) + .replace("{CLONE_NEWUSER}", &(!*DISABLE_NUSER).to_string()), )?; }; @@ -1500,7 +1500,7 @@ pub async fn handle_python_reqs( format!("\nenv deps from local cache: {}\n", in_cache.join(", ")), db, ) - .await; + .await; } let (kill_tx, ..) 
= tokio::sync::broadcast::channel::<()>(1); @@ -1574,7 +1574,7 @@ pub async fn handle_python_reqs( sqlx::query_scalar::<_, bool> (r#" - UPDATE queue + UPDATE v2_queue SET last_ping = now() , mem_peak = $1 WHERE id = $2 @@ -1763,7 +1763,7 @@ pub async fn handle_python_reqs( ), db, ) - .await; + .await; pids.lock().await.get_mut(i).and_then(|e| e.take()); return Err(e.into()); } @@ -1848,7 +1848,7 @@ pub async fn handle_python_reqs( start, db, // ) - .await; + .await; #[cfg(all(feature = "enterprise", feature = "parquet", unix))] if s3_push { @@ -1954,8 +1954,8 @@ pub async fn start_worker( None, None, ) - .await - .to_vec(); + .await + .to_vec(); let context_envs = build_envs_map(context).await; let additional_python_paths = handle_python_deps( @@ -1972,7 +1972,7 @@ pub async fn start_worker( &mut canceled_by, &mut None, ) - .await?; + .await?; let _args = None; let ( @@ -2070,7 +2070,7 @@ for line in sys.stdin: None, None, ) - .await; + .await; let mut proc_envs = HashMap::new(); let additional_python_paths_folders = additional_python_paths.iter().join(":"); @@ -2099,5 +2099,5 @@ for line in sys.stdin: script_path, "python", ) - .await + .await } diff --git a/backend/windmill-worker/src/result_processor.rs b/backend/windmill-worker/src/result_processor.rs index 5812b121750df..d15d510abc09d 100644 --- a/backend/windmill-worker/src/result_processor.rs +++ b/backend/windmill-worker/src/result_processor.rs @@ -26,7 +26,7 @@ use windmill_common::{ }; #[cfg(feature = "benchmark")] -use windmill_common::bench::{BenchmarkInfo, BenchmarkIter}; +use crate::bench::{BenchmarkInfo, BenchmarkIter}; use windmill_queue::{append_logs, get_queued_job, CanceledBy, WrappedError}; diff --git a/backend/windmill-worker/src/worker.rs b/backend/windmill-worker/src/worker.rs index 0a0599047f4de..94015812ced5f 100644 --- a/backend/windmill-worker/src/worker.rs +++ b/backend/windmill-worker/src/worker.rs @@ -90,12 +90,26 @@ use tokio::{ use rand::Rng; use crate::{ - bash_executor::{handle_bash_job, handle_powershell_job}, bun_executor::handle_bun_job, common::{ + bash_executor::{handle_bash_job, handle_powershell_job}, + bun_executor::handle_bun_job, + common::{ build_args_map, cached_result_path, get_cached_resource_value_if_valid, get_reserved_variables, update_worker_ping_for_failed_init_script, OccupancyMetrics, - }, csharp_executor::handle_csharp_job, deno_executor::handle_deno_job, go_executor::handle_go_job, graphql_executor::do_graphql, handle_child::SLOW_LOGS, handle_job_error, job_logger::NO_LOGS_AT_ALL, js_eval::{eval_fetch_timeout, transpile_ts}, pg_executor::do_postgresql, result_processor::{process_result, start_background_processor}, worker_flow::{handle_flow, update_flow_status_in_progress}, worker_lockfiles::{ + }, + csharp_executor::handle_csharp_job, + deno_executor::handle_deno_job, + go_executor::handle_go_job, + graphql_executor::do_graphql, + handle_child::SLOW_LOGS, + handle_job_error, + job_logger::NO_LOGS_AT_ALL, + js_eval::{eval_fetch_timeout, transpile_ts}, + pg_executor::do_postgresql, + result_processor::{process_result, start_background_processor}, + worker_flow::{handle_flow, update_flow_status_in_progress}, + worker_lockfiles::{ handle_app_dependency_job, handle_dependency_job, handle_flow_dependency_job, - } + }, }; #[cfg(feature = "rust")] @@ -132,7 +146,7 @@ use crate::mssql_executor::do_mssql; use crate::bigquery_executor::do_bigquery; #[cfg(feature = "benchmark")] -use windmill_common::bench::{benchmark_init, BenchmarkInfo, BenchmarkIter}; +use crate::bench::{benchmark_init, 
BenchmarkInfo, BenchmarkIter}; use windmill_common::add_time; @@ -325,7 +339,6 @@ const DOTNET_DEFAULT_PATH: &str = "C:\\Program Files\\dotnet\\dotnet.exe"; #[cfg(unix)] const DOTNET_DEFAULT_PATH: &str = "/usr/bin/dotnet"; - lazy_static::lazy_static! { pub static ref JOB_TOKEN: Option = std::env::var("JOB_TOKEN").ok(); @@ -995,9 +1008,9 @@ pub async fn run_worker( let is_dedicated_worker: bool = WORKER_CONFIG.read().await.dedicated_worker.is_some(); #[cfg(feature = "benchmark")] - let benchmark_jobs: i32 = std::env::var("BENCHMARK_JOBS") + let benchmark_jobs: usize = std::env::var("BENCHMARK_JOBS") .unwrap_or("5000".to_string()) - .parse::() + .parse::() .unwrap(); #[cfg(feature = "benchmark")] @@ -1225,7 +1238,7 @@ pub async fn run_worker( tokio::task::spawn( (async move { tracing::info!(worker = %worker_name, hostname = %hostname, "vacuuming queue"); - if let Err(e) = sqlx::query!("VACUUM (skip_locked) queue") + if let Err(e) = sqlx::query!("VACUUM (skip_locked) v2_job_queue, v2_job_runtime, v2_job_flow_runtime") .execute(&db2) .await { @@ -1272,7 +1285,7 @@ pub async fn run_worker( same_worker_job.job_id ); let r = sqlx::query_as::<_, PulledJob>( - "UPDATE queue SET last_ping = now() WHERE id = $1 RETURNING *", + "UPDATE v2_queue SET last_ping = now() WHERE id = $1 RETURNING *", ) .bind(same_worker_job.job_id) .fetch_optional(db) @@ -1948,7 +1961,7 @@ async fn handle_queued_job( .await?; } else if let Some(parent_job) = job.parent_job { if let Err(e) = sqlx::query_scalar!( - "UPDATE queue SET flow_status = jsonb_set(jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], COALESCE(flow_status->$1, '{}'::jsonb)), array[$1, 'started_at'], to_jsonb(now()::text)) WHERE id = $2 AND workspace_id = $3", + "UPDATE v2_queue SET flow_status = jsonb_set(jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], COALESCE(flow_status->$1, '{}'::jsonb)), array[$1, 'started_at'], to_jsonb(now()::text)) WHERE id = $2 AND workspace_id = $3", &job.id.to_string(), parent_job, &job.workspace_id @@ -1961,22 +1974,9 @@ async fn handle_queued_job( } let started = Instant::now(); - // Pre-fetch preview jobs raw values if necessary. - // The `raw_*` values passed to this function are the original raw values from `queue` tables, - // they are kept for backward compatibility as they have been moved to the `job` table. - let preview_data = match (job.job_kind, job.script_hash) { - ( - JobKind::Preview - | JobKind::Dependencies - | JobKind::FlowPreview - | JobKind::Flow - | JobKind::FlowDependencies, - None, - ) => Some(cache::job::fetch_preview(db, &job.id, raw_lock, raw_code, raw_flow).await?), - _ => None, - }; + let raw_data = RawData::from_raw(raw_code, raw_lock, raw_flow)?; let cached_res_path = if job.cache_ttl.is_some() { - Some(cached_result_path(db, &client.get_authed().await, &job, preview_data.as_ref()).await) + Some(cached_result_path(db, &client.get_authed().await, &job, raw_data.as_ref()).await) } else { None }; @@ -2017,10 +2017,11 @@ async fn handle_queued_job( } }; if job.is_flow() { - let flow_data = match preview_data { + let runnable_id = job.script_hash.map(|x| x.0); + let flow_data = match raw_data { Some(RawData::Flow(data)) => data, // Not a preview: fetch from the cache or the database. 
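+ // note: the added fourth argument to `fetch_flow` is an optional raw-flow fallback (None here); the worker_flow.rs hunk below passes the job's `raw_flow` through the same parameter.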
- _ => cache::job::fetch_flow(db, job.job_kind, job.script_hash).await?, + _ => cache::job::fetch_flow(db, job.job_kind, runnable_id, None).await?, }; handle_flow( job, @@ -2078,7 +2079,7 @@ async fn handle_queued_job( JobKind::Dependencies => { handle_dependency_job( &job, - preview_data.as_ref(), + raw_data.as_ref(), &mut mem_peak, &mut canceled_by, job_dir, @@ -2094,7 +2095,7 @@ async fn handle_queued_job( JobKind::FlowDependencies => { handle_flow_dependency_job( &job, - preview_data.as_ref(), + raw_data.as_ref(), &mut mem_peak, &mut canceled_by, job_dir, @@ -2130,13 +2131,13 @@ async fn handle_queued_job( .unwrap_or_else(|| serde_json::from_str("{}").unwrap())), _ => { let metric_timer = Instant::now(); - let preview_data = preview_data.and_then(|data| match data { + let raw_data = raw_data.and_then(|data| match data { RawData::Script(data) => Some(data), _ => None, }); let r = handle_code_execution_job( job.as_ref(), - preview_data, + raw_data, db, client, job_dir, diff --git a/backend/windmill-worker/src/worker_flow.rs b/backend/windmill-worker/src/worker_flow.rs index b320b9a4456df..adbd263aa9355 100644 --- a/backend/windmill-worker/src/worker_flow.rs +++ b/backend/windmill-worker/src/worker_flow.rs @@ -11,6 +11,9 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; +#[cfg(feature = "benchmark")] +use crate::bench::BenchmarkIter; + use crate::common::{cached_result_path, save_in_cache}; use crate::js_eval::{eval_timeout, IdContext}; use crate::{ @@ -18,7 +21,6 @@ use crate::{ KEEP_JOB_DIR, }; use anyhow::Context; -use futures::TryFutureExt; use mappable_rc::Marc; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue; @@ -30,8 +32,6 @@ use tracing::instrument; use uuid::Uuid; use windmill_common::add_time; use windmill_common::auth::JobPerms; -#[cfg(feature = "benchmark")] -use windmill_common::bench::BenchmarkIter; use windmill_common::cache::{self, RawData}; use windmill_common::db::Authed; use windmill_common::flow_status::{ @@ -42,7 +42,6 @@ use windmill_common::jobs::{ script_hash_to_tag_and_limits, script_path_to_payload, BranchResults, JobKind, JobPayload, OnBehalfOf, QueuedJob, RawCode, ENTRYPOINT_OVERRIDE, }; -use windmill_common::scripts::ScriptHash; use windmill_common::users::username_to_permissioned_as; use windmill_common::utils::WarnAfterExt; use windmill_common::worker::to_raw_value; @@ -220,11 +219,11 @@ pub async fn update_flow_status_after_job_completion_internal( let (job_kind, script_hash, old_status, raw_flow) = sqlx::query!( "SELECT - job_kind AS \"job_kind: JobKind\", - script_hash AS \"script_hash: ScriptHash\", + job_kind AS \"job_kind!: JobKind\", + script_hash, flow_status AS \"flow_status!: Json>\", raw_flow AS \"raw_flow: Json>\" - FROM queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", + FROM v2_queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", flow, w_id ) @@ -248,9 +247,7 @@ pub async fn update_flow_status_after_job_completion_internal( )) })?; - let flow_data = cache::job::fetch_flow(db, job_kind, script_hash) - .or_else(|_| cache::job::fetch_preview_flow(db, &flow, raw_flow)) - .await?; + let flow_data = cache::job::fetch_flow(db, job_kind, script_hash, raw_flow).await?; let flow_value = flow_data.value(); let module_step = Step::from_i32_and_len(old_status.step, old_status.modules.len()); @@ -356,7 +353,7 @@ pub async fn update_flow_status_after_job_completion_internal( let args = sqlx::query_as::<_, RowArgs>( "SELECT args - FROM queue + FROM v2_queue WHERE id = $2", ) 
.bind(old_status.step) @@ -412,7 +409,7 @@ pub async fn update_flow_status_after_job_completion_internal( if matches!(module_step, Step::PreprocessorStep) { sqlx::query!( - "UPDATE queue SET args = (select result FROM completed_job WHERE id = $1) WHERE id = $2", + "UPDATE v2_queue SET args = (select result FROM v2_completed_job WHERE id = $1) WHERE id = $2", job_id_for_status, flow ).execute(db).await.map_err(|e| { @@ -420,7 +417,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; sqlx::query!( - r#"UPDATE completed_job SET args = '{"reason":"PREPROCESSOR_ARGS_ARE_DISCARDED"}'::jsonb WHERE id = $1"#, + r#"UPDATE v2_completed_job SET args = '{"reason":"PREPROCESSOR_ARGS_ARE_DISCARDED"}'::jsonb WHERE id = $1"#, job_id_for_status ) .execute(db) @@ -455,7 +452,7 @@ pub async fn update_flow_status_after_job_completion_internal( let nindex = if let Some(position) = position { sqlx::query_scalar!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET( JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4), ARRAY['modules', $1::TEXT, 'iterator', 'index'], @@ -470,7 +467,7 @@ pub async fn update_flow_status_after_job_completion_internal( json!(success) )} else { sqlx::query_scalar!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'iterator', 'index'], ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb), last_ping = NULL WHERE id = $2 @@ -502,7 +499,7 @@ pub async fn update_flow_status_after_job_completion_internal( let nindex = if let Some(position) = position { sqlx::query_scalar!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET( JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4), ARRAY['modules', $1::TEXT, 'branchall', 'branch'], ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb), @@ -515,7 +512,7 @@ pub async fn update_flow_status_after_job_completion_internal( json!(success) ) } else { sqlx::query_scalar!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'branchall', 'branch'], ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb), last_ping = NULL WHERE id = $2 @@ -550,7 +547,7 @@ pub async fn update_flow_status_after_job_completion_internal( let new_status = if skip_loop_failures || sqlx::query_scalar!( - "SELECT success FROM completed_job WHERE id = ANY($1)", + "SELECT success AS \"success!\" FROM v2_completed_job WHERE id = ANY($1)", jobs.as_slice() ) .fetch_all(&mut *tx) @@ -609,7 +606,7 @@ pub async fn update_flow_status_after_job_completion_internal( if parallelism.is_some() { sqlx::query!( - "UPDATE queue SET suspend = 0 WHERE parent_job = $1 AND suspend = $2 AND (flow_status->'step')::int = 0", + "UPDATE v2_queue SET suspend = 0 WHERE parent_job = $1 AND suspend = $2 AND (flow_status->'step')::int = 0", flow, nindex ) @@ -713,7 +710,7 @@ pub async fn update_flow_status_after_job_completion_internal( let is_skipped = if current_module.as_ref().is_some_and(|m| m.skip_if.is_some()) { sqlx::query_scalar!( - "SELECT job_kind = 'identity' FROM completed_job WHERE id = $1", + "SELECT job_kind = 'identity' FROM v2_completed_job WHERE id = $1", job_id_for_status ) .fetch_one(db) @@ -766,7 +763,7 @@ pub async fn update_flow_status_after_job_completion_internal( let step_counter = if inc_step_counter { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, 
ARRAY['step'], $1) WHERE id = $2", json!(old_status.step + 1), @@ -790,7 +787,7 @@ pub async fn update_flow_status_after_job_completion_internal( if let Some(new_status) = new_status.as_ref() { if is_failure_step { let parent_module = sqlx::query_scalar!( - "SELECT flow_status->'failure_module'->>'parent_module' FROM queue WHERE id = $1", + "SELECT flow_status->'failure_module'->>'parent_module' FROM v2_queue WHERE id = $1", flow ) .fetch_one(&mut *tx) @@ -801,7 +798,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['failure_module'], $1) WHERE id = $2", json!(FlowStatusModuleWParent { @@ -819,7 +816,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; } else if matches!(module_step, Step::PreprocessorStep) { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1) WHERE id = $2", json!(new_status), @@ -834,7 +831,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; } else { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2) WHERE id = $3", old_status.step.to_string(), @@ -849,9 +846,9 @@ pub async fn update_flow_status_after_job_completion_internal( if let Some(job_result) = new_status.job_result() { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET leaf_jobs = JSONB_SET(coalesce(leaf_jobs, '{}'::jsonb), ARRAY[$1::TEXT], $2) - WHERE COALESCE((SELECT root_job FROM queue WHERE id = $3), $3) = id", + WHERE COALESCE((SELECT root_job FROM v2_queue WHERE id = $3), $3) = id", new_status.id(), json!(job_result), flow @@ -883,7 +880,7 @@ pub async fn update_flow_status_after_job_completion_internal( let args = sqlx::query_as::<_, RowArgs>( "SELECT args - FROM queue + FROM v2_queue WHERE id = $2", ) .bind(old_status.step) @@ -925,20 +922,19 @@ pub async fn update_flow_status_after_job_completion_internal( if old_status.retry.fail_count > 0 && matches!(&new_status, Some(FlowStatusModule::Success { .. 
})) { - sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = flow_status - 'retry' - WHERE id = $1 - RETURNING flow_status", + WHERE id = $1", + flow ) - .bind(flow) .execute(&mut *tx) .await .context("remove flow status retry")?; } let flow_job = sqlx::query_as::<_, QueuedJob>( - "SELECT * FROM queue WHERE id = $1 AND workspace_id = $2", + "SELECT * FROM v2_queue WHERE id = $1 AND workspace_id = $2", ) .bind(flow) .bind(w_id) @@ -1034,7 +1030,7 @@ pub async fn update_flow_status_after_job_completion_internal( _cleanup_module.flow_jobs_to_clean ); sqlx::query!( - "UPDATE completed_job + "UPDATE v2_completed_job SET logs = '##DELETED##', args = '{}'::jsonb, result = '{}'::jsonb WHERE id = ANY($1)", &_cleanup_module.flow_jobs_to_clean, @@ -1042,7 +1038,7 @@ pub async fn update_flow_status_after_job_completion_internal( .execute(db) .await .map_err(|e| { - Error::InternalErr(format!("error while cleaning up completed_job: {e:#}")) + Error::InternalErr(format!("error while cleaning up completed job: {e:#}")) })?; } } @@ -1190,7 +1186,7 @@ async fn set_success_in_flow_job_success<'c>( let position = find_flow_job_index(flow_jobs, job_id_for_status); if let Some(position) = position { sqlx::query!( - "UPDATE queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4) WHERE id = $2", + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4) WHERE id = $2", old_status.step as i32, flow, position as i32, @@ -1214,7 +1210,7 @@ async fn retrieve_flow_jobs_results( ) -> error::Result> { let results = sqlx::query_as::<_, BranchResults>( "SELECT result, id - FROM completed_job + FROM v2_completed_job WHERE id = ANY($1) AND workspace_id = $2", ) .bind(job_uuids.as_slice()) @@ -1246,24 +1242,27 @@ async fn compute_skip_branchall_failure<'c>( flow_module: Option<&FlowModule>, ) -> Result, Error> { let branch = if parallel { - sqlx::query_scalar!("SELECT script_path FROM completed_job WHERE id = $1", job) - .fetch_one(db) - .await - .map_err(|e| { - Error::InternalErr(format!("error during retrieval of branchall index: {e:#}")) - })? - .map(|p| { - BRANCHALL_INDEX_RE - .captures(&p) - .map(|x| x.get(1).unwrap().as_str().parse::().ok()) - .flatten() - .ok_or(Error::InternalErr(format!( - "could not parse branchall index from path: {p}" - ))) - }) - .ok_or_else(|| { - Error::InternalErr(format!("no branchall script path found for job {job}")) - })?? + sqlx::query_scalar!( + "SELECT script_path FROM v2_completed_job WHERE id = $1", + job + ) + .fetch_one(db) + .await + .map_err(|e| { + Error::InternalErr(format!("error during retrieval of branchall index: {e:#}")) + })? + .map(|p| { + BRANCHALL_INDEX_RE + .captures(&p) + .map(|x| x.get(1).unwrap().as_str().parse::().ok()) + .flatten() + .ok_or(Error::InternalErr(format!( + "could not parse branchall index from path: {p}" + ))) + }) + .ok_or_else(|| { + Error::InternalErr(format!("no branchall script path found for job {job}")) + })?? 
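+ // the double `?` unwraps two layers: the outer Result from `ok_or_else` (no script path found) and the inner one from parsing the branchall index out of the path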
} else { branch as i32 }; @@ -1355,41 +1354,51 @@ pub async fn update_flow_status_in_progress( let step = get_step_of_flow_status(db, flow).await?; match step { Step::Step(step) => { - sqlx::query(&format!( - "UPDATE queue - SET flow_status = jsonb_set(jsonb_set(flow_status, '{{modules, {step}, job}}', $1), '{{modules, {step}, type}}', $2) - WHERE id = $3 AND workspace_id = $4", - )) - .bind(json!(job_in_progress.to_string())) - .bind(json!("InProgress")) - .bind(flow) - .bind(w_id) + sqlx::query!( + "UPDATE v2_queue + SET flow_status = jsonb_set( + jsonb_set(flow_status, ARRAY['modules', $4::INTEGER::TEXT, 'job'], to_jsonb($1::UUID::TEXT)), + ARRAY['modules', $4::INTEGER::TEXT, 'type'], + to_jsonb('InProgress'::text) + ) + WHERE id = $2 AND workspace_id = $3", + job_in_progress, + flow, + w_id, + step as i32 + ) .execute(db) .await?; } Step::PreprocessorStep => { - sqlx::query(&format!( - "UPDATE queue - SET flow_status = jsonb_set(jsonb_set(flow_status, '{{preprocessor_module, job}}', $1), '{{preprocessor_module, type}}', $2) - WHERE id = $3 AND workspace_id = $4", - )) - .bind(json!(job_in_progress.to_string())) - .bind(json!("InProgress")) - .bind(flow) - .bind(w_id) + sqlx::query!( + "UPDATE v2_queue + SET flow_status = jsonb_set( + jsonb_set(flow_status, ARRAY['preprocessor_module', 'job'], to_jsonb($1::UUID::TEXT)), + ARRAY['preprocessor_module', 'type'], + to_jsonb('InProgress'::text) + ) + WHERE id = $2 AND workspace_id = $3", + job_in_progress, + flow, + w_id + ) .execute(db) .await?; } Step::FailureStep => { - sqlx::query(&format!( - "UPDATE queue - SET flow_status = jsonb_set(jsonb_set(flow_status, '{{failure_module, job}}', $1), '{{failure_module, type}}', $2) - WHERE id = $3 AND workspace_id = $4", - )) - .bind(json!(job_in_progress.to_string())) - .bind(json!("InProgress")) - .bind(flow) - .bind(w_id) + sqlx::query!( + "UPDATE v2_queue + SET flow_status = jsonb_set( + jsonb_set(flow_status, ARRAY['failure_module', 'job'], to_jsonb($1::UUID::TEXT)), + ARRAY['failure_module', 'type'], + to_jsonb('InProgress'::text) + ) + WHERE id = $2 AND workspace_id = $3", + job_in_progress, + flow, + w_id + ) .execute(db) .await?; } @@ -1420,7 +1429,7 @@ impl Step { #[instrument(level = "trace", skip_all)] pub async fn get_step_of_flow_status(db: &DB, id: Uuid) -> error::Result { let r = sqlx::query!( - "SELECT (flow_status->'step')::integer as step, jsonb_array_length(flow_status->'modules') as len FROM queue WHERE id = $1", + "SELECT (flow_status->'step')::integer as step, jsonb_array_length(flow_status->'modules') as len FROM v2_queue WHERE id = $1", id ) .fetch_one(db) @@ -1572,11 +1581,6 @@ pub struct ResumeRow { pub resume_id: i32, } -#[derive(FromRow)] -pub struct RawArgs { - pub args: Option>>>, -} - lazy_static::lazy_static! { static ref CRASH_FORCEFULLY_AT_STEP: Option = std::env::var("CRASH_FORCEFULLY_AT_STEP") .ok() @@ -1683,7 +1687,7 @@ async fn push_next_flow_job( .await?; if no_flow_overlap { let overlapping = sqlx::query_scalar!( - "SELECT id FROM queue WHERE schedule_path = $1 AND workspace_id = $2 AND id != $3 AND running = true", + "SELECT id AS \"id!\" FROM v2_queue WHERE schedule_path = $1 AND workspace_id = $2 AND id != $3 AND running = true", flow_job.schedule_path.as_ref().unwrap(), flow_job.workspace_id.as_str(), flow_job.id @@ -1796,7 +1800,7 @@ async fn push_next_flow_job( * * This only works because jobs::resume_job does the same thing. 
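+ * (both paths take a row-level lock on the flow job's queue row via FOR UPDATE, so a concurrent resume cannot interleave with this read)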
*/ sqlx::query_scalar!( - "SELECT null FROM queue WHERE id = $1 FOR UPDATE", + "SELECT null FROM v2_job_queue WHERE id = $1 FOR UPDATE", flow_job.id ) .fetch_one(&mut *tx) @@ -1872,13 +1876,13 @@ async fn push_next_flow_job( user_groups_required: user_groups_required, self_approval_disabled: self_approval_disabled, }; - sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['approval_conditions'], $1) WHERE id = $2", + json!(approval_conditions), + flow_job.id ) - .bind(json!(approval_conditions)) - .bind(flow_job.id) .execute(&mut *tx) .await?; } @@ -1904,32 +1908,33 @@ async fn push_next_flow_job( resume_messages.push(to_raw_value(&js)); } - sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'approvers'], $2) WHERE id = $3", + (status.step - 1).to_string(), + json!(resumes + .into_iter() + .map(|r| Approval { + resume_id: r.resume_id as u16, + approver: r + .approver.clone() + .unwrap_or_else(|| "unknown".to_string()) + }) + .collect::>() + ), + flow_job.id ) - .bind(status.step - 1) - .bind(json!(resumes - .into_iter() - .map(|r| Approval { - resume_id: r.resume_id as u16, - approver: r - .approver.clone() - .unwrap_or_else(|| "unknown".to_string()) - }) - .collect::>())) - .bind(flow_job.id) .execute(&mut *tx) .await?; // Remove the approval conditions from the flow status - sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = flow_status - 'approval_conditions' WHERE id = $1", + flow_job.id ) - .bind(flow_job.id) .execute(&mut *tx) .await?; @@ -1942,22 +1947,22 @@ async fn push_next_flow_job( FlowStatusModule::WaitingForPriorSteps { .. } ) && is_disapproved.is_none() { - sqlx::query( - "UPDATE queue SET + sqlx::query!( + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', flow_status->>'step'::text], $1), suspend = $2, suspend_until = now() + $3 WHERE id = $4", + json!(FlowStatusModule::WaitingForEvents { id: status_module.id(), count: required_events, job: last }), + (required_events - resume_messages.len() as u16) as i32, + Duration::from_secs(suspend.timeout.map(|t| t.into()).unwrap_or_else(|| 30 * 60)) as Duration, + flow_job.id, ) - .bind(json!(FlowStatusModule::WaitingForEvents { id: status_module.id(), count: required_events, job: last })) - .bind((required_events - resume_messages.len() as u16) as i32) - .bind(Duration::from_secs(suspend.timeout.map(|t| t.into()).unwrap_or_else(|| 30 * 60))) - .bind(flow_job.id) .execute(&mut *tx) .await?; sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET last_ping = null WHERE id = $1 AND last_ping = $2", flow_job.id, @@ -2128,15 +2133,15 @@ async fn push_next_flow_job( scheduled_for_o = Some(from_now(retry_in)); status.retry.failed_jobs.push(job.clone()); - sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = JSONB_SET(JSONB_SET(flow_status, ARRAY['retry'], $1), ARRAY['modules', $3::TEXT, 'failed_retries'], $4) WHERE id = $2", + json!(RetryStatus { fail_count, ..status.retry.clone() }), + flow_job.id, + status.step.to_string(), + json!(status.retry.failed_jobs) ) - .bind(json!(RetryStatus { fail_count, ..status.retry.clone() })) - .bind(flow_job.id) - .bind(status.step) - .bind(json!(status.retry.failed_jobs)) .execute(db) .warn_after_seconds(2) .await @@ -2165,13 +2170,13 @@ async fn push_next_flow_job( status_module = status.failure_module.module_status.clone(); if module.retry.as_ref().is_some_and(|x| x.has_attempts()) { 
- sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['retry'], $1) WHERE id = $2", + json!(RetryStatus { fail_count: 0, failed_jobs: vec![] }), + flow_job.id ) - .bind(json!(RetryStatus { fail_count: 0, failed_jobs: vec![] })) - .bind(flow_job.id) .execute(db) .await .context("update flow retry")?; @@ -2224,17 +2229,16 @@ async fn push_next_flow_job( ); Ok(Marc::new(hm)) } else if let Some(id) = get_args_from_id { - let row = sqlx::query_as::<_, RawArgs>( - "SELECT args FROM completed_job WHERE id = $1 AND workspace_id = $2", + let args = sqlx::query_scalar!( + "SELECT args AS \"args: Json>>\" + FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + id, + &flow_job.workspace_id ) - .bind(id) - .bind(&flow_job.workspace_id) .fetch_optional(db) .await?; - if let Some(raw_args) = row { - Ok(Marc::new( - raw_args.args.map(|x| x.0).unwrap_or_else(HashMap::new), - )) + if let Some(args) = args { + Ok(Marc::new(args.map(|x| x.0).unwrap_or_else(HashMap::new))) } else { Ok(Marc::new(HashMap::new())) } @@ -2313,23 +2317,23 @@ async fn push_next_flow_job( let (job_payloads, next_status) = match next_flow_transform { NextFlowTransform::Continue(job_payload, next_state) => (job_payload, next_state), NextFlowTransform::EmptyInnerFlows => { - sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2) WHERE id = $3", + status.step.to_string(), + json!(FlowStatusModule::Success { + id: status_module.id(), + job: Uuid::nil(), + flow_jobs: Some(vec![]), + flow_jobs_success: Some(vec![]), + branch_chosen: None, + approvers: vec![], + failed_retries: vec![], + skipped: false, + }), + flow_job.id ) - .bind(status.step) - .bind(json!(FlowStatusModule::Success { - id: status_module.id(), - job: Uuid::nil(), - flow_jobs: Some(vec![]), - flow_jobs_success: Some(vec![]), - branch_chosen: None, - approvers: vec![], - failed_retries: vec![], - skipped: false, - })) - .bind(flow_job.id) .execute(db) .await?; // flow is reprocessed by the worker in a state where the module has completed successfully. 
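The worker_flow.rs hunks above and below repeat one mechanical conversion: dynamically-bound sqlx::query(...).bind(...) calls become sqlx::query! / sqlx::query_scalar! invocations against the v2_* relations, which sqlx validates at compile time against a live database or its offline query cache. A minimal sketch of the before/after, assuming a Postgres pool; the function names are illustrative, not from this PR:

use sqlx::{Pool, Postgres};
use uuid::Uuid;

// Before: SQL is an opaque string; mistakes surface only at runtime.
async fn touch_last_ping_runtime(db: &Pool<Postgres>, id: Uuid) -> sqlx::Result<()> {
    sqlx::query("UPDATE v2_queue SET last_ping = now() WHERE id = $1")
        .bind(id)
        .execute(db)
        .await?;
    Ok(())
}

// After: the macro checks the SQL, parameter types and result shape at build time.
async fn touch_last_ping_checked(db: &Pool<Postgres>, id: Uuid) -> sqlx::Result<()> {
    sqlx::query!("UPDATE v2_queue SET last_ping = now() WHERE id = $1", id)
        .execute(db)
        .await?;
    Ok(())
}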
@@ -2365,7 +2369,7 @@ async fn push_next_flow_job( if i % 100 == 0 && i != 0 { tracing::info!(id = %flow_job.id, root_id = %job_root, "pushed (non-commited yet) first {i} subflows of {len}"); sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET last_ping = now() WHERE id = $1 AND last_ping < now()", flow_job.id, @@ -2591,7 +2595,7 @@ async fn push_next_flow_job( if i as u16 >= p { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET suspend = $1, suspend_until = now() + interval '14 day', running = true WHERE id = $2", (i as u16 - p + 1) as i32, @@ -2609,13 +2613,13 @@ async fn push_next_flow_job( error::Error::InternalErr(format!("Unable to serialize uuid: {e:#}")) })?; - sqlx::query( - "UPDATE queue + sqlx::query!( + "UPDATE v2_queue SET flow_status = JSONB_SET(flow_status, ARRAY['cleanup_module', 'flow_jobs_to_clean'], COALESCE(flow_status->'cleanup_module'->'flow_jobs_to_clean', '[]'::jsonb) || $1) WHERE id = $2", + uuid_singleton_json, + root_job.unwrap_or(flow_job.id) ) - .bind(uuid_singleton_json) - .bind(root_job.unwrap_or(flow_job.id)) .execute(&mut *inner_tx) .await?; } @@ -2739,7 +2743,7 @@ async fn push_next_flow_job( match step { Step::FailureStep => { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET( JSONB_SET(flow_status, ARRAY['failure_module'], $1), ARRAY['step'], $2) WHERE id = $3", @@ -2755,7 +2759,7 @@ async fn push_next_flow_job( } Step::PreprocessorStep => { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET( JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1), ARRAY['step'], $2) WHERE id = $3", @@ -2768,7 +2772,7 @@ async fn push_next_flow_job( } Step::Step(i) => { sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET flow_status = JSONB_SET( JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2), ARRAY['step'], $3) WHERE id = $4", @@ -2785,7 +2789,7 @@ async fn push_next_flow_job( potentially_crash_for_testing(); sqlx::query!( - "UPDATE queue + "UPDATE v2_queue SET last_ping = null WHERE id = $1", flow_job.id @@ -3947,11 +3951,11 @@ async fn get_previous_job_result( Ok(Some(retrieve_flow_jobs_results(db, w_id, flow_jobs).await?)) } Some(FlowStatusModule::Success { job, .. }) => Ok(Some( - sqlx::query_scalar::<_, Json>>( - "SELECT result FROM completed_job WHERE id = $1 AND workspace_id = $2", + sqlx::query_scalar!( + "SELECT result AS \"result!: Json>\" FROM v2_completed_job WHERE id = $1 AND workspace_id = $2", + job, + w_id ) - .bind(job) - .bind(w_id) .fetch_one(db) .await? .0, diff --git a/backend/windmill-worker/src/worker_lockfiles.rs b/backend/windmill-worker/src/worker_lockfiles.rs index 06b7e4f0d2ec0..399aae736ca4c 100644 --- a/backend/windmill-worker/src/worker_lockfiles.rs +++ b/backend/windmill-worker/src/worker_lockfiles.rs @@ -632,15 +632,17 @@ pub async fn handle_flow_dependency_job( let new_flow_value = Json(serde_json::value::to_raw_value(&flow).map_err(to_anyhow)?); // Re-check cancellation to ensure we don't accidentally override a flow. 
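+ // (a cancellation that raced the lockfile generation above must not have its new flow value written back)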
- if sqlx::query_scalar!("SELECT canceled FROM queue WHERE id = $1", job.id) - .fetch_optional(db) - .await - .map(|v| Some(true) == v) - .unwrap_or_else(|err| { - tracing::error!(%job.id, %err, "error checking cancellation for job {0}: {err}", job.id); - false - }) - { + if sqlx::query_scalar!( + "SELECT canceled AS \"canceled!\" FROM v2_queue WHERE id = $1", + job.id + ) + .fetch_optional(db) + .await + .map(|v| Some(true) == v) + .unwrap_or_else(|err| { + tracing::error!(%job.id, %err, "error checking cancellation for job {0}: {err}", job.id); + false + }) { return Ok(to_raw_value_owned(json!({ "status": "Flow lock generation was canceled", }))); @@ -1524,15 +1526,17 @@ pub async fn handle_app_dependency_job( .await?; // Re-check cancelation to ensure we don't accidentially override an app. - if sqlx::query_scalar!("SELECT canceled FROM queue WHERE id = $1", job.id) - .fetch_optional(db) - .await - .map(|v| Some(true) == v) - .unwrap_or_else(|err| { - tracing::error!(%job.id, %err, "error checking cancelation for job {0}: {err}", job.id); - false - }) - { + if sqlx::query_scalar!( + "SELECT canceled AS \"canceled!\" FROM v2_queue WHERE id = $1", + job.id + ) + .fetch_optional(db) + .await + .map(|v| Some(true) == v) + .unwrap_or_else(|err| { + tracing::error!(%job.id, %err, "error checking cancellation for job {0}: {err}", job.id); + false + }) { return Ok(()); }
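A recurring detail in these conversions is the AS "name!" alias (id AS "id!", canceled AS "canceled!", success AS "success!"): sqlx infers columns selected through the v2_* relations as nullable, consistent with them being views over the new job tables, and the ! suffix overrides that inference so the generated record field keeps its concrete Rust type. A minimal sketch, assuming v2_queue exposes a NOT NULL canceled column; the helper name is illustrative:

use sqlx::{Pool, Postgres};
use uuid::Uuid;

// Without the override, `canceled` would come back as Option<bool>;
// `AS "canceled!"` asserts NOT NULL so the field is a plain bool.
async fn job_is_canceled(db: &Pool<Postgres>, id: Uuid) -> sqlx::Result<bool> {
    let row = sqlx::query!(
        r#"SELECT canceled AS "canceled!" FROM v2_queue WHERE id = $1"#,
        id
    )
    .fetch_optional(db)
    .await?;
    // A missing row (job no longer queued) is treated as "not canceled" here.
    Ok(row.map(|r| r.canceled).unwrap_or(false))
}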