diff --git a/backend/.sqlx/query-06ea1f5772fd580f76899bc5e0d8819f36de7c701eb36d66b010d3f569ee465d.json b/backend/.sqlx/query-06ea1f5772fd580f76899bc5e0d8819f36de7c701eb36d66b010d3f569ee465d.json new file mode 100644 index 0000000000000..14b1a67a60684 --- /dev/null +++ b/backend/.sqlx/query-06ea1f5772fd580f76899bc5e0d8819f36de7c701eb36d66b010d3f569ee465d.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO database_trigger (\n publication_name,\n replication_slot_name,\n workspace_id, \n path, \n script_path, \n is_flow, \n email, \n enabled, \n database_resource_path, \n edited_by,\n edited_at\n ) \n VALUES (\n $1, \n $2, \n $3, \n $4, \n $5, \n $6, \n $7, \n $8, \n $9, \n $10, \n now()\n )", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Bool", + "Varchar", + "Bool", + "Varchar", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "06ea1f5772fd580f76899bc5e0d8819f36de7c701eb36d66b010d3f569ee465d" +} diff --git a/backend/.sqlx/query-11b698f82a54aac68b3617047dfe2b18dd6da7d962118fee276af354218baac2.json b/backend/.sqlx/query-11b698f82a54aac68b3617047dfe2b18dd6da7d962118fee276af354218baac2.json new file mode 100644 index 0000000000000..99004617618ab --- /dev/null +++ b/backend/.sqlx/query-11b698f82a54aac68b3617047dfe2b18dd6da7d962118fee276af354218baac2.json @@ -0,0 +1,93 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT path, script_path, is_flow, route_path, workspace_id, is_async, requires_auth, edited_by, email, http_method as \"http_method: _\", static_asset_config as \"static_asset_config: _\" FROM http_trigger", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "route_path", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_async", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "requires_auth", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "http_method: _", + "type_info": { + "Custom": { + "name": "http_method", + "kind": { + "Enum": [ + "get", + "post", + "put", + "delete", + "patch" + ] + } + } + } + }, + { + "ordinal": 10, + "name": "static_asset_config: _", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "11b698f82a54aac68b3617047dfe2b18dd6da7d962118fee276af354218baac2" +} diff --git a/backend/.sqlx/query-199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85.json b/backend/.sqlx/query-199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85.json new file mode 100644 index 0000000000000..7dfa56ad4049b --- /dev/null +++ b/backend/.sqlx/query-199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n schemaname AS schema_name,\n tablename AS table_name,\n attnames AS columns,\n rowfilter AS where_clause\n FROM\n pg_publication_tables\n WHERE\n pubname = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "schema_name", + "type_info": "Name" + 
}, + { + "ordinal": 1, + "name": "table_name", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "columns", + "type_info": "NameArray" + }, + { + "ordinal": 3, + "name": "where_clause", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Name" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85" +} diff --git a/backend/.sqlx/query-1c3898da62bd9736789eb69c95b379f46a9c5d8a3ee79ae34e07215f46dc775f.json b/backend/.sqlx/query-1c3898da62bd9736789eb69c95b379f46a9c5d8a3ee79ae34e07215f46dc775f.json new file mode 100644 index 0000000000000..3017ccbe7c317 --- /dev/null +++ b/backend/.sqlx/query-1c3898da62bd9736789eb69c95b379f46a9c5d8a3ee79ae34e07215f46dc775f.json @@ -0,0 +1,104 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n workspace_id,\n path,\n script_path,\n replication_slot_name,\n publication_name,\n is_flow,\n edited_by,\n email,\n edited_at,\n server_id,\n last_server_ping,\n extra_perms,\n error,\n enabled,\n database_resource_path\n FROM\n database_trigger\n WHERE\n enabled IS TRUE\n AND (\n server_id IS NULL OR\n last_server_ping IS NULL OR\n last_server_ping < now() - interval '15 seconds'\n )\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "replication_slot_name", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "publication_name", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "edited_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "server_id", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "last_server_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "extra_perms", + "type_info": "Jsonb" + }, + { + "ordinal": 12, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 13, + "name": "enabled", + "type_info": "Bool" + }, + { + "ordinal": 14, + "name": "database_resource_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + false, + false + ] + }, + "hash": "1c3898da62bd9736789eb69c95b379f46a9c5d8a3ee79ae34e07215f46dc775f" +} diff --git a/backend/.sqlx/query-4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45.json b/backend/.sqlx/query-4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45.json new file mode 100644 index 0000000000000..46a8d2e4f04fd --- /dev/null +++ b/backend/.sqlx/query-4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT pubname AS publication_name FROM pg_publication;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "publication_name", + "type_info": "Name" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45" +} diff --git a/backend/.sqlx/query-4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13.json 
b/backend/.sqlx/query-4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13.json new file mode 100644 index 0000000000000..52136dd33e185 --- /dev/null +++ b/backend/.sqlx/query-4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT \n slot_name,\n active\n FROM\n pg_replication_slots \n WHERE \n plugin = 'pgoutput' AND\n slot_type = 'logical';\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "slot_name", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "active", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true + ] + }, + "hash": "4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13" +} diff --git a/backend/.sqlx/query-71109588434e2dcc93c3fd8e510fdd5be442a0c2e28ba636a971956fc2067eeb.json b/backend/.sqlx/query-71109588434e2dcc93c3fd8e510fdd5be442a0c2e28ba636a971956fc2067eeb.json new file mode 100644 index 0000000000000..231c4af37ada5 --- /dev/null +++ b/backend/.sqlx/query-71109588434e2dcc93c3fd8e510fdd5be442a0c2e28ba636a971956fc2067eeb.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM database_trigger WHERE script_path = $1 AND is_flow = $2 AND workspace_id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Bool", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "71109588434e2dcc93c3fd8e510fdd5be442a0c2e28ba636a971956fc2067eeb" +} diff --git a/backend/.sqlx/query-7193f39f7c776a9d7f0036acaf4d02b72c92d6bc6a5e41b581a2102f38f445f2.json b/backend/.sqlx/query-7193f39f7c776a9d7f0036acaf4d02b72c92d6bc6a5e41b581a2102f38f445f2.json new file mode 100644 index 0000000000000..d6f872fb5344b --- /dev/null +++ b/backend/.sqlx/query-7193f39f7c776a9d7f0036acaf4d02b72c92d6bc6a5e41b581a2102f38f445f2.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE database_trigger \n SET \n server_id = $1, \n last_server_ping = now() \n WHERE \n enabled IS TRUE \n AND workspace_id = $2 \n AND path = $3 \n AND (\n server_id IS NULL \n OR last_server_ping IS NULL \n OR last_server_ping < now() - INTERVAL '15 seconds'\n ) \n RETURNING true\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "7193f39f7c776a9d7f0036acaf4d02b72c92d6bc6a5e41b581a2102f38f445f2" +} diff --git a/backend/.sqlx/query-7bb99a2717aa903c1217d4b538f1e52e904778d2390ec927dc605500cb30598c.json b/backend/.sqlx/query-7bb99a2717aa903c1217d4b538f1e52e904778d2390ec927dc605500cb30598c.json new file mode 100644 index 0000000000000..51af9348b48b4 --- /dev/null +++ b/backend/.sqlx/query-7bb99a2717aa903c1217d4b538f1e52e904778d2390ec927dc605500cb30598c.json @@ -0,0 +1,107 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n workspace_id,\n path,\n script_path,\n is_flow,\n edited_by,\n email,\n edited_at,\n server_id,\n last_server_ping,\n extra_perms,\n error,\n enabled,\n replication_slot_name,\n publication_name,\n database_resource_path\n FROM \n database_trigger\n WHERE \n workspace_id = $1 AND \n path = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "script_path", + "type_info": "Varchar" 
+ }, + { + "ordinal": 3, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "edited_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "server_id", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "last_server_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "extra_perms", + "type_info": "Jsonb" + }, + { + "ordinal": 10, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 11, + "name": "enabled", + "type_info": "Bool" + }, + { + "ordinal": 12, + "name": "replication_slot_name", + "type_info": "Varchar" + }, + { + "ordinal": 13, + "name": "publication_name", + "type_info": "Varchar" + }, + { + "ordinal": 14, + "name": "database_resource_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + false, + false, + false, + false + ] + }, + "hash": "7bb99a2717aa903c1217d4b538f1e52e904778d2390ec927dc605500cb30598c" +} diff --git a/backend/.sqlx/query-86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783.json b/backend/.sqlx/query-86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783.json new file mode 100644 index 0000000000000..cbf85f6b0d594 --- /dev/null +++ b/backend/.sqlx/query-86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n puballtables AS all_table,\n pubinsert AS insert,\n pubupdate AS update,\n pubdelete AS delete\n FROM\n pg_publication\n WHERE\n pubname = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "all_table", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "insert", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "update", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "delete", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Name" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783" +} diff --git a/backend/.sqlx/query-92d6b4d1d98e6aad828233bef32614152b08b21dec5589ee2836ee6461fed192.json b/backend/.sqlx/query-92d6b4d1d98e6aad828233bef32614152b08b21dec5589ee2836ee6461fed192.json new file mode 100644 index 0000000000000..ab6d0dba846aa --- /dev/null +++ b/backend/.sqlx/query-92d6b4d1d98e6aad828233bef32614152b08b21dec5589ee2836ee6461fed192.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE database_trigger \n SET \n script_path = $1, \n path = $2, \n is_flow = $3, \n edited_by = $4, \n email = $5, \n database_resource_path = $6, \n replication_slot_name = $7,\n publication_name = $8,\n edited_at = now(), \n error = NULL,\n server_id = NULL\n WHERE \n workspace_id = $9 AND \n path = $10\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Bool", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "92d6b4d1d98e6aad828233bef32614152b08b21dec5589ee2836ee6461fed192" +} diff --git a/backend/.sqlx/query-b6194b7b291c8171f8f31223c620a98774c02b53c9f7ee2a230acd23a52decea.json b/backend/.sqlx/query-b6194b7b291c8171f8f31223c620a98774c02b53c9f7ee2a230acd23a52decea.json new file mode 100644 index 
0000000000000..a1bd4554da8f6 --- /dev/null +++ b/backend/.sqlx/query-b6194b7b291c8171f8f31223c620a98774c02b53c9f7ee2a230acd23a52decea.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM database_trigger \n WHERE \n workspace_id = $1 AND \n path = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "b6194b7b291c8171f8f31223c620a98774c02b53c9f7ee2a230acd23a52decea" +} diff --git a/backend/.sqlx/query-c9930fcfe79541af570eace58ba7e15a0816a6b4fd036cf7b991a210654b2633.json b/backend/.sqlx/query-c9930fcfe79541af570eace58ba7e15a0816a6b4fd036cf7b991a210654b2633.json new file mode 100644 index 0000000000000..da7b44d1b95c7 --- /dev/null +++ b/backend/.sqlx/query-c9930fcfe79541af570eace58ba7e15a0816a6b4fd036cf7b991a210654b2633.json @@ -0,0 +1,95 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT path, script_path, is_flow, route_path, workspace_id, is_async, requires_auth, edited_by, email, http_method as \"http_method: _\", static_asset_config as \"static_asset_config: _\" FROM http_trigger WHERE workspace_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "route_path", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_async", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "requires_auth", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "http_method: _", + "type_info": { + "Custom": { + "name": "http_method", + "kind": { + "Enum": [ + "get", + "post", + "put", + "delete", + "patch" + ] + } + } + } + }, + { + "ordinal": 10, + "name": "static_asset_config: _", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "c9930fcfe79541af570eace58ba7e15a0816a6b4fd036cf7b991a210654b2633" +} diff --git a/backend/.sqlx/query-d9ccd4438f4d56d0040b4a277bda842b2e37d0f6fbca5ae3a603e4e707ad352a.json b/backend/.sqlx/query-d9ccd4438f4d56d0040b4a277bda842b2e37d0f6fbca5ae3a603e4e707ad352a.json new file mode 100644 index 0000000000000..46eec750ba54e --- /dev/null +++ b/backend/.sqlx/query-d9ccd4438f4d56d0040b4a277bda842b2e37d0f6fbca5ae3a603e4e707ad352a.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT resource_type, value, extra_perms FROM resource WHERE path = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "resource_type", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "value", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "extra_perms", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "d9ccd4438f4d56d0040b4a277bda842b2e37d0f6fbca5ae3a603e4e707ad352a" +} diff --git a/backend/.sqlx/query-e1cb02452f12b34b60961c1dccdd518fe77028881d6169157834dc14470838ac.json b/backend/.sqlx/query-e1cb02452f12b34b60961c1dccdd518fe77028881d6169157834dc14470838ac.json new file mode 100644 index 0000000000000..6d8e9cf3f6005 --- 
/dev/null +++ b/backend/.sqlx/query-e1cb02452f12b34b60961c1dccdd518fe77028881d6169157834dc14470838ac.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT EXISTS(\n SELECT 1 \n FROM database_trigger \n WHERE \n path = $1 AND \n workspace_id = $2\n )", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e1cb02452f12b34b60961c1dccdd518fe77028881d6169157834dc14470838ac" +} diff --git a/backend/.sqlx/query-e9f3b3e8ee8da31fe5ac79ae1df35c7ac271b76c26415882807416c45d7fc29a.json b/backend/.sqlx/query-e9f3b3e8ee8da31fe5ac79ae1df35c7ac271b76c26415882807416c45d7fc29a.json new file mode 100644 index 0000000000000..c415b7e7185db --- /dev/null +++ b/backend/.sqlx/query-e9f3b3e8ee8da31fe5ac79ae1df35c7ac271b76c26415882807416c45d7fc29a.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n database_trigger\n SET \n last_server_ping = now(),\n error = $1\n WHERE\n workspace_id = $2\n AND path = $3\n AND server_id = $4 \n AND enabled IS TRUE\n RETURNING 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e9f3b3e8ee8da31fe5ac79ae1df35c7ac271b76c26415882807416c45d7fc29a" +} diff --git a/backend/.sqlx/query-ef099c39553ac1eb69d9c00c826edcc1bfcb1b663c96250a2f5a5d9af2825a00.json b/backend/.sqlx/query-ef099c39553ac1eb69d9c00c826edcc1bfcb1b663c96250a2f5a5d9af2825a00.json new file mode 100644 index 0000000000000..246c5640900a0 --- /dev/null +++ b/backend/.sqlx/query-ef099c39553ac1eb69d9c00c826edcc1bfcb1b663c96250a2f5a5d9af2825a00.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT \n \n EXISTS(SELECT 1 FROM websocket_trigger WHERE workspace_id = $1) AS \"websocket_used!\", \n \n EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) AS \"http_routes_used!\",\n EXISTS(SELECT 1 FROM kafka_trigger WHERE workspace_id = $1) as \"kafka_used!\",\n EXISTS(SELECT 1 FROM database_trigger WHERE workspace_id = $1) AS \"database_used!\"\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "websocket_used!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "http_routes_used!", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "kafka_used!", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "database_used!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null, + null, + null, + null + ] + }, + "hash": "ef099c39553ac1eb69d9c00c826edcc1bfcb1b663c96250a2f5a5d9af2825a00" +} diff --git a/backend/.sqlx/query-f84dae24a41c71d22ef55903ffa03b63e46cf3ccbdbe8117f7c9eb348016e0fe.json b/backend/.sqlx/query-f84dae24a41c71d22ef55903ffa03b63e46cf3ccbdbe8117f7c9eb348016e0fe.json new file mode 100644 index 0000000000000..a06a321be0fde --- /dev/null +++ b/backend/.sqlx/query-f84dae24a41c71d22ef55903ffa03b63e46cf3ccbdbe8117f7c9eb348016e0fe.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE database_trigger \n SET \n enabled = $1, \n email = $2, \n edited_by = $3, \n edited_at = now(), \n server_id = NULL, \n error = NULL\n WHERE \n path = $4 AND \n workspace_id = $5 \n RETURNING 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Bool", + "Varchar", + "Varchar", + 
"Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "f84dae24a41c71d22ef55903ffa03b63e46cf3ccbdbe8117f7c9eb348016e0fe" +} diff --git a/backend/.vscode/settings.json b/backend/.vscode/settings.json index d9d4cdd047b79..71910a7ae2f8d 100644 --- a/backend/.vscode/settings.json +++ b/backend/.vscode/settings.json @@ -1,5 +1,8 @@ { "python.analysis.typeCheckingMode": "basic", "rust-analyzer.linkedProjects": ["./windmill-common/Cargo.toml"], - "rust-analyzer.showUnlinkedFileNotification": false + "rust-analyzer.showUnlinkedFileNotification": false, + "conventionalCommits.scopes": [ + "restructring triggers, decoding trigger message on work" + ] } diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 54b68aa39297e..ee2e0528d4f75 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -6006,6 +6006,15 @@ dependencies = [ "indexmap 2.7.0", ] +[[package]] +name = "pg_escape" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c7bc82ccbe2c7ef7ceed38dcac90d7ff46681e061e9d7310cbcd409113e303" +dependencies = [ + "phf", +] + [[package]] name = "phf" version = "0.11.2" @@ -6145,7 +6154,7 @@ dependencies = [ "native-tls", "tokio", "tokio-native-tls", - "tokio-postgres", + "tokio-postgres 0.7.12", ] [[package]] @@ -6166,6 +6175,33 @@ dependencies = [ "stringprep", ] +[[package]] +name = "postgres-protocol" +version = "0.6.7" +source = "git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b#20265ef38e32a06f76b6f9b678e2077fc2211f6b" +dependencies = [ + "base64 0.22.1", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5 0.10.6", + "memchr", + "rand 0.8.5", + "sha2 0.10.8", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.7" +source = "git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b#20265ef38e32a06f76b6f9b678e2077fc2211f6b" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol 0.6.7 (git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b)", +] + [[package]] name = "postgres-types" version = "0.2.8" @@ -6177,7 +6213,7 @@ dependencies = [ "bytes", "chrono", "fallible-iterator", - "postgres-protocol", + "postgres-protocol 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde", "serde_json", "uuid 1.11.0", @@ -7118,7 +7154,7 @@ dependencies = [ "borsh", "bytes", "num-traits", - "postgres-types", + "postgres-types 0.2.8", "rand 0.8.5", "rkyv", "serde", @@ -9391,6 +9427,31 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.11" +source = "git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b#20265ef38e32a06f76b6f9b678e2077fc2211f6b" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol 0.6.7 (git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b)", + "postgres-types 0.2.7", + "rand 0.8.5", + "socket2", + "tokio", + "tokio-util", + "whoami", +] + [[package]] name = "tokio-postgres" version = "0.7.12" @@ -9408,8 +9469,8 @@ dependencies = [ "percent-encoding", "phf", "pin-project-lite", - "postgres-protocol", - "postgres-types", + "postgres-protocol 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "postgres-types 0.2.8", "rand 0.8.5", "socket2", "tokio", @@ -10605,6 +10666,7 @@ dependencies 
= [ "gethostname", "git-version", "lazy_static", + "memchr", "object_store", "once_cell", "prometheus", @@ -10646,6 +10708,7 @@ dependencies = [ "axum", "base32", "base64 0.22.1", + "byteorder", "bytes", "candle-core", "candle-nn", @@ -10669,11 +10732,13 @@ dependencies = [ "magic-crypt", "mail-parser", "matchit", + "memchr", "mime_guess", "native-tls", "object_store", "openidconnect", "openssl", + "pg_escape", "pin-project", "prometheus", "quick_cache", @@ -10683,6 +10748,7 @@ dependencies = [ "reqwest 0.12.9", "rsa", "rust-embed", + "rust_decimal", "samael", "serde", "serde_json", @@ -10691,11 +10757,13 @@ dependencies = [ "sql-builder", "sqlx", "tempfile", + "thiserror 2.0.9", "time", "tinyvector", "tokenizers", "tokio", "tokio-native-tls", + "tokio-postgres 0.7.11", "tokio-tar", "tokio-tungstenite", "tokio-util", @@ -11054,7 +11122,6 @@ dependencies = [ "async-recursion", "axum", "backon", - "bigdecimal", "chrono", "chrono-tz 0.10.0", "cron", @@ -11144,7 +11211,7 @@ dependencies = [ "tar", "tiberius", "tokio", - "tokio-postgres", + "tokio-postgres 0.7.12", "tokio-util", "tracing", "urlencoding", diff --git a/backend/Cargo.toml b/backend/Cargo.toml index e9701b9b99a66..804209e541e54 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -71,6 +71,7 @@ mysql = ["windmill-worker/mysql"] mssql = ["windmill-worker/mssql"] bigquery = ["windmill-worker/bigquery"] websocket = ["windmill-api/websocket"] +database = ["windmill-api/database"] python = ["windmill-worker/python"] smtp = ["windmill-api/smtp", "windmill-common/smtp"] csharp = ["windmill-worker/csharp"] @@ -112,6 +113,8 @@ serde.workspace = true deno_core = { workspace = true, optional = true } object_store = { workspace = true, optional = true } quote.workspace = true +memchr.workspace = true + [target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemallocator = { optional = true, workspace = true } @@ -152,6 +155,7 @@ windmill-parser-graphql = { path = "./parsers/windmill-parser-graphql" } windmill-parser-php = { path = "./parsers/windmill-parser-php" } windmill-api-client = { path = "./windmill-api-client" } +memchr = "2.7.4" axum = { version = "^0.7", features = ["multipart"] } headers = "^0" hyper = { version = "^1", features = ["full"] } @@ -232,7 +236,7 @@ sqlx = { version = "0.8.0", features = [ "runtime-tokio-rustls", "bigdecimal" ] } -bigdecimal = "^0" +bigdecimal = {version = "^0"} dotenv = "^0" ulid = { version = "^1", features = ["uuid"] } futures = "^0" @@ -258,6 +262,7 @@ wasm-bindgen-test = "0.3.42" convert_case = "0.6.0" getrandom = "0.2" tokio-postgres = {version = "^0.7", features = ["array-impls", "with-serde_json-1", "with-chrono-0_4", "with-uuid-1", "with-bit-vec-0_6"]} +rust-postgres = { package = "tokio-postgres", git = "https://github.com/imor/rust-postgres", rev = "20265ef38e32a06f76b6f9b678e2077fc2211f6b"} bit-vec = "=0.6.3" mappable-rc = "^0" mysql_async = { version = "*", default-features = false, features = ["minimal", "default", "native-tls-tls", "rust_decimal"]} @@ -267,7 +272,7 @@ native-tls = "^0" # samael = { git="https://github.com/njaremko/samael", rev="464d015e3ae393e4b5dd00b4d6baa1b617de0dd6", features = ["xmlsec"] } samael = { version="0.0.14", features = ["xmlsec"] } gcp_auth = "0.9.0" -rust_decimal = { version = "^1", features = ["db-postgres"]} +rust_decimal = { version = "^1", features = ["db-postgres", "serde-float"]} jsonwebtoken = "8.3.0" pem = "3.0.1" nix = { version = "0.27.1", features = ["process", "signal"] } @@ -285,7 +290,7 @@ openssl = "=0.10" mail-parser = "^0" matchit 
= "=0.7.3" rdkafka = { version = "0.36.2", features = ["cmake-build", "ssl-vendored"] } - +pg_escape = "0.1.1" datafusion = "39.0.0" object_store = { version = "0.10.0", features = ["aws", "azure"] } openidconnect = { version = "4.0.0-rc.1" } @@ -307,6 +312,7 @@ opentelemetry-semantic-conventions = { version = "*", features = ["semconv_exper bollard = "0.18.1" tonic = { version = "^0", features = ["tls-native-roots"] } +byteorder = "1.5.0" tikv-jemallocator = { version = "0.5" } tikv-jemalloc-sys = { version = "^0.5" } diff --git a/backend/migrations/20241123152203_database_triggers.down.sql b/backend/migrations/20241123152203_database_triggers.down.sql new file mode 100644 index 0000000000000..7c18c0075bdb7 --- /dev/null +++ b/backend/migrations/20241123152203_database_triggers.down.sql @@ -0,0 +1,3 @@ +-- Add down migration script here +DROP TABLE IF EXISTS database_trigger; +DROP TYPE IF EXISTS transaction; \ No newline at end of file diff --git a/backend/migrations/20241123152203_database_triggers.up.sql b/backend/migrations/20241123152203_database_triggers.up.sql new file mode 100644 index 0000000000000..5e5560d5e069f --- /dev/null +++ b/backend/migrations/20241123152203_database_triggers.up.sql @@ -0,0 +1,21 @@ +-- Add up migration script here +CREATE TABLE database_trigger( + path VARCHAR(255) NOT NULL, + script_path VARCHAR(255) NOT NULL, + is_flow BOOLEAN NOT NULL, + workspace_id VARCHAR(50) NOT NULL, + edited_by VARCHAR(50) NOT NULL, + email VARCHAR(255) NOT NULL, + edited_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + extra_perms JSONB NULL, + database_resource_path VARCHAR(255) NOT NULL, + error TEXT NULL, + server_id VARCHAR(50) NULL, + last_server_ping TIMESTAMPTZ NULL, + replication_slot_name VARCHAR(255) NOT NULL, + publication_name VARCHAR(255) NOT NULL, + enabled BOOLEAN NOT NULL, + CONSTRAINT PK_database_trigger PRIMARY KEY (path,workspace_id), + CONSTRAINT fk_database_trigger_workspace FOREIGN KEY (workspace_id) + REFERENCES workspace(id) ON DELETE CASCADE +); \ No newline at end of file diff --git a/backend/windmill-api/Cargo.toml b/backend/windmill-api/Cargo.toml index 95778875d8783..eeb392b9f4fc3 100644 --- a/backend/windmill-api/Cargo.toml +++ b/backend/windmill-api/Cargo.toml @@ -27,6 +27,7 @@ zip = ["dep:async_zip"] oauth2 = ["dep:async-oauth2"] http_trigger = ["dep:matchit"] static_frontend = ["dep:rust-embed"] +database = ["dep:rust-postgres", "dep:pg_escape", "dep:byteorder", "dep:memchr", "dep:thiserror", "dep:rust_decimal"] [dependencies] windmill-queue.workspace = true @@ -104,8 +105,13 @@ matchit = { workspace = true, optional = true } tokio-tungstenite = { workspace = true, optional = true} rdkafka = { workspace = true, optional = true } const_format.workspace = true - pin-project.workspace = true http.workspace = true async-stream.workspace = true ulid.workspace = true +rust-postgres = { workspace = true, optional = true } +pg_escape = { workspace = true, optional = true } +byteorder = { workspace = true, optional = true } +memchr = { workspace = true, optional = true } +thiserror = { workspace = true, optional = true } +rust_decimal = { workspace = true, optional = true } \ No newline at end of file diff --git a/backend/windmill-api/openapi.yaml b/backend/windmill-api/openapi.yaml index 4443894e5e30e..0cd5f4d49e4fc 100644 --- a/backend/windmill-api/openapi.yaml +++ b/backend/windmill-api/openapi.yaml @@ -283,7 +283,7 @@ paths: text/plain: schema: type: string - + /users/set_password_of/{user}: post: summary: set password for a specific user (require super 
admin) @@ -348,7 +348,6 @@ paths: schema: type: string - /users/create: post: summary: create user @@ -530,7 +529,7 @@ paths: text/plain: schema: type: string - + /users/export: get: summary: global export users (require super admin and EE) @@ -998,7 +997,6 @@ paths: schema: type: string - /settings/latest_key_renewal_attempt: get: summary: get latest key renewal attempt @@ -1084,7 +1082,6 @@ paths: schema: type: string - /settings/list_global: get: summary: list global settings @@ -2379,11 +2376,13 @@ paths: type: boolean kafka_used: type: boolean + database_used: + type: boolean required: - http_routes_used - websocket_used - kafka_used - + - database_used /w/{workspace}/users/list: get: summary: list users @@ -2876,7 +2875,7 @@ paths: description: Whether critical alerts should be muted. example: true responses: - '200': + "200": description: Successfully updated mute critical alert settings. content: application/json: @@ -2955,7 +2954,6 @@ paths: schema: type: string - /oauth/connect_slack_callback: post: summary: connect slack callback instance @@ -3172,7 +3170,7 @@ paths: type: string responses: "200": - description: get + description: get content: application/json: schema: @@ -3185,9 +3183,6 @@ paths: items: type: string - - - /w/{workspace}/resources/create: post: summary: create resource @@ -3478,7 +3473,6 @@ paths: application/json: schema: {} - /w/{workspace}/resources/type/delete/{path}: delete: summary: delete resource_type @@ -4019,6 +4013,11 @@ paths: - $ref: "#/components/parameters/PerPage" - $ref: "#/components/parameters/OrderDesc" - $ref: "#/components/parameters/CreatedBy" + - name: languages + description: Filter scripts by comma-separated programming languages (e.g., "Python,JavaScript"). + in: query + schema: + type: string - name: path_start description: mask to filter matching starting path in: query @@ -4466,7 +4465,6 @@ paths: description: Script version/hash content: application/json: - required: false schema: @@ -5037,7 +5035,6 @@ paths: schema: type: string - /w/{workspace}/flows/get/{path}: get: summary: get flow by path @@ -5095,7 +5092,6 @@ paths: items: $ref: "#/components/schemas/TruncatedToken" - /w/{workspace}/flows/toggle_workspace_error_handler/{path}: post: summary: Toggle ON and OFF the workspace error handler for a given flow @@ -5266,7 +5262,6 @@ paths: schema: type: string - /w/{workspace}/raw_apps/list: get: summary: list all raw apps @@ -6316,7 +6311,6 @@ paths: schema: type: integer - /w/{workspace}/jobs/queue/list_filtered_uuids: get: summary: get the ids of all jobs matching the given filters @@ -6628,7 +6622,6 @@ paths: schema: type: string - /w/{workspace}/jobs_u/get_args/{id}: get: summary: get job args @@ -7153,7 +7146,6 @@ paths: schema: type: string - /w/{workspace}/jobs_u/cancel/{id}/{resume_id}/{signature}: get: summary: cancel a job for a suspended flow @@ -7524,7 +7516,6 @@ paths: "201": description: default error handler set - /w/{workspace}/http_triggers/create: post: summary: create http trigger @@ -7577,7 +7568,7 @@ paths: summary: delete http trigger operationId: deleteHttpTrigger tags: - - http_trigger + - http_trigger parameters: - $ref: "#/components/parameters/WorkspaceId" - $ref: "#/components/parameters/Path" @@ -7606,7 +7597,6 @@ paths: schema: $ref: "#/components/schemas/HttpTrigger" - /w/{workspace}/http_triggers/list: get: summary: list http triggers @@ -7641,7 +7631,6 @@ paths: items: $ref: "#/components/schemas/HttpTrigger" - /w/{workspace}/http_triggers/exists/{path}: get: summary: does http trigger exists @@ 
-7744,7 +7733,7 @@ paths: summary: delete websocket trigger operationId: deleteWebsocketTrigger tags: - - websocket_trigger + - websocket_trigger parameters: - $ref: "#/components/parameters/WorkspaceId" - $ref: "#/components/parameters/Path" @@ -7773,7 +7762,6 @@ paths: schema: $ref: "#/components/schemas/WebsocketTrigger" - /w/{workspace}/websocket_triggers/list: get: summary: list websocket triggers @@ -7808,7 +7796,6 @@ paths: items: $ref: "#/components/schemas/WebsocketTrigger" - /w/{workspace}/websocket_triggers/exists/{path}: get: summary: does websocket trigger exists @@ -8018,6 +8005,362 @@ paths: schema: type: string + /w/{workspace}/database_triggers/get_template_script: + post: + summary: get template script + operationId: getTemplateScript + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + requestBody: + description: template script + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/TemplateScript" + responses: + "200": + description: template script + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/slot/list/{path}: + get: + summary: list database slot + operationId: listDatabaseSlot + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: list database slot + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/SlotList" + + /w/{workspace}/database_triggers/slot/create/{path}: + post: + summary: create slot for database + operationId: createDatabaseSlot + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: new slot for database + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/Slot" + responses: + "201": + description: slot created + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/slot/delete/{path}: + delete: + summary: delete database slot + operationId: deleteDatabaseSlot + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: delete slot of database + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/Slot" + responses: + "200": + description: database slot deleted + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/publication/list/{path}: + get: + summary: list database publication + operationId: listDatabasePublication + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: database publication list + content: + application/json: + schema: + type: array + items: + type: string + + /w/{workspace}/database_triggers/publication/get/{publication}/{path}: + get: + summary: get database publication + operationId: getDatabasePublication + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + responses: + "200": + description: database publication get + content: + application/json: + schema: + $ref: "#/components/schemas/PublicationData" + + /w/{workspace}/database_triggers/publication/create/{publication}/{path}: + post: + 
summary: create publication for database + operationId: createDatabasePublication + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + requestBody: + description: new publication for database + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PublicationData" + responses: + "201": + description: publication created + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/publication/update/{publication}/{path}: + post: + summary: update publication for database + operationId: updateDatabasePublication + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + requestBody: + description: update publication for database + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PublicationData" + responses: + "201": + description: publication updated + content: + text/plain: + schema: + type: string + + + /w/{workspace}/database_triggers/publication/delete/{publication}/{path}: + delete: + summary: delete database publication + operationId: deleteDatabasePublication + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + responses: + "200": + description: database publication deleted + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/create: + post: + summary: create database trigger + operationId: createDatabaseTrigger + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + requestBody: + description: new database trigger + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/NewDatabaseTrigger" + responses: + "201": + description: database trigger created + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/update/{path}: + post: + summary: update database trigger + operationId: updateDatabaseTrigger + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: updated trigger + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/EditDatabaseTrigger" + responses: + "200": + description: database trigger updated + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/delete/{path}: + delete: + summary: delete database trigger + operationId: deleteDatabaseTrigger + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: database trigger deleted + content: + text/plain: + schema: + type: string + + /w/{workspace}/database_triggers/get/{path}: + get: + summary: get database trigger + operationId: getDatabaseTrigger + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: get database trigger + content: + application/json: + schema: + $ref: "#/components/schemas/DatabaseTrigger" + + /w/{workspace}/database_triggers/list: + get: + summary: list database triggers + operationId: listDatabaseTriggers + 
tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + required: true + - $ref: "#/components/parameters/Page" + - $ref: "#/components/parameters/PerPage" + - name: path + description: filter by path + in: query + schema: + type: string + - name: is_flow + in: query + schema: + type: boolean + - name: path_start + in: query + schema: + type: string + responses: + "200": + description: database trigger list + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/DatabaseTrigger" + + /w/{workspace}/database_triggers/exists/{path}: + get: + summary: does database trigger exists + operationId: existsDatabaseTrigger + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: database trigger exists + content: + application/json: + schema: + type: boolean + + /w/{workspace}/database_triggers/setenabled/{path}: + post: + summary: set enabled database trigger + operationId: setDatabaseTriggerEnabled + tags: + - database_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: updated database trigger enable + required: true + content: + application/json: + schema: + type: object + properties: + enabled: + type: boolean + required: + - enabled + responses: + "200": + description: database trigger enabled set + content: + text/plain: + schema: + type: string /groups/list: get: @@ -8714,7 +9057,7 @@ paths: properties: id: type: string - values: + values: type: array items: type: object @@ -8741,12 +9084,11 @@ paths: description: queue counts content: application/json: - schema: + schema: type: object additionalProperties: type: integer - /configs/list_worker_groups: get: summary: list worker groups @@ -8859,7 +9201,6 @@ paths: items: $ref: "#/components/schemas/AutoscalingEvent" - /w/{workspace}/acls/get/{kind}/{path}: get: summary: get granular acls @@ -8874,8 +9215,7 @@ paths: required: true schema: type: string - enum: - [ + enum: [ script, group_, resource, @@ -8888,6 +9228,7 @@ paths: http_trigger, websocket_trigger, kafka_trigger, + database_trigger, ] responses: "200": @@ -8927,6 +9268,7 @@ paths: http_trigger, websocket_trigger, kafka_trigger, + database_trigger, ] requestBody: description: acl to add @@ -8977,6 +9319,7 @@ paths: http_trigger, websocket_trigger, kafka_trigger, + database_trigger, ] requestBody: description: acl to add @@ -10096,7 +10439,7 @@ paths: tags: - service_logs parameters: - - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/Path" responses: "200": description: log stream @@ -10408,6 +10751,12 @@ components: required: true schema: type: string + PublicationName: + name: publication + in: path + required: true + schema: + type: string VersionId: name: version in: path @@ -10830,27 +11179,7 @@ components: lock_error_logs: type: string language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" kind: type: string enum: [script, failure, trigger, command, approval] @@ -10913,7 +11242,6 @@ components: - no_main_func - has_preprocessor - NewScript: type: object properties: @@ -10934,27 +11262,7 @@ components: lock: type: string language: - type: string - enum: - [ - python3, - deno, - go, - bash, - 
powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" kind: type: string enum: [script, failure, trigger, command, approval] @@ -11159,27 +11467,7 @@ components: is_flow_step: type: boolean language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" email: type: string visible_to_owner: @@ -11282,27 +11570,7 @@ components: is_flow_step: type: boolean language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" is_skipped: type: boolean email: @@ -11828,6 +12096,29 @@ components: - no_main_func - has_preprocessor + ScriptLang: + type: string + enum: + [ + python3, + deno, + go, + bash, + powershell, + postgresql, + mysql, + bigquery, + snowflake, + mssql, + graphql, + nativets, + bun, + php, + rust, + ansible, + csharp + ] + Preview: type: object properties: @@ -11838,27 +12129,7 @@ components: args: $ref: "#/components/schemas/ScriptArgs" language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" tag: type: string kind: @@ -12223,16 +12494,36 @@ components: - is_flow - args - HttpTrigger: + TriggerExtraProperty: type: object properties: - path: + email: + type: string + extra_perms: + type: object + additionalProperties: + type: boolean + workspace_id: type: string edited_by: type: string edited_at: type: string format: date-time + required: + - email + - extra_perms + - workspace_id + - edited_by + - edited_at + + HttpTrigger: + allOf: + - $ref: "#/components/schemas/TriggerExtraProperty" + type: object + properties: + path: + type: string script_path: type: string route_path: @@ -12250,14 +12541,6 @@ components: - s3 is_flow: type: boolean - extra_perms: - type: object - additionalProperties: - type: boolean - email: - type: string - workspace_id: - type: string http_method: type: string enum: @@ -12270,7 +12553,7 @@ components: type: boolean requires_auth: type: boolean - + required: - path - edited_by @@ -12319,7 +12602,7 @@ components: type: boolean requires_auth: type: boolean - + required: - path - script_path @@ -12390,33 +12673,24 @@ components: type: number websocket_count: type: number + database_count: + type: number kafka_count: type: number WebsocketTrigger: + allOf: + - $ref: "#/components/schemas/TriggerExtraProperty" type: object properties: path: type: string - edited_by: - type: string - edited_at: - type: string - format: date-time script_path: type: string url: type: string is_flow: type: boolean - extra_perms: - type: object - additionalProperties: - type: boolean - email: - type: string - workspace_id: - type: string server_id: type: string last_server_ping: @@ -12443,7 +12717,7 @@ components: $ref: "#/components/schemas/WebsocketTriggerInitialMessage" url_runnable_args: $ref: "#/components/schemas/ScriptArgs" - + required: - path - edited_by @@ -12487,7 +12761,7 @@ components: $ref: "#/components/schemas/WebsocketTriggerInitialMessage" url_runnable_args: $ref: 
"#/components/schemas/ScriptArgs" - + required: - path - script_path @@ -12523,7 +12797,7 @@ components: $ref: "#/components/schemas/WebsocketTriggerInitialMessage" url_runnable_args: $ref: "#/components/schemas/ScriptArgs" - + required: - path - script_path @@ -12535,7 +12809,7 @@ components: anyOf: - type: object properties: - raw_message: + raw_message: type: string required: - raw_message @@ -12544,7 +12818,7 @@ components: runnable_result: type: object properties: - path: + path: type: string args: $ref: "#/components/schemas/ScriptArgs" @@ -12554,8 +12828,166 @@ components: - path - args - is_flow - required: + required: - runnable_result + + Slot: + type: object + properties: + name: + type: string + + SlotList: + type: object + properties: + slot_name: + type: string + active: + type: boolean + + PublicationData: + type: object + properties: + table_to_track: + type: array + items: + $ref: "#/components/schemas/Relations" + transaction_to_track: + type: array + items: + type: string + required: + - transaction_to_track + + TableToTrack: + type: array + items: + type: object + properties: + table_name: + type: string + columns_name: + type: array + items: + type: string + where_clause: + type: string + required: + - table_name + + Relations: + type: object + properties: + schema_name: + type: string + table_to_track: + $ref: "#/components/schemas/TableToTrack" + required: + - schema_name + - table_to_track + + Language: + type: string + enum: + - Typescript + + TemplateScript: + type: object + properties: + database_resource_path: + type: string + relations: + type: array + items: + $ref: "#/components/schemas/Relations" + language: + $ref: "#/components/schemas/Language" + required: + - database_resource_path + - relations + - language + + DatabaseTrigger: + allOf: + - $ref: "#/components/schemas/TriggerExtraProperty" + type: object + properties: + path: + type: string + script_path: + type: string + is_flow: + type: boolean + enabled: + type: boolean + database_resource_path: + type: string + publication_name: + type: string + replication_slot_name: + type: string + error: + type: string + required: + - path + - script_path + - is_flow + - enabled + - database_resource_path + - replication_slot_name + - publication_name + + NewDatabaseTrigger: + type: object + properties: + replication_slot_name: + type: string + publication_name: + type: string + path: + type: string + script_path: + type: string + is_flow: + type: boolean + enabled: + type: boolean + database_resource_path: + type: string + required: + - path + - script_path + - is_flow + - enabled + - database_resource_path + - replication_slot_name + - publication_name + + EditDatabaseTrigger: + type: object + properties: + replication_slot_name: + type: string + publication_name: + type: string + path: + type: string + script_path: + type: string + is_flow: + type: boolean + enabled: + type: boolean + database_resource_path: + type: string + required: + - path + - script_path + - is_flow + - enabled + - database_resource_path + - replication_slot_name + - publication_name KafkaTrigger: type: object @@ -13456,27 +13888,7 @@ components: path: type: string language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" required: - raw_code - path @@ -13503,14 +13915,14 @@ components: obscured_jobs: type: array items: - $ref : 
"#/components/schemas/ObscuredJob" + $ref: "#/components/schemas/ObscuredJob" omitted_obscured_jobs: description: "Obscured jobs omitted for security because of too specific filtering" type: boolean required: - jobs - obscured_jobs - + ExportedUser: type: object properties: @@ -13556,7 +13968,7 @@ components: type: object required: - name - + ExportedInstanceGroup: type: object properties: diff --git a/backend/windmill-api/src/ai.rs b/backend/windmill-api/src/ai.rs index 32c1f4e4dcc89..418ba5b407034 100644 --- a/backend/windmill-api/src/ai.rs +++ b/backend/windmill-api/src/ai.rs @@ -1,7 +1,4 @@ -use crate::{ - db::{ApiAuthed, DB}, - variables::decrypt, -}; +use crate::db::{ApiAuthed, DB}; use anthropic::AnthropicCache; use axum::{ body::Bytes, @@ -19,7 +16,7 @@ use serde::{Deserialize, Deserializer}; use windmill_audit::audit_ee::audit_log; use windmill_audit::ActionKind; use windmill_common::error::{to_anyhow, Result}; -use windmill_common::variables::build_crypt; +use windmill_common::variables::get_variable_or_self; use windmill_common::error::Error; @@ -346,38 +343,11 @@ lazy_static! { pub static ref AI_KEY_CACHE: Cache = Cache::new(500); } -struct Variable { - value: String, - is_secret: bool, -} - #[derive(Deserialize, Debug)] struct ProxyQueryParams { no_cache: Option, } -async fn get_variable_or_self(path: String, db: &DB, w_id: &str) -> Result { - if !path.starts_with("$var:") { - return Ok(path); - } - let path = path.strip_prefix("$var:").unwrap().to_string(); - let mut variable = sqlx::query_as!( - Variable, - "SELECT value, is_secret - FROM variable - WHERE path = $1 AND workspace_id = $2", - &path, - &w_id - ) - .fetch_one(db) - .await?; - if variable.is_secret { - let mc = build_crypt(db, w_id).await?; - variable.value = decrypt(&mc, variable.value)?; - } - Ok(variable.value) -} - #[derive(Deserialize, Debug)] pub struct AiResource { pub path: String, diff --git a/backend/windmill-api/src/apps.rs b/backend/windmill-api/src/apps.rs index 7218f98ec0d88..9c49962f32fc6 100644 --- a/backend/windmill-api/src/apps.rs +++ b/backend/windmill-api/src/apps.rs @@ -13,7 +13,6 @@ use crate::{ resources::get_resource_value_interpolated_internal, users::{require_owner_of_path, OptAuthed}, utils::WithStarredInfoQuery, - variables::encrypt, webhook_util::{WebhookMessage, WebhookShared}, HTTP_CLIENT, }; @@ -65,7 +64,7 @@ use windmill_common::{ http_get_from_hub, not_found_if_none, paginate, query_elems_from_hub, require_admin, Pagination, StripPath, }, - variables::{build_crypt, build_crypt_with_key_suffix}, + variables::{build_crypt, build_crypt_with_key_suffix, encrypt}, worker::{to_raw_value, CLOUD_HOSTED}, HUB_BASE_URL, }; diff --git a/backend/windmill-api/src/database_triggers/bool.rs b/backend/windmill-api/src/database_triggers/bool.rs new file mode 100644 index 0000000000000..040d8c4a0e744 --- /dev/null +++ b/backend/windmill-api/src/database_triggers/bool.rs @@ -0,0 +1,17 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ParseBoolError { + #[error("invalid input value: {0}")] + InvalidInput(String), +} + +pub fn parse_bool(s: &str) -> Result { + if s == "t" { + Ok(true) + } else if s == "f" { + Ok(false) + } else { + Err(ParseBoolError::InvalidInput(s.to_string())) + } +} diff --git a/backend/windmill-api/src/database_triggers/converter.rs b/backend/windmill-api/src/database_triggers/converter.rs new file mode 100644 index 0000000000000..4096c795ebce2 --- /dev/null +++ b/backend/windmill-api/src/database_triggers/converter.rs @@ -0,0 +1,253 @@ +use core::str; +use std::{ + 
num::{ParseFloatError, ParseIntError},
+    str::FromStr,
+};
+
+use super::{
+    bool::{parse_bool, ParseBoolError},
+    hex::{from_bytea_hex, ByteaHexParseError},
+};
+use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc};
+use rust_decimal::Decimal;
+use rust_postgres::types::Type;
+use serde_json::{to_value, Number, Value};
+use thiserror::Error;
+use uuid::Uuid;
+
+#[derive(Debug, Error)]
+pub enum ConverterError {
+    #[error("invalid bool value")]
+    InvalidBool(#[from] ParseBoolError),
+
+    #[error("invalid int value")]
+    InvalidInt(#[from] ParseIntError),
+
+    #[error("invalid float value")]
+    InvalidFloat(#[from] ParseFloatError),
+
+    #[error("invalid numeric: {0}")]
+    InvalidNumeric(#[from] rust_decimal::Error),
+
+    #[error("invalid bytea: {0}")]
+    InvalidBytea(#[from] ByteaHexParseError),
+
+    #[error("invalid uuid: {0}")]
+    InvalidUuid(#[from] uuid::Error),
+
+    #[error("invalid json: {0}")]
+    InvalidJson(#[from] serde_json::Error),
+
+    #[error("invalid timestamp: {0}")]
+    InvalidTimestamp(#[from] chrono::ParseError),
+
+    #[error("invalid array: {0}")]
+    InvalidArray(#[from] ArrayParseError),
+
+    #[error("{0}")]
+    Custom(String),
+}
+
+fn convert_into<T>(number: T) -> Number
+where
+    T: Sized,
+    serde_json::Number: From<T>,
+{
+    serde_json::Number::from(number)
+}
+
+/// Converts Postgres text-encoded column values into `serde_json::Value`s based on the column type.
+pub struct Converter;
+
+#[derive(Debug, Error)]
+pub enum ArrayParseError {
+    #[error("input too short")]
+    InputTooShort,
+
+    #[error("missing braces")]
+    MissingBraces,
+}
+
+fn f64_to_json_number(raw_val: f64) -> Result<Value, ConverterError> {
+    let temp = serde_json::Number::from_f64(raw_val.into())
+        .ok_or(ConverterError::Custom("invalid json-float".to_string()))?;
+    Ok(Value::Number(temp))
+}
+
+impl Converter {
+    pub fn try_from_str(typ: Option<&Type>, str: &str) -> Result<Value, ConverterError> {
+        let value = match typ {
+            Some(typ) => match *typ {
+                Type::BOOL => Value::Bool(parse_bool(str)?),
+                Type::BOOL_ARRAY => {
+                    Converter::parse_array(str, |str| Ok(Value::Bool(parse_bool(str)?)))?
+                }
+                Type::CHAR | Type::BPCHAR | Type::VARCHAR | Type::NAME | Type::TEXT => {
+                    Value::String(str.to_string())
+                }
+                Type::CHAR_ARRAY
+                | Type::BPCHAR_ARRAY
+                | Type::VARCHAR_ARRAY
+                | Type::NAME_ARRAY
+                | Type::TEXT_ARRAY => {
+                    Converter::parse_array(str, |str| Ok(Value::String(str.to_string())))?
+                }
+                Type::INT2 => Value::Number(convert_into(str.parse::<i16>()?)),
+                Type::INT2_ARRAY => Converter::parse_array(str, |str| {
+                    Ok(Value::Number(convert_into(str.parse::<i16>()?)))
+                })?,
+                Type::INT4 => Value::Number(convert_into(str.parse::<i32>()?)),
+                Type::INT4_ARRAY => Converter::parse_array(str, |str| {
+                    Ok(Value::Number(convert_into(str.parse::<i32>()?)))
+                })?,
+                Type::INT8 => Value::Number(convert_into(str.parse::<i64>()?)),
+                Type::INT8_ARRAY => Converter::parse_array(str, |str| {
+                    Ok(Value::Number(convert_into(str.parse::<i64>()?)))
+                })?,
+                Type::FLOAT4 => f64_to_json_number(str.parse::<f64>()?)?,
+                Type::FLOAT4_ARRAY => {
+                    Converter::parse_array(str, |str| f64_to_json_number(str.parse::<f64>()?))?
+                }
+                Type::FLOAT8 => f64_to_json_number(str.parse::<f64>()?)?,
+                Type::FLOAT8_ARRAY => {
+                    Converter::parse_array(str, |str| f64_to_json_number(str.parse::<f64>()?))?
+                }
+                Type::NUMERIC => serde_json::json!(Decimal::from_str(str)?),
+                Type::NUMERIC_ARRAY => Converter::parse_array(str, |str| {
+                    Ok(serde_json::json!(Decimal::from_str(str)?))
+                })?,
+                Type::BYTEA => to_value(from_bytea_hex(str)?).unwrap(),
+                Type::BYTEA_ARRAY => {
+                    Converter::parse_array(str, |str| Ok(to_value(from_bytea_hex(str)?).unwrap()))?
+ } + Type::DATE => { + let date = NaiveDate::parse_from_str(str, "%Y-%m-%d")?; + Value::String(date.to_string()) + } + Type::DATE_ARRAY => Converter::parse_array(str, |str| { + let date = NaiveDate::parse_from_str(str, "%Y-%m-%d")?; + Ok(Value::String(date.to_string())) + })?, + Type::TIME => { + let time = NaiveTime::parse_from_str(str, "%H:%M:%S%.f")?; + Value::String(time.to_string()) + } + Type::TIME_ARRAY => Converter::parse_array(str, |str| { + let time = NaiveTime::parse_from_str(str, "%H:%M:%S%.f")?; + Ok(Value::String(time.to_string())) + })?, + Type::TIMESTAMP => { + let timestamp = NaiveDateTime::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f")?; + Value::String(timestamp.to_string()) + } + Type::TIMESTAMP_ARRAY => Converter::parse_array(str, |str| { + let timestamp = NaiveDateTime::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f")?; + Ok(Value::String(timestamp.to_string())) + })?, + Type::TIMESTAMPTZ => { + let val = match DateTime::::parse_from_str( + str, + "%Y-%m-%d %H:%M:%S%.f%#z", + ) { + Ok(val) => val, + Err(_) => { + DateTime::::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f%:z")? + } + }; + let utc: DateTime = val.into(); + Value::String(utc.to_string()) + } + Type::TIMESTAMPTZ_ARRAY => { + match Converter::parse_array(str, |str| { + let utc: DateTime = DateTime::::parse_from_str( + str, + "%Y-%m-%d %H:%M:%S%.f%#z", + )? + .into(); + Ok(Value::String(utc.to_string())) + }) { + Ok(val) => val, + Err(_) => Converter::parse_array(str, |str| { + let utc: DateTime = DateTime::::parse_from_str( + str, + "%Y-%m-%d %H:%M:%S%.f%#z", + )? + .into(); + Ok(Value::String(utc.to_string())) + })?, + } + } + Type::UUID => Value::String(Uuid::parse_str(str)?.to_string()), + Type::UUID_ARRAY => Converter::parse_array(str, |str| { + Ok(Value::String(Uuid::parse_str(str)?.to_string())) + })?, + Type::JSON | Type::JSONB => serde_json::from_str::(str)?, + Type::JSON_ARRAY | Type::JSONB_ARRAY => Converter::parse_array(str, |str| { + Ok(serde_json::from_str::(str)?) + })?, + Type::OID => Value::Number(convert_into(str.parse::()?)), + Type::OID_ARRAY => Converter::parse_array(str, |str| { + Ok(Value::Number(convert_into(str.parse::()?))) + })?, + _ => Value::String(str.to_string()), + }, + None => Value::String(str.to_string()), + }; + + Ok(value) + } + + fn parse_array

(str: &str, mut parse: P) -> Result + where + P: FnMut(&str) -> Result, + { + if str.len() < 2 { + return Err(ArrayParseError::InputTooShort.into()); + } + + if !str.starts_with('{') || !str.ends_with('}') { + return Err(ArrayParseError::MissingBraces.into()); + } + + let mut res = vec![]; + let str = &str[1..(str.len() - 1)]; + let mut val_str = String::with_capacity(10); + let mut in_quotes = false; + let mut in_escape = false; + let mut chars = str.chars(); + let mut done = str.is_empty(); + + while !done { + loop { + match chars.next() { + Some(c) => match c { + c if in_escape => { + val_str.push(c); + in_escape = false; + } + '"' => in_quotes = !in_quotes, + '\\' => in_escape = true, + ',' if !in_quotes => { + break; + } + c => { + val_str.push(c); + } + }, + None => { + done = true; + break; + } + } + } + let val = if val_str.to_lowercase() == "null" { + Value::Null + } else { + parse(&val_str)? + }; + res.push(val); + val_str.clear(); + } + let arr = Value::Array(res); + Ok(arr) + } +} diff --git a/backend/windmill-api/src/database_triggers/handler.rs b/backend/windmill-api/src/database_triggers/handler.rs new file mode 100644 index 0000000000000..8e58ee7de8cda --- /dev/null +++ b/backend/windmill-api/src/database_triggers/handler.rs @@ -0,0 +1,1315 @@ +use std::{ + collections::{ + hash_map::Entry::{Occupied, Vacant}, + HashMap, + }, + str::FromStr, +}; + +use axum::{ + extract::{Path, Query}, + Extension, Json, +}; +use http::StatusCode; +use itertools::Itertools; +use pg_escape::{quote_identifier, quote_literal}; +use rust_postgres::types::Type; +use serde::{Deserialize, Deserializer, Serialize}; +use sql_builder::{bind::Bind, SqlBuilder}; +use sqlx::{ + postgres::{types::Oid, PgConnectOptions, PgSslMode}, + Connection, FromRow, PgConnection, QueryBuilder, +}; +use windmill_audit::{audit_ee::audit_log, ActionKind}; +use windmill_common::error::Error; +use windmill_common::{ + db::UserDB, + error::{self, JsonResult}, + utils::{not_found_if_none, paginate, Pagination, StripPath}, + worker::CLOUD_HOSTED, +}; + +use crate::{ + database_triggers::mapper::{Mapper, MappingInfo}, + db::{ApiAuthed, DB}, + resources::get_resource_value_interpolated_internal, +}; + +#[derive(FromRow, Serialize, Deserialize, Debug)] +pub struct Database { + pub user: String, + pub password: String, + pub host: String, + pub port: u16, + pub dbname: String, + pub sslmode: String, + pub root_certificate_pem: String, +} + +#[derive(Debug, Clone, FromRow, Serialize, Deserialize)] +pub struct TableToTrack { + pub table_name: String, + pub where_clause: Option, + pub columns_name: Vec, +} + +impl TableToTrack { + fn new( + table_name: String, + where_clause: Option, + columns_name: Vec, + ) -> TableToTrack { + TableToTrack { table_name, where_clause, columns_name } + } +} + +#[derive(Debug, Clone, FromRow, Serialize, Deserialize)] +pub struct Relations { + pub schema_name: String, + pub table_to_track: Vec, +} + +impl Relations { + fn new(schema_name: String, table_to_track: Vec) -> Relations { + Relations { schema_name, table_to_track } + } + + fn add_new_table(&mut self, table_to_track: TableToTrack) { + self.table_to_track.push(table_to_track); + } +} + +#[derive(Deserialize)] +pub struct EditDatabaseTrigger { + replication_slot_name: String, + publication_name: String, + path: String, + script_path: String, + is_flow: bool, + database_resource_path: String, +} + +#[derive(Deserialize, Serialize, Debug)] + +pub struct NewDatabaseTrigger { + path: String, + script_path: String, + is_flow: bool, + enabled: bool, 
+ database_resource_path: String, + replication_slot_name: String, + publication_name: String, +} + +async fn get_raw_postgres_connection(db: &Database) -> Result { + let options = { + let sslmode = if !db.sslmode.is_empty() { + PgSslMode::from_str(&db.sslmode)? + } else { + PgSslMode::Prefer + }; + let options = PgConnectOptions::new() + .host(&db.host) + .database(&db.dbname) + .port(db.port) + .ssl_mode(sslmode) + .username(&db.user); + + let options = if !db.root_certificate_pem.is_empty() { + options.ssl_root_cert_from_pem(db.root_certificate_pem.as_bytes().to_vec()) + } else { + options + }; + + if !db.password.is_empty() { + options.password(&db.password) + } else { + options + } + }; + + PgConnection::connect_with(&options) + .await + .map_err(Error::SqlErr) +} + +#[derive(Debug)] +pub enum Language { + Typescript, +} + +#[derive(Debug, Deserialize)] +pub struct TemplateScript { + database_resource_path: String, + #[serde(deserialize_with = "check_if_not_duplication_relation")] + relations: Option>, + #[serde(deserialize_with = "check_if_valid_language")] + language: Language, +} + +fn check_if_valid_language<'de, D>(language: D) -> std::result::Result +where + D: Deserializer<'de>, +{ + let language: String = String::deserialize(language)?; + + let language = match language.to_lowercase().as_str() { + "typescript" => Language::Typescript, + _ => { + return Err(serde::de::Error::custom( + "Language supported for custom script is only: Typescript", + )) + } + }; + + Ok(language) +} + +fn check_if_not_duplication_relation<'de, D>( + relations: D, +) -> std::result::Result>, D::Error> +where + D: Deserializer<'de>, +{ + let relations: Option> = Option::deserialize(relations)?; + + match relations { + Some(relations) => { + for relation in relations.iter() { + if relation.schema_name.is_empty() { + return Err(serde::de::Error::custom( + "Schema Name must not be empty".to_string(), + )); + } + + for table_to_track in relation.table_to_track.iter() { + if table_to_track.table_name.trim().is_empty() { + return Err(serde::de::Error::custom( + "Table name must not be empty".to_string(), + )); + } + } + } + + if !relations + .iter() + .map(|relation| relation.schema_name.as_str()) + .all_unique() + { + return Err(serde::de::Error::custom( + "You cannot choose a schema more than one time".to_string(), + )); + } + + Ok(Some(relations)) + } + None => Ok(None), + } +} + +#[derive(FromRow, Deserialize, Serialize, Debug)] +pub struct DatabaseTrigger { + pub path: String, + pub script_path: String, + pub is_flow: bool, + pub workspace_id: String, + pub edited_by: String, + pub email: String, + pub edited_at: chrono::DateTime, + pub extra_perms: Option, + pub database_resource_path: String, + pub error: Option, + pub server_id: Option, + pub replication_slot_name: String, + pub publication_name: String, + pub last_server_ping: Option>, + pub enabled: bool, +} + +pub async fn get_database_resource( + authed: ApiAuthed, + user_db: UserDB, + db: &DB, + database_resource_path: &str, + w_id: &str, +) -> Result { + let resource = get_resource_value_interpolated_internal( + &authed, + Some(user_db), + &db, + &w_id, + &database_resource_path, + None, + "", + ) + .await + .map_err(|_| Error::NotFound("Database resource do not exist".to_string()))?; + + let resource = match resource { + Some(resource) => serde_json::from_value::(resource).map_err(Error::SerdeJson)?, + None => { + return { + Err(Error::NotFound( + "Database resource do not exist".to_string(), + )) + } + } + }; + + Ok(resource) +} + 
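+// Illustrative sketch (not part of the PR): the JSON shape a `database` resource is expected to
+// deserialize into, following the `Database` struct above. All values below are placeholders; the
+// password may also reference a workspace variable (resolved before connecting), e.g. via a
+// hypothetical "$var:u/admin/db_password" path.
+// {
+//   "user": "postgres",
+//   "password": "$var:u/admin/db_password",
+//   "host": "127.0.0.1",
+//   "port": 5432,
+//   "dbname": "mydb",
+//   "sslmode": "prefer",
+//   "root_certificate_pem": ""
+// }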
+#[derive(Deserialize, Serialize)] +pub struct ListDatabaseTriggerQuery { + pub page: Option, + pub per_page: Option, + pub path: Option, + pub is_flow: Option, + pub path_start: Option, +} + +#[derive(Deserialize)] +pub struct SetEnabled { + pub enabled: bool, +} + +pub async fn create_database_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Path(w_id): Path, + Json(new_database_trigger): Json, +) -> error::Result<(StatusCode, String)> { + let NewDatabaseTrigger { + database_resource_path, + path, + script_path, + enabled, + is_flow, + publication_name, + replication_slot_name, + } = new_database_trigger; + if *CLOUD_HOSTED { + return Err(error::Error::BadRequest( + "Database triggers are not supported on multi-tenant cloud, use dedicated cloud or self-host".to_string(), + )); + } + + let mut tx = user_db.begin(&authed).await?; + + sqlx::query!( + r#" + INSERT INTO database_trigger ( + publication_name, + replication_slot_name, + workspace_id, + path, + script_path, + is_flow, + email, + enabled, + database_resource_path, + edited_by, + edited_at + ) + VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + $8, + $9, + $10, + now() + )"#, + &publication_name, + &replication_slot_name, + &w_id, + &path, + script_path, + is_flow, + &authed.email, + enabled, + database_resource_path, + &authed.username + ) + .execute(&mut *tx) + .await?; + + audit_log( + &mut *tx, + &authed, + "database_triggers.create", + ActionKind::Create, + &w_id, + Some(path.as_str()), + None, + ) + .await?; + + tx.commit().await?; + + Ok((StatusCode::CREATED, path.to_string())) +} + +pub async fn list_database_triggers( + authed: ApiAuthed, + Extension(user_db): Extension, + Path(w_id): Path, + Query(lst): Query, +) -> error::JsonResult> { + let mut tx = user_db.begin(&authed).await?; + let (per_page, offset) = paginate(Pagination { per_page: lst.per_page, page: lst.page }); + let mut sqlb = SqlBuilder::select_from("database_trigger") + .fields(&[ + "workspace_id", + "path", + "script_path", + "is_flow", + "edited_by", + "email", + "edited_at", + "server_id", + "last_server_ping", + "extra_perms", + "error", + "enabled", + "database_resource_path", + "replication_slot_name", + "publication_name", + ]) + .order_by("edited_at", true) + .and_where("workspace_id = ?".bind(&w_id)) + .offset(offset) + .limit(per_page) + .clone(); + if let Some(path) = lst.path { + sqlb.and_where_eq("script_path", "?".bind(&path)); + } + if let Some(is_flow) = lst.is_flow { + sqlb.and_where_eq("is_flow", "?".bind(&is_flow)); + } + if let Some(path_start) = &lst.path_start { + sqlb.and_where_like_left("path", path_start); + } + let sql = sqlb + .sql() + .map_err(|e| error::Error::InternalErr(e.to_string()))?; + let rows = sqlx::query_as::<_, DatabaseTrigger>(&sql) + .fetch_all(&mut *tx) + .await + .map_err(|e| { + tracing::debug!("Error fetching database_trigger: {:#?}", e); + windmill_common::error::Error::InternalErr("server error".to_string()) + })?; + tx.commit().await.map_err(|e| { + tracing::debug!("Error commiting database_trigger: {:#?}", e); + windmill_common::error::Error::InternalErr("server error".to_string()) + })?; + + Ok(Json(rows)) +} + +#[derive(Deserialize, Serialize)] +pub struct PublicationData { + #[serde(default, deserialize_with = "check_if_not_duplication_relation")] + table_to_track: Option>, + #[serde(deserialize_with = "check_if_valid_transaction_type")] + transaction_to_track: Vec, +} + +fn check_if_valid_transaction_type<'de, D>( + transaction_type: D, +) -> std::result::Result, D::Error> +where + D: 
Deserializer<'de>, +{ + let mut transaction_type: Vec = Vec::deserialize(transaction_type)?; + if transaction_type.len() > 3 { + return Err(serde::de::Error::custom( + "More than 3 transaction type which is not authorized, you are only allowed to those 3 transaction types: Insert, Update and Delete" + .to_string(), + )); + } + transaction_type.sort_unstable(); + transaction_type.dedup(); + + for transaction in transaction_type.iter() { + match transaction.to_lowercase().as_ref() { + "insert" => {}, + "update" => {}, + "delete" => {}, + _ => { + return Err(serde::de::Error::custom( + "Only the following transaction types are allowed: Insert, Update and Delete (case insensitive)" + .to_string(), + )) + } + } + } + + Ok(transaction_type) +} + +impl PublicationData { + fn new( + table_to_track: Option>, + transaction_to_track: Vec, + ) -> PublicationData { + PublicationData { table_to_track, transaction_to_track } + } +} + +#[derive(Debug, Serialize)] +pub struct SlotList { + slot_name: Option, + active: Option, +} + +pub async fn list_slot_name( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, database_resource_path)): Path<(String, String)>, +) -> error::Result>> { + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + let slots = sqlx::query_as!( + SlotList, + r#" + SELECT + slot_name, + active + FROM + pg_replication_slots + WHERE + plugin = 'pgoutput' AND + slot_type = 'logical'; + "# + ) + .fetch_all(&mut connection) + .await?; + + Ok(Json(slots)) +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Slot { + name: String, +} + +pub async fn create_slot( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, database_resource_path)): Path<(String, String)>, + Json(Slot { name }): Json, +) -> error::Result { + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + let query = format!( + r#" + SELECT + * + FROM + pg_create_logical_replication_slot({}, 'pgoutput');"#, + quote_literal(&name) + ); + + sqlx::query(&query).execute(&mut connection).await?; + + Ok(format!("Slot {} created!", name)) +} + +pub async fn drop_slot_name( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, database_resource_path)): Path<(String, String)>, + Json(Slot { name }): Json, +) -> error::Result { + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + let query = format!("SELECT pg_drop_replication_slot({});", quote_literal(&name)); + sqlx::query(&query).execute(&mut connection).await?; + + Ok(format!("Slot name {} deleted!", name)) +} +#[derive(Debug, Serialize)] +struct PublicationName { + publication_name: String, +} + +pub async fn list_database_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, database_resource_path)): Path<(String, String)>, +) -> error::Result>> { + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + let publication_names = sqlx::query_as!( + PublicationName, + "SELECT pubname AS publication_name FROM 
pg_publication;" + ) + .fetch_all(&mut connection) + .await?; + + let publications = publication_names + .iter() + .map(|publication| publication.publication_name.to_owned()) + .collect_vec(); + + Ok(Json(publications)) +} + +pub async fn get_publication_info( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, database_resource_path)): Path<(String, String, String)>, +) -> error::Result> { + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + let publication_data = + get_publication_scope_and_transaction(&publication_name, &mut connection).await; + + let (all_table, transaction_to_track) = match publication_data { + Ok(pub_data) => pub_data, + Err(Error::SqlErr(sqlx::Error::RowNotFound)) => { + return Err(Error::NotFound( + "Publication was not found, please create a new publication".to_string(), + )) + } + Err(e) => return Err(e), + }; + + let table_to_track = if !all_table { + Some(get_tracked_relations(&mut connection, &publication_name).await?) + } else { + None + }; + Ok(Json(PublicationData::new( + table_to_track, + transaction_to_track, + ))) +} + +async fn new_publication( + connection: &mut PgConnection, + publication_name: &str, + table_to_track: Option<&[Relations]>, + transaction_to_track: &[&str], +) -> Result<(), Error> { + let mut query = QueryBuilder::new("CREATE PUBLICATION "); + + query.push(quote_identifier(publication_name)); + + match table_to_track { + Some(database_component) if !database_component.is_empty() => { + query.push(" FOR"); + for (i, schema) in database_component.iter().enumerate() { + if schema.table_to_track.is_empty() { + query.push(" TABLES IN SCHEMA "); + query.push(quote_identifier(&schema.schema_name)); + } else { + query.push(" TABLE ONLY "); + for (j, table) in schema.table_to_track.iter().enumerate() { + query.push(quote_identifier(&table.table_name)); + if !table.columns_name.is_empty() { + query.push(" ("); + let columns = table + .columns_name + .iter() + .map(|column| quote_identifier(column)) + .join(", "); + query.push(&columns); + query.push(")"); + } + + if let Some(where_clause) = &table.where_clause { + query.push(" WHERE ("); + query.push(where_clause); + query.push(')'); + } + + if j + 1 != schema.table_to_track.len() { + query.push(", "); + } + } + } + if i < database_component.len() - 1 { + query.push(", "); + } + } + } + _ => { + query.push(" FOR ALL TABLES "); + } + }; + + if !transaction_to_track.is_empty() { + let transactions = || transaction_to_track.iter().join(", "); + query.push(" WITH (publish = '"); + query.push(transactions()); + query.push("');"); + } + + let query = query.build(); + + query.execute(&mut *connection).await?; + + Ok(()) +} + +pub async fn create_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, database_resource_path)): Path<(String, String, String)>, + Json(publication_data): Json, +) -> error::Result { + let PublicationData { table_to_track, transaction_to_track } = publication_data; + + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + new_publication( + &mut connection, + &publication_name, + table_to_track.as_deref(), + &transaction_to_track.iter().map(AsRef::as_ref).collect_vec(), + ) + .await?; + + Ok(format!( + 
"Publication {} successfully created!", + publication_name + )) +} + +async fn drop_publication( + publication_name: &str, + connection: &mut PgConnection, +) -> Result<(), Error> { + let mut query = QueryBuilder::new("DROP PUBLICATION IF EXISTS "); + let quoted_publication_name = quote_identifier(publication_name); + query.push(quoted_publication_name); + query.push(";"); + query.build().execute(&mut *connection).await?; + Ok(()) +} + +pub async fn delete_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, database_resource_path)): Path<(String, String, String)>, +) -> error::Result { + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + drop_publication(&publication_name, &mut connection).await?; + + Ok(format!( + "Publication {} successfully deleted!", + publication_name + )) +} + +pub async fn alter_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, database_resource_path)): Path<(String, String, String)>, + Json(publication_data): Json, +) -> error::Result { + let PublicationData { table_to_track, transaction_to_track } = publication_data; + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut connection = get_raw_postgres_connection(&database).await?; + + let (all_table, _) = + get_publication_scope_and_transaction(&publication_name, &mut connection).await?; + + let mut query = QueryBuilder::new(""); + let quoted_publication_name = quote_identifier(&publication_name); + + let transaction_to_track_as_str = transaction_to_track.iter().join(","); + + match table_to_track { + Some(ref relations) if !relations.is_empty() => { + if all_table { + drop_publication(&publication_name, &mut connection).await?; + new_publication( + &mut connection, + &publication_name, + table_to_track.as_deref(), + &transaction_to_track.iter().map(AsRef::as_ref).collect_vec(), + ) + .await?; + } else { + query.push("ALTER PUBLICATION "); + query.push("ed_publication_name); + query.push(" SET"); + for (i, relation) in relations.iter().enumerate() { + if relation.table_to_track.is_empty() { + query.push(" TABLES IN SCHEMA "); + let quoted_schema = quote_identifier(&relation.schema_name); + query.push("ed_schema); + } else { + query.push(" TABLE ONLY "); + for (j, table) in relation.table_to_track.iter().enumerate() { + let quoted_table = quote_identifier(&table.table_name); + query.push("ed_table); + if !table.columns_name.is_empty() { + query.push(" ("); + let columns = table + .columns_name + .iter() + .map(|column| quote_identifier(column)) + .join(", "); + query.push(&columns); + query.push(") "); + } + + if let Some(where_clause) = &table.where_clause { + query.push(" WHERE ("); + query.push(where_clause); + query.push(')'); + } + + if j + 1 != relation.table_to_track.len() { + query.push(", "); + } + } + } + if i < relations.len() - 1 { + query.push(','); + } + } + query.push(";"); + query.build().execute(&mut connection).await?; + query.reset(); + query.push("ALTER PUBLICATION "); + query.push("ed_publication_name); + query.push(format!( + " SET (publish = '{}');", + transaction_to_track_as_str + )); + } + } + _ => { + drop_publication(&publication_name, &mut connection).await?; + let to_execute = format!( + r#" + CREATE + PUBLICATION {} FOR ALL TABLES WITH (publish = '{}') + 
"#, + quoted_publication_name, transaction_to_track_as_str + ); + query.push(&to_execute); + } + }; + + query.build().execute(&mut connection).await?; + + Ok(format!( + "Publication {} successfully updated!", + publication_name + )) +} + +async fn get_publication_scope_and_transaction( + publication_name: &str, + connection: &mut PgConnection, +) -> Result<(bool, Vec), Error> { + #[derive(Debug, Deserialize, FromRow)] + struct PublicationTransaction { + all_table: bool, + insert: bool, + update: bool, + delete: bool, + } + + let transaction = sqlx::query_as!( + PublicationTransaction, + r#" + SELECT + puballtables AS all_table, + pubinsert AS insert, + pubupdate AS update, + pubdelete AS delete + FROM + pg_publication + WHERE + pubname = $1 + "#, + publication_name + ) + .fetch_one(&mut *connection) + .await?; + + let mut transaction_to_track = Vec::with_capacity(3); + + if transaction.insert { + transaction_to_track.push("insert".to_string()); + } + if transaction.update { + transaction_to_track.push("update".to_string()); + } + if transaction.delete { + transaction_to_track.push("delete".to_string()); + } + + Ok((transaction.all_table, transaction_to_track)) +} + +async fn get_tracked_relations( + connection: &mut PgConnection, + publication_name: &str, +) -> error::Result> { + #[derive(Debug, Deserialize, FromRow)] + struct PublicationData { + schema_name: Option, + table_name: Option, + columns: Option>, + where_clause: Option, + } + + let publications = sqlx::query_as!( + PublicationData, + r#" + SELECT + schemaname AS schema_name, + tablename AS table_name, + attnames AS columns, + rowfilter AS where_clause + FROM + pg_publication_tables + WHERE + pubname = $1 + "#, + publication_name + ) + .fetch_all(&mut *connection) + .await?; + + let mut table_to_track: HashMap = HashMap::new(); + + for publication in publications { + let schema_name = publication.schema_name.unwrap(); + let entry = table_to_track.entry(schema_name.clone()); + let table_to_track = TableToTrack::new( + publication.table_name.unwrap(), + publication.where_clause, + publication.columns.unwrap(), + ); + match entry { + Occupied(mut occuped) => { + occuped.get_mut().add_new_table(table_to_track); + } + Vacant(vacant) => { + vacant.insert(Relations::new(schema_name, vec![table_to_track])); + } + } + } + Ok(table_to_track.into_values().collect_vec()) +} + +pub async fn get_database_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, +) -> JsonResult { + let mut tx = user_db.begin(&authed).await?; + let path = path.to_path(); + let trigger = sqlx::query_as!( + DatabaseTrigger, + r#" + SELECT + workspace_id, + path, + script_path, + is_flow, + edited_by, + email, + edited_at, + server_id, + last_server_ping, + extra_perms, + error, + enabled, + replication_slot_name, + publication_name, + database_resource_path + FROM + database_trigger + WHERE + workspace_id = $1 AND + path = $2 + "#, + &w_id, + &path + ) + .fetch_optional(&mut *tx) + .await?; + tx.commit().await?; + + let trigger = not_found_if_none(trigger, "Trigger", path)?; + + Ok(Json(trigger)) +} + +pub async fn update_database_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, + Json(database_trigger): Json, +) -> error::Result { + let workspace_path = path.to_path(); + let EditDatabaseTrigger { + replication_slot_name, + publication_name, + script_path, + path, + is_flow, + database_resource_path, + } = database_trigger; + + let mut tx = 
user_db.begin(&authed).await?; + + sqlx::query!( + r#" + UPDATE database_trigger + SET + script_path = $1, + path = $2, + is_flow = $3, + edited_by = $4, + email = $5, + database_resource_path = $6, + replication_slot_name = $7, + publication_name = $8, + edited_at = now(), + error = NULL, + server_id = NULL + WHERE + workspace_id = $9 AND + path = $10 + "#, + script_path, + path, + is_flow, + &authed.username, + &authed.email, + database_resource_path, + replication_slot_name, + publication_name, + w_id, + workspace_path, + ) + .execute(&mut *tx) + .await?; + + audit_log( + &mut *tx, + &authed, + "database_triggers.update", + ActionKind::Create, + &w_id, + Some(&path), + None, + ) + .await?; + + tx.commit().await?; + + Ok(workspace_path.to_string()) +} + +pub async fn delete_database_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, +) -> error::Result { + let path = path.to_path(); + let mut tx = user_db.begin(&authed).await?; + sqlx::query!( + r#" + DELETE FROM database_trigger + WHERE + workspace_id = $1 AND + path = $2 + "#, + w_id, + path, + ) + .execute(&mut *tx) + .await?; + + audit_log( + &mut *tx, + &authed, + "database_triggers.delete", + ActionKind::Delete, + &w_id, + Some(path), + None, + ) + .await?; + + tx.commit().await?; + + Ok(format!("Database trigger {path} deleted")) +} + +pub async fn exists_database_trigger( + Extension(db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, +) -> JsonResult { + let path = path.to_path(); + let exists = sqlx::query_scalar!( + r#" + SELECT EXISTS( + SELECT 1 + FROM database_trigger + WHERE + path = $1 AND + workspace_id = $2 + )"#, + path, + w_id, + ) + .fetch_one(&db) + .await? + .unwrap_or(false); + Ok(Json(exists)) +} + +pub async fn set_enabled( + authed: ApiAuthed, + Extension(user_db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, + Json(payload): Json, +) -> error::Result { + let mut tx = user_db.begin(&authed).await?; + let path = path.to_path(); + + // important to set server_id, last_server_ping and error to NULL to stop current database listener + let one_o = sqlx::query_scalar!( + r#" + UPDATE database_trigger + SET + enabled = $1, + email = $2, + edited_by = $3, + edited_at = now(), + server_id = NULL, + error = NULL + WHERE + path = $4 AND + workspace_id = $5 + RETURNING 1 + "#, + payload.enabled, + &authed.email, + &authed.username, + path, + w_id, + ) + .fetch_optional(&mut *tx) + .await? 
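+    // RETURNING 1 comes back as an optional column, so fetch_optional yields a nested Option;
+    // flatten collapses it so not_found_if_none below can report a missing trigger.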
+ .flatten(); + + not_found_if_none(one_o, "Database trigger", path)?; + + audit_log( + &mut *tx, + &authed, + "database_triggers.setenabled", + ActionKind::Update, + &w_id, + Some(path), + Some([("enabled", payload.enabled.to_string().as_ref())].into()), + ) + .await?; + + tx.commit().await?; + + Ok(format!( + "succesfully updated database trigger at path {} to status {}", + path, payload.enabled + )) +} + +pub async fn get_template_script( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path(w_id): Path, + Json(template_script): Json, +) -> error::Result { + use windmill_common::error::Error; + + let TemplateScript { database_resource_path, relations, language } = template_script; + if relations.is_none() { + return Err(Error::BadRequest( + "You must at least choose schema to fetch table from".to_string(), + )); + } + + let database = + get_database_resource(authed, user_db, &db, &database_resource_path, &w_id).await?; + + let mut pg_connection = get_raw_postgres_connection(&database).await?; + + #[derive(Debug, FromRow, Deserialize)] + struct ColumnInfo { + table_schema: Option, + table_name: Option, + column_name: Option, + oid: Oid, + is_nullable: bool, + } + + let relations = relations.unwrap(); + let mut schema_or_fully_qualified_name = Vec::with_capacity(relations.len()); + let mut columns_list = Vec::new(); + for relation in relations { + if !relation.table_to_track.is_empty() { + for table_to_track in relation.table_to_track { + let fully_qualified_name = + format!("{}.{}", &relation.schema_name, table_to_track.table_name); + schema_or_fully_qualified_name.push(quote_literal(&fully_qualified_name)); + + let columns = if !table_to_track.columns_name.is_empty() { + quote_literal(&table_to_track.columns_name.join(",")) + } else { + "''".to_string() + }; + columns_list.push(columns); + } + continue; + } + + schema_or_fully_qualified_name.push(quote_literal(&relation.schema_name)); + columns_list.push(String::from("''")); + } + + let tables_name = schema_or_fully_qualified_name.join(","); + let columns_list = columns_list.join(","); + + let query = format!( + r#" + WITH table_column_mapping AS ( + SELECT + unnest(ARRAY[{}]) AS table_name, + unnest(ARRAY[{}]) AS column_list + ), + parsed_columns AS ( + SELECT + tcm.table_name, + CASE + WHEN tcm.column_list = '' THEN NULL + ELSE string_to_array(tcm.column_list, ',') + END AS columns + FROM + table_column_mapping tcm + ) + SELECT + ns.nspname AS table_schema, + cls.relname AS table_name, + attr.attname AS column_name, + attr.atttypid AS oid, + attr.attnotnull AS is_nullable + FROM + pg_attribute attr + JOIN + pg_class cls + ON attr.attrelid = cls.oid + JOIN + pg_namespace ns + ON cls.relnamespace = ns.oid + JOIN + parsed_columns pc + ON ns.nspname || '.' 
|| cls.relname = pc.table_name + OR ns.nspname = pc.table_name + WHERE + attr.attnum > 0 -- Exclude system columns + AND NOT attr.attisdropped -- Exclude dropped columns + AND cls.relkind = 'r' -- Restrict to base tables + AND ( + pc.columns IS NULL + OR attr.attname = ANY(pc.columns) + ); + "#, + tables_name, columns_list + ); + + let rows: Vec = sqlx::query_as(&query) + .fetch_all(&mut pg_connection) + .await + .map_err(error::Error::SqlErr)?; + + let mut mapper: HashMap>> = HashMap::new(); + + for row in rows { + let ColumnInfo { table_schema, table_name, column_name, oid, is_nullable } = row; + + let entry = mapper.entry(table_schema.unwrap()); + + let mapped_info = + MappingInfo::new(column_name.unwrap(), Type::from_oid(oid.0), is_nullable); + + match entry { + Occupied(mut occupied) => { + let entry = occupied.get_mut().entry(table_name.unwrap()); + match entry { + Occupied(mut occuped) => { + let mapping_info = occuped.get_mut(); + mapping_info.push(mapped_info); + } + Vacant(vacant) => { + let mut mapping_info = Vec::with_capacity(10); + mapping_info.push(mapped_info); + vacant.insert(mapping_info); + } + } + } + Vacant(vacant) => { + let mut mapping_info = Vec::with_capacity(10); + mapping_info.push(mapped_info); + vacant.insert(HashMap::from([(table_name.unwrap(), mapping_info)])); + } + } + } + + let mapper = Mapper::new(mapper, language); + + let template = mapper.get_template(); + + Ok(template) +} diff --git a/backend/windmill-api/src/database_triggers/hex.rs b/backend/windmill-api/src/database_triggers/hex.rs new file mode 100644 index 0000000000000..1538588142fbc --- /dev/null +++ b/backend/windmill-api/src/database_triggers/hex.rs @@ -0,0 +1,35 @@ +use std::num::ParseIntError; + +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ByteaHexParseError { + #[error("missing prefix '\\x'")] + InvalidPrefix, + + #[error("invalid byte")] + OddNumerOfDigits, + + #[error("parse int result: {0}")] + ParseInt(#[from] ParseIntError), +} + +pub fn from_bytea_hex(s: &str) -> Result, ByteaHexParseError> { + if s.len() < 2 || &s[..2] != "\\x" { + return Err(ByteaHexParseError::InvalidPrefix); + } + + let mut result = Vec::with_capacity((s.len() - 2) / 2); + let s = &s[2..]; + + if s.len() % 2 != 0 { + return Err(ByteaHexParseError::OddNumerOfDigits); + } + + for i in (0..s.len()).step_by(2) { + let val = u8::from_str_radix(&s[i..i + 2], 16)?; + result.push(val); + } + + Ok(result) +} diff --git a/backend/windmill-api/src/database_triggers/mapper.rs b/backend/windmill-api/src/database_triggers/mapper.rs new file mode 100644 index 0000000000000..1f4a04e15dcde --- /dev/null +++ b/backend/windmill-api/src/database_triggers/mapper.rs @@ -0,0 +1,127 @@ +use std::collections::HashMap; + +use rust_postgres::types::Type; + +use super::handler::Language; + +fn postgres_to_typescript_type(postgres_type: Option) -> String { + let data_type = match postgres_type { + Some(postgres_type) => match postgres_type { + Type::BOOL => "boolean", + Type::BOOL_ARRAY => "Array", + Type::CHAR | Type::BPCHAR | Type::VARCHAR | Type::NAME | Type::TEXT => "string", + Type::CHAR_ARRAY + | Type::BPCHAR_ARRAY + | Type::VARCHAR_ARRAY + | Type::NAME_ARRAY + | Type::TEXT_ARRAY => "Array", + Type::INT2 | Type::INT4 | Type::INT8 | Type::NUMERIC => "number", + Type::INT2_ARRAY | Type::INT4_ARRAY | Type::INT8_ARRAY => "Array", + Type::FLOAT4 | Type::FLOAT8 => "number", + Type::FLOAT8_ARRAY | Type::FLOAT4_ARRAY => "Array", + Type::NUMERIC_ARRAY => "Array", + Type::BYTEA => "Array", + Type::BYTEA_ARRAY => "Array>", + 
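+            // The arms below map temporal, uuid, json and oid types; any Postgres type not
+            // explicitly matched falls back to `string` in the generated TypeScript signature.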
Type::DATE => "string", + Type::DATE_ARRAY => "Array", + Type::TIME => "string", + Type::TIME_ARRAY => "Array", + Type::TIMESTAMPTZ | Type::TIMESTAMP => "Date", + Type::TIMESTAMPTZ_ARRAY | Type::TIMESTAMP_ARRAY => "Array", + Type::UUID => "string", + Type::UUID_ARRAY => "Array", + Type::JSON | Type::JSONB | Type::JSON_ARRAY | Type::JSONB_ARRAY => "unknown", + Type::OID => "number", + Type::OID_ARRAY => "Array", + _ => "string", + }, + None => "string", + }; + + data_type.to_string() +} + +fn into_body_struct(language: Language, mapped_info: Vec) -> String { + let mut block = String::new(); + match language { + Language::Typescript => { + block.push_str("{\r\n"); + for field in mapped_info { + let typescript_type = postgres_to_typescript_type(field.data_type); + let mut key = field.column_name; + if field.is_nullable { + key.push('?'); + } + let full_field = format!("\t\t{}: {},\r\n", key, typescript_type); + block.push_str(&full_field); + } + block.push_str("\t}"); + } + } + block +} + +#[derive(Debug)] +pub struct MappingInfo { + data_type: Option, + is_nullable: bool, + column_name: String, +} + +impl MappingInfo { + pub fn new(column_name: String, data_type: Option, is_nullable: bool) -> Self { + Self { column_name, data_type, is_nullable } + } +} + +pub struct Mapper { + to_template: HashMap>>, + language: Language, +} + +impl Mapper { + pub fn new( + to_template: HashMap>>, + language: Language, + ) -> Self { + Self { to_template, language } + } + + fn into_typescript_template(self) -> Vec { + let mut struct_definitions = Vec::new(); + for (_, mapping_info) in self.to_template { + let last_elem = mapping_info.len() - 1; + for (i, (_, mapped_info)) in mapping_info.into_iter().enumerate() { + let mut struct_body = into_body_struct(Language::Typescript, mapped_info); + let struct_body = if i != last_elem { + struct_body.push_str("\r\n"); + struct_body + } else { + struct_body + }; + struct_definitions.push(struct_body); + } + } + struct_definitions + } + + pub fn get_template(self) -> String { + let struct_definition = match self.language { + Language::Typescript => self.into_typescript_template(), + }; + format!( + r#" + + +export async function main(database: {{ + transaction_type: "insert" | "update" | "delete"; + schema_name: string; + table_name: string; + row: {} +}}) {{ +}} + "#, + struct_definition.join("\t| "), + ) + } +} diff --git a/backend/windmill-api/src/database_triggers/mod.rs b/backend/windmill-api/src/database_triggers/mod.rs new file mode 100644 index 0000000000000..a44e819440493 --- /dev/null +++ b/backend/windmill-api/src/database_triggers/mod.rs @@ -0,0 +1,116 @@ +use serde_json::value::RawValue; +use std::collections::HashMap; + +use crate::{ + db::DB, + jobs::{run_flow_by_path_inner, run_script_by_path_inner, RunJobQuery}, + users::fetch_api_authed, +}; + +use axum::{ + routing::{delete, get, post}, + Router, +}; +use handler::{ + alter_publication, create_database_trigger, create_publication, create_slot, + delete_database_trigger, delete_publication, drop_slot_name, exists_database_trigger, + get_database_trigger, get_publication_info, get_template_script, list_database_publication, + list_database_triggers, list_slot_name, set_enabled, update_database_trigger, DatabaseTrigger, +}; +use windmill_common::{db::UserDB, utils::StripPath}; +use windmill_queue::PushArgsOwned; + +mod bool; +mod converter; +mod handler; +mod hex; +mod mapper; +mod relation; +mod replication_message; +mod trigger; + +pub use trigger::start_database; + +fn publication_service() -> Router 
{ + Router::new() + .route("/get/:publication_name/*path", get(get_publication_info)) + .route("/create/:publication_name/*path", post(create_publication)) + .route("/update/:publication_name/*path", post(alter_publication)) + .route( + "/delete/:publication_name/*path", + delete(delete_publication), + ) + .route("/list/*path", get(list_database_publication)) +} + +fn slot_service() -> Router { + Router::new() + .route("/list/*path", get(list_slot_name)) + .route("/create/*path", post(create_slot)) + .route("/delete/*path", delete(drop_slot_name)) +} + +pub fn workspaced_service() -> Router { + Router::new() + .route("/create", post(create_database_trigger)) + .route("/list", get(list_database_triggers)) + .route("/get/*path", get(get_database_trigger)) + .route("/update/*path", post(update_database_trigger)) + .route("/delete/*path", delete(delete_database_trigger)) + .route("/exists/*path", get(exists_database_trigger)) + .route("/setenabled/*path", post(set_enabled)) + .route("/get-template-script", post(get_template_script)) + .nest("/publication", publication_service()) + .nest("/slot", slot_service()) +} + +async fn run_job( + args: Option>>, + extra: Option>>, + db: &DB, + trigger: &DatabaseTrigger, +) -> anyhow::Result<()> { + let args = PushArgsOwned { args: args.unwrap_or_default(), extra }; + let label_prefix = Some(format!("db-{}-", trigger.path)); + + let authed = fetch_api_authed( + trigger.edited_by.clone(), + trigger.email.clone(), + &trigger.workspace_id, + db, + Some("anonymous".to_string()), + ) + .await?; + + let user_db = UserDB::new(db.clone()); + + let run_query = RunJobQuery::default(); + + if trigger.is_flow { + run_flow_by_path_inner( + authed, + db.clone(), + user_db, + trigger.workspace_id.clone(), + StripPath(trigger.script_path.to_owned()), + run_query, + args, + label_prefix, + ) + .await?; + } else { + run_script_by_path_inner( + authed, + db.clone(), + user_db, + trigger.workspace_id.clone(), + StripPath(trigger.script_path.to_owned()), + run_query, + args, + label_prefix, + ) + .await?; + } + + Ok(()) +} diff --git a/backend/windmill-api/src/database_triggers/relation.rs b/backend/windmill-api/src/database_triggers/relation.rs new file mode 100644 index 0000000000000..b589726cb653d --- /dev/null +++ b/backend/windmill-api/src/database_triggers/relation.rs @@ -0,0 +1,76 @@ +use core::str; + +use serde_json::{Map, Value}; +use std::{collections::HashMap, str::Utf8Error}; + +use super::{ + converter::{Converter, ConverterError}, + replication_message::{Columns, RelationBody, TupleData}, +}; +use rust_postgres::types::Oid; +#[derive(Debug, thiserror::Error)] +pub enum RelationConversionError { + #[error("Could not find matching table")] + FailToFindMatchingTable, + + #[error("Binary data not supported")] + BinaryFormatNotSupported, + + #[error("decode error: {0}")] + FromBytes(#[from] ConverterError), + + #[error("invalid string value")] + InvalidStr(#[from] Utf8Error), +} + +pub struct RelationConverter(HashMap); + +impl RelationConverter { + pub fn new() -> Self { + Self(HashMap::new()) + } + + pub fn add_relation(&mut self, relation: RelationBody) { + self.0.insert(relation.o_id, relation); + } + + pub fn get_columns(&self, o_id: Oid) -> Result<&Columns, RelationConversionError> { + self.0 + .get(&o_id) + .map(|relation_body| &relation_body.columns) + .ok_or(RelationConversionError::FailToFindMatchingTable) + } + + pub fn get_relation(&self, o_id: Oid) -> Result<&RelationBody, RelationConversionError> { + self.0 + .get(&o_id) + 
.ok_or(RelationConversionError::FailToFindMatchingTable) + } + + pub fn body_to_json( + &self, + to_decode: (Oid, Vec), + ) -> Result, RelationConversionError> { + let (o_id, tuple_data) = to_decode; + let mut object: Map = Map::new(); + let columns = self.get_columns(o_id)?; + + for (i, column) in columns.iter().enumerate() { + let value = match &tuple_data[i] { + TupleData::Null | TupleData::UnchangedToast => Value::Null, + TupleData::Binary(_) => { + return Err(RelationConversionError::BinaryFormatNotSupported) + } + TupleData::Text(bytes) => { + let str = str::from_utf8(&bytes[..])?; + Converter::try_from_str(column.type_o_id.as_ref(), str)? + } + }; + + object.insert(column.name.clone(), value); + } + let mut res = Map::new(); + res.insert("row".to_string(), Value::Object(object)); + Ok(res) + } +} diff --git a/backend/windmill-api/src/database_triggers/replication_message.rs b/backend/windmill-api/src/database_triggers/replication_message.rs new file mode 100644 index 0000000000000..99d08af0f2081 --- /dev/null +++ b/backend/windmill-api/src/database_triggers/replication_message.rs @@ -0,0 +1,502 @@ +#![allow(unused)] + +use core::str; +use std::{ + cmp, + io::{self, Read}, + str::Utf8Error, +}; + +use byteorder::{BigEndian, ReadBytesExt}; +use bytes::Bytes; +use memchr::memchr; +use rust_postgres::types::{Oid, Type}; +use thiserror::Error; + +use super::trigger::LogicalReplicationSettings; +const PRIMARY_KEEPALIVE_BYTE: u8 = b'k'; +const X_LOG_DATA_BYTE: u8 = b'w'; + +#[derive(Debug)] +pub struct PrimaryKeepAliveBody { + pub wal_end: u64, + pub timestamp: i64, + pub reply: bool, +} + +impl PrimaryKeepAliveBody { + pub fn new(wal_end: u64, timestamp: i64, reply: bool) -> PrimaryKeepAliveBody { + PrimaryKeepAliveBody { wal_end, timestamp, reply } + } +} + +const BEGIN_BYTE: u8 = b'B'; +const COMMIT_BYTE: u8 = b'C'; +const ORIGIN_BYTE: u8 = b'O'; +const RELATION_BYTE: u8 = b'R'; +const TYPE_BYTE: u8 = b'Y'; +const INSERT_BYTE: u8 = b'I'; +const UPDATE_BYTE: u8 = b'U'; +const DELETE_BYTE: u8 = b'D'; +const TUPLE_NEW_BYTE: u8 = b'N'; +const TUPLE_KEY_BYTE: u8 = b'K'; +const TUPLE_OLD_BYTE: u8 = b'O'; +const TUPLE_DATA_NULL_BYTE: u8 = b'n'; +const TUPLE_DATA_TOAST_BYTE: u8 = b'u'; +const TUPLE_DATA_TEXT_BYTE: u8 = b't'; +const TUPLE_DATA_BINARY_BYTE: u8 = b'b'; + +const REPLICA_IDENTITY_DEFAULT_BYTE: i8 = 0x64; +const REPLICA_IDENTITY_NOTHING_BYTE: i8 = 0x6E; +const REPLICA_IDENTITY_FULL_BYTE: i8 = 0x66; +const REPLICA_IDENTITY_INDEX_BYTE: i8 = 0x69; + +#[derive(Debug)] +pub enum ReplicaIdentity { + Default, + Nothing, + Full, + Index, +} + +#[derive(Debug)] +pub struct Column { + pub flags: i8, + pub name: String, + pub type_o_id: Option, + pub type_modifier: i32, +} + +impl Column { + pub fn new(flags: i8, name: String, type_o_id: Option, type_modifier: i32) -> Self { + Self { flags, name, type_o_id, type_modifier } + } +} + +pub type Columns = Vec; + +#[derive(Debug)] +pub struct RelationBody { + pub transaction_id: Option, + pub o_id: Oid, + pub namespace: String, + pub name: String, + pub replica_identity: ReplicaIdentity, + pub columns: Columns, +} + +impl RelationBody { + pub fn new( + transaction_id: Option, + o_id: Oid, + namespace: String, + name: String, + replica_identity: ReplicaIdentity, + columns: Columns, + ) -> Self { + Self { transaction_id, o_id, namespace, name, replica_identity, columns } + } +} + +#[derive(Debug)] +pub struct InsertBody { + pub transaction_id: Option, + pub o_id: Oid, + pub tuple: Vec, +} + +impl InsertBody { + pub fn new(transaction_id: Option, o_id: 
Oid, tuple: Vec) -> Self { + Self { transaction_id, o_id, tuple } + } +} + +#[derive(Debug)] +pub struct UpdateBody { + transaction_id: Option, + pub o_id: Oid, + pub old_tuple: Option>, + pub key_tuple: Option>, + pub new_tuple: Vec, +} + +impl UpdateBody { + pub fn new( + transaction_id: Option, + o_id: Oid, + old_tuple: Option>, + key_tuple: Option>, + new_tuple: Vec, + ) -> Self { + Self { transaction_id, o_id, old_tuple, key_tuple, new_tuple } + } +} + +#[derive(Debug)] +pub struct DeleteBody { + transaction_id: Option, + pub o_id: Oid, + pub old_tuple: Option>, + pub key_tuple: Option>, +} + +impl DeleteBody { + pub fn new( + transaction_id: Option, + o_id: Oid, + old_tuple: Option>, + key_tuple: Option>, + ) -> Self { + Self { transaction_id, o_id, old_tuple, key_tuple } + } +} + +#[derive(Debug)] +pub enum TupleData { + Null, + UnchangedToast, + Text(Bytes), + Binary(Bytes), +} + +impl TupleData { + fn parse(buf: &mut Buffer) -> Result, ConversionError> { + let number_of_columns = buf.read_i16::()?; + let mut tuples = Vec::with_capacity(number_of_columns as usize); + for _ in 0..number_of_columns { + let byte = buf.read_u8()?; + let tuple_data = match byte { + TUPLE_DATA_NULL_BYTE => TupleData::Null, + TUPLE_DATA_TOAST_BYTE => TupleData::UnchangedToast, + TUPLE_DATA_TEXT_BYTE => { + let len = buf.read_i32::()?; + let mut data = vec![0; len as usize]; + buf.read_exact(&mut data)?; + TupleData::Text(data.into()) + } + TUPLE_DATA_BINARY_BYTE => { + let len = buf.read_i32::()?; + let mut data = vec![0; len as usize]; + buf.read_exact(&mut data)?; + TupleData::Binary(data.into()) + } + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown replication message byte `{}`", byte), + ))); + } + }; + + tuples.push(tuple_data); + } + + Ok(tuples) + } +} + +#[derive(Debug)] +pub enum TransactionBody { + Insert(InsertBody), + Update(UpdateBody), + Delete(DeleteBody), +} + +#[non_exhaustive] +#[derive(Debug)] +pub enum LogicalReplicationMessage { + Begin, + Commit, + Relation(RelationBody), + Type, + Insert(InsertBody), + Update(UpdateBody), + Delete(DeleteBody), +} + +#[derive(Debug)] +pub struct XLogDataBody { + pub wal_start: u64, + pub wal_end: u64, + pub timestamp: i64, + pub data: Bytes, +} + +#[derive(Error, Debug)] +pub enum ConversionError { + #[error("Error: {0}")] + Io(#[from] io::Error), + #[error("Utf8Error conversion: {0}")] + Utf8(#[from] Utf8Error), +} + +struct Buffer { + bytes: Bytes, + idx: usize, +} + +impl Buffer { + pub fn new(bytes: Bytes, idx: usize) -> Buffer { + Buffer { bytes, idx } + } + + fn slice(&self) -> &[u8] { + &self.bytes[self.idx..] 
+ } + + fn read_cstr(&mut self) -> Result { + match memchr(0, self.slice()) { + Some(pos) => { + let start = self.idx; + let end = start + pos; + let cstr = str::from_utf8(&self.bytes[start..end])?.to_owned(); + self.idx = end + 1; + Ok(cstr) + } + None => Err(ConversionError::Io(io::Error::new( + io::ErrorKind::UnexpectedEof, + "unexpected EOF", + ))), + } + } +} + +impl Read for Buffer { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let len = { + let slice = self.slice(); + let len = cmp::min(slice.len(), buf.len()); + buf[..len].copy_from_slice(&slice[..len]); + len + }; + self.idx += len; + Ok(len) + } +} + +impl XLogDataBody { + pub fn new(wal_start: u64, wal_end: u64, timestamp: i64, data: Bytes) -> XLogDataBody { + XLogDataBody { wal_start, wal_end, timestamp, data } + } + + pub fn parse( + self, + logical_replication_settings: &LogicalReplicationSettings, + ) -> Result { + let mut buf = Buffer::new(self.data.clone(), 0); + let byte = buf.read_u8()?; + + let logical_replication_message = match byte { + BEGIN_BYTE => { + buf.read_i64::()?; + buf.read_i64::()?; + buf.read_i32::()?; + + LogicalReplicationMessage::Begin + } + COMMIT_BYTE => { + buf.read_i8()?; + buf.read_u64::()?; + buf.read_u64::()?; + buf.read_i64::()?; + LogicalReplicationMessage::Commit + } + RELATION_BYTE => { + let transaction_id = match logical_replication_settings.streaming { + true => Some(buf.read_i32::()?), + false => None, + }; + + let o_id = buf.read_u32::()?; + let namespace = buf.read_cstr()?; + let name = buf.read_cstr()?; + let replica_identity = match buf.read_i8()? { + REPLICA_IDENTITY_DEFAULT_BYTE => ReplicaIdentity::Default, + REPLICA_IDENTITY_NOTHING_BYTE => ReplicaIdentity::Nothing, + REPLICA_IDENTITY_FULL_BYTE => ReplicaIdentity::Full, + REPLICA_IDENTITY_INDEX_BYTE => ReplicaIdentity::Index, + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown replica identity byte `{}`", byte), + ))); + } + }; + + let num_of_column = buf.read_i16::()?; + + let mut columns = Vec::with_capacity(num_of_column as usize); + for _ in 0..num_of_column { + let flags = buf.read_i8()?; + let name = buf.read_cstr()?; + let o_id = buf.read_u32::()?; + let type_modifier = buf.read_i32::()?; + let type_o_id = Type::from_oid(o_id); + let column = Column::new(flags, name, type_o_id, type_modifier); + + columns.push(column); + } + + LogicalReplicationMessage::Relation(RelationBody::new( + transaction_id, + o_id, + namespace, + name, + replica_identity, + columns, + )) + } + TYPE_BYTE => { + buf.read_u32::()?; + buf.read_cstr()?; + buf.read_cstr()?; + + LogicalReplicationMessage::Type + } + INSERT_BYTE => { + let transaction_id = match logical_replication_settings.streaming { + true => Some(buf.read_i32::()?), + false => None, + }; + let o_id = buf.read_u32::()?; + let byte = buf.read_u8()?; + + let tuple = match byte { + TUPLE_NEW_BYTE => TupleData::parse(&mut buf)?, + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unexpected tuple byte `{}`", byte), + ))); + } + }; + + LogicalReplicationMessage::Insert(InsertBody::new(transaction_id, o_id, tuple)) + } + UPDATE_BYTE => { + let transaction_id = match logical_replication_settings.streaming { + true => Some(buf.read_i32::()?), + false => None, + }; + let o_id = buf.read_u32::()?; + let byte = buf.read_u8()?; + let mut key_tuple = None; + let mut old_tuple = None; + + let new_tuple = match byte { + TUPLE_NEW_BYTE => TupleData::parse(&mut buf)?, + TUPLE_OLD_BYTE | 
TUPLE_KEY_BYTE => { + if byte == TUPLE_OLD_BYTE { + old_tuple = Some(TupleData::parse(&mut buf)?); + } else { + key_tuple = Some(TupleData::parse(&mut buf)?); + } + match buf.read_u8()? { + TUPLE_NEW_BYTE => TupleData::parse(&mut buf)?, + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unexpected tuple byte `{}`", byte), + ))); + } + } + } + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown tuple byte `{}`", byte), + ))); + } + }; + + LogicalReplicationMessage::Update(UpdateBody::new( + transaction_id, + o_id, + old_tuple, + key_tuple, + new_tuple, + )) + } + DELETE_BYTE => { + let transaction_id = match logical_replication_settings.streaming { + true => Some(buf.read_i32::()?), + false => None, + }; + let o_id = buf.read_u32::()?; + let tag = buf.read_u8()?; + + let mut key_tuple = None; + let mut old_tuple = None; + + match tag { + TUPLE_OLD_BYTE => old_tuple = Some(TupleData::parse(&mut buf)?), + TUPLE_KEY_BYTE => key_tuple = Some(TupleData::parse(&mut buf)?), + tag => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown tuple tag `{}`", tag), + ))); + } + } + + LogicalReplicationMessage::Delete(DeleteBody::new( + transaction_id, + o_id, + old_tuple, + key_tuple, + )) + } + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown replication message tag `{}`", byte), + ))); + } + }; + + Ok(logical_replication_message) + } +} + +#[non_exhaustive] +#[derive(Debug)] +pub enum ReplicationMessage { + XLogData(XLogDataBody), + PrimaryKeepAlive(PrimaryKeepAliveBody), +} + +impl ReplicationMessage { + pub fn parse(buf: Bytes) -> io::Result { + let (byte, mut message) = buf.split_first().unwrap(); + + let replication_message = match *byte { + X_LOG_DATA_BYTE => { + let len = buf.len(); + let wal_start = message.read_u64::()?; + let wal_end = message.read_u64::()?; + let timestamp = message.read_i64::()?; + let len = len - message.len(); + let data = buf.slice(len..); + ReplicationMessage::XLogData(XLogDataBody::new(wal_start, wal_end, timestamp, data)) + } + PRIMARY_KEEPALIVE_BYTE => { + let wal_end = message.read_u64::()?; + let timestamp = message.read_i64::()?; + let reply = message.read_u8()?; + ReplicationMessage::PrimaryKeepAlive(PrimaryKeepAliveBody::new( + wal_end, + timestamp, + reply == 1, + )) + } + byte => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown replication message byte `{}`", byte), + )); + } + }; + + Ok(replication_message) + } +} diff --git a/backend/windmill-api/src/database_triggers/trigger.rs b/backend/windmill-api/src/database_triggers/trigger.rs new file mode 100644 index 0000000000000..df941dc16dbcd --- /dev/null +++ b/backend/windmill-api/src/database_triggers/trigger.rs @@ -0,0 +1,474 @@ +use std::{collections::HashMap, pin::Pin}; + +use crate::{ + database_triggers::{ + relation::RelationConverter, + replication_message::{ + LogicalReplicationMessage::{Begin, Commit, Delete, Insert, Relation, Type, Update}, + ReplicationMessage, + }, + run_job, + }, + db::DB, +}; +use bytes::{BufMut, Bytes, BytesMut}; +use chrono::TimeZone; +use futures::{pin_mut, SinkExt, StreamExt}; +use pg_escape::{quote_identifier, quote_literal}; +use rand::seq::SliceRandom; +use rust_postgres::{Client, Config, CopyBothDuplex, NoTls, SimpleQueryMessage}; +use serde_json::to_value; +use windmill_common::{variables::get_variable_or_self, 
worker::to_raw_value, INSTANCE_NAME}; + +use super::{ + handler::{Database, DatabaseTrigger}, + replication_message::PrimaryKeepAliveBody, +}; + +pub struct LogicalReplicationSettings { + pub streaming: bool, + #[allow(unused)] + pub binary: bool, +} + +impl LogicalReplicationSettings { + pub fn new(binary: bool, streaming: bool) -> Self { + Self { binary, streaming } + } +} + +#[allow(unused)] +trait RowExist { + fn row_exist(&self) -> bool; +} + +impl RowExist for Vec { + fn row_exist(&self) -> bool { + self.iter() + .find_map(|element| { + if let SimpleQueryMessage::CommandComplete(value) = element { + Some(*value) + } else { + None + } + }) + .is_some_and(|value| value > 0) + } +} + +#[derive(thiserror::Error, Debug)] +enum Error { + #[error("Error from database: {0}")] + Postgres(rust_postgres::Error), + #[error("Error : {0}")] + Common(windmill_common::error::Error), +} + +pub struct PostgresSimpleClient(Client); + +impl PostgresSimpleClient { + async fn new(database: &Database) -> Result { + let mut config = Config::new(); + config + .dbname(&database.dbname) + .host(&database.host) + .port(database.port) + .user(&database.user) + .replication_mode(rust_postgres::config::ReplicationMode::Logical); + + if !database.password.is_empty() { + config.password(&database.password); + } + + let (client, connection) = config.connect(NoTls).await.map_err(Error::Postgres)?; + tokio::spawn(async move { + if let Err(e) = connection.await { + tracing::debug!("{:#?}", e); + }; + tracing::info!("Successfully Connected into database"); + }); + + Ok(PostgresSimpleClient(client)) + } + + async fn get_logical_replication_stream( + &self, + publication_name: &str, + logical_replication_slot_name: &str, + ) -> Result<(CopyBothDuplex, LogicalReplicationSettings), Error> { + let binary_format = true; + let options = match binary_format { + true => format!( + r#"("proto_version" '2', "publication_names" {})"#, + //r#"("proto_version" '2', "publication_names" {}, "binary")"#, + quote_literal(publication_name), + ), + false => format!( + r#"("proto_version" '2', "publication_names" {})"#, + quote_literal(publication_name), + ), + }; + + let query = format!( + r#"START_REPLICATION SLOT {} LOGICAL 0/0 {}"#, + quote_identifier(logical_replication_slot_name), + options + ); + + Ok(( + self.0 + .copy_both_simple::(query.as_str()) + .await + .map_err(Error::Postgres)?, + LogicalReplicationSettings::new(binary_format, false), + )) + } + + async fn send_status_update( + primary_keep_alive: PrimaryKeepAliveBody, + copy_both_stream: &mut Pin<&mut CopyBothDuplex>, + ) { + let mut buf = BytesMut::new(); + let ts = chrono::Utc.with_ymd_and_hms(2000, 1, 1, 0, 0, 0).unwrap(); + let ts = chrono::Utc::now() + .signed_duration_since(ts) + .num_microseconds() + .unwrap_or(0); + + buf.put_u8(b'r'); + buf.put_u64(primary_keep_alive.wal_end); + buf.put_u64(primary_keep_alive.wal_end); + buf.put_u64(primary_keep_alive.wal_end); + buf.put_i64(ts); + buf.put_u8(0); + copy_both_stream.send(buf.freeze()).await.unwrap(); + tracing::info!("Send update status message"); + } +} + +async fn update_ping( + db: &DB, + database_trigger: &DatabaseTrigger, + error: Option<&str>, +) -> Option<()> { + match sqlx::query_scalar!( + r#" + UPDATE + database_trigger + SET + last_server_ping = now(), + error = $1 + WHERE + workspace_id = $2 + AND path = $3 + AND server_id = $4 + AND enabled IS TRUE + RETURNING 1 + "#, + error, + database_trigger.workspace_id, + database_trigger.path, + *INSTANCE_NAME + ) + .fetch_optional(db) + .await + { + Ok(updated) 
=> { + if updated.flatten().is_none() { + tracing::info!( + "Database {} changed, disabled, or deleted, stopping...", + database_trigger.path + ); + return None; + } + } + Err(err) => { + tracing::warn!( + "Error updating ping of database {}: {:?}", + database_trigger.path, + err + ); + return None; + } + }; + + Some(()) +} + +async fn loop_ping(db: &DB, database_trigger: &DatabaseTrigger, error: Option<&str>) { + loop { + if update_ping(db, database_trigger, error).await.is_none() { + return; + } + tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + } +} + +async fn listen_to_transactions( + database_trigger: &DatabaseTrigger, + db: DB, + mut killpill_rx: tokio::sync::broadcast::Receiver<()>, +) -> Result<(), Error> { + let resource = sqlx::query_scalar!( + "SELECT value from resource WHERE path = $1 AND workspace_id = $2", + &database_trigger.database_resource_path, + &database_trigger.workspace_id + ) + .fetch_optional(&db) + .await + .map_err(|e| Error::Common(windmill_common::error::Error::SqlErr(e)))? + .flatten(); + + let mut resource = match resource { + Some(resource) => serde_json::from_value::(resource) + .map_err(|e| Error::Common(windmill_common::error::Error::SerdeJson(e)))?, + None => { + return { + Err(Error::Common(windmill_common::error::Error::NotFound( + "Database resource do not exist".to_string(), + ))) + } + } + }; + + resource.password = + get_variable_or_self(resource.password, &db, &database_trigger.workspace_id) + .await + .map_err(Error::Common)?; + + let client = PostgresSimpleClient::new(&resource).await?; + + let (logical_replication_stream, logicail_replication_settings) = client + .get_logical_replication_stream( + &database_trigger.publication_name, + &database_trigger.replication_slot_name, + ) + .await?; + + pin_mut!(logical_replication_stream); + + tokio::select! 
{ + biased; + _ = killpill_rx.recv() => { + Ok(()) + } + _ = loop_ping(&db, database_trigger, None) => { + Ok(()) + } + _ = async { + let mut relations = RelationConverter::new(); + tracing::info!("Start to listen for database transaction"); + loop { + let message = logical_replication_stream.next().await; + + if message.is_none() { + tracing::info!("Stream is empty leaving...."); + return; + } + + let message = message.unwrap(); + + if let Err(err) = &message { + tracing::debug!("{}", err.to_string()); + update_ping(&db, database_trigger, Some(&err.to_string())).await; + return; + } + + let message = message.unwrap(); + + let logical_message = match ReplicationMessage::parse(message) { + Ok(logical_message) => logical_message, + Err(err) => { + tracing::debug!("{}", err.to_string()); + update_ping(&db, database_trigger, Some(&err.to_string())).await; + return; + } + }; + + + match logical_message { + ReplicationMessage::PrimaryKeepAlive(primary_keep_alive) => { + if primary_keep_alive.reply { + PostgresSimpleClient::send_status_update(primary_keep_alive, &mut logical_replication_stream).await; + } + } + ReplicationMessage::XLogData(x_log_data) => { + let logical_replication_message = match x_log_data.parse(&logicail_replication_settings) { + Ok(logical_replication_message) => logical_replication_message, + Err(err) => { + update_ping(&db, database_trigger, Some(&err.to_string())).await; + return; + } + }; + + let json = match logical_replication_message { + Relation(relation_body) => { + relations.add_relation(relation_body); + None + } + Begin | Type | Commit => { + None + } + Insert(insert) => { + Some((insert.o_id, relations.body_to_json((insert.o_id, insert.tuple)), "insert")) + } + Update(update) => { + Some((update.o_id, relations.body_to_json((update.o_id, update.new_tuple)), "update")) + } + Delete(delete) => { + let body = delete.old_tuple.unwrap_or(delete.key_tuple.unwrap()); + Some((delete.o_id, relations.body_to_json((delete.o_id, body)), "delete")) + } + }; + if let Some((o_id, Ok(mut body), transaction_type)) = json { + let relation = match relations.get_relation(o_id) { + Ok(relation) => relation, + Err(err) => { + update_ping(&db, database_trigger, Some(&err.to_string())).await; + return; + } + }; + let database_info = HashMap::from([("schema_name".to_string(), relation.namespace.as_str()), ("table_name".to_string(), relation.name.as_str()), ("transaction_type".to_string(), transaction_type)]); + let extra = Some(HashMap::from([( + "wm_trigger".to_string(), + to_raw_value(&serde_json::json!({"kind": "database", })), + )])); + let object = to_value(&database_info).unwrap(); + body.insert("trigger_info".to_string(), object); + let body = HashMap::from([("database".to_string(), to_raw_value(&serde_json::json!(body)))]); + let _ = run_job(Some(body), extra, &db, database_trigger).await; + continue; + } + + } + } + } + } => { + Ok(()) + } + } +} + +async fn try_to_listen_to_database_transactions( + db_trigger: DatabaseTrigger, + db: DB, + killpill_rx: tokio::sync::broadcast::Receiver<()>, +) { + let database_trigger = sqlx::query_scalar!( + r#" + UPDATE database_trigger + SET + server_id = $1, + last_server_ping = now() + WHERE + enabled IS TRUE + AND workspace_id = $2 + AND path = $3 + AND ( + server_id IS NULL + OR last_server_ping IS NULL + OR last_server_ping < now() - INTERVAL '15 seconds' + ) + RETURNING true + "#, + *INSTANCE_NAME, + db_trigger.workspace_id, + db_trigger.path, + ) + .fetch_optional(&db) + .await; + match database_trigger { + Ok(has_lock) => { + if 
has_lock.flatten().unwrap_or(false) { + tracing::info!("Spawning new task to listen_to_database_transaction"); + tokio::spawn(async move { + let result = listen_to_transactions(&db_trigger, db.clone(), killpill_rx).await; + if let Err(e) = result { + update_ping(&db, &db_trigger, Some(e.to_string().as_str())).await; + }; + }); + } else { + tracing::info!("Database {} already being listened to", db_trigger.path); + } + } + Err(err) => { + tracing::error!( + "Error acquiring lock for database {}: {:?}", + db_trigger.path, + err + ); + } + }; +} + +async fn listen_to_unlistened_database_events( + db: &DB, + killpill_rx: &tokio::sync::broadcast::Receiver<()>, +) { + let database_triggers = sqlx::query_as!( + DatabaseTrigger, + r#" + SELECT + workspace_id, + path, + script_path, + replication_slot_name, + publication_name, + is_flow, + edited_by, + email, + edited_at, + server_id, + last_server_ping, + extra_perms, + error, + enabled, + database_resource_path + FROM + database_trigger + WHERE + enabled IS TRUE + AND ( + server_id IS NULL OR + last_server_ping IS NULL OR + last_server_ping < now() - interval '15 seconds' + ) + "# + ) + .fetch_all(db) + .await; + + match database_triggers { + Ok(mut triggers) => { + triggers.shuffle(&mut rand::thread_rng()); + for trigger in triggers { + try_to_listen_to_database_transactions( + trigger, + db.clone(), + killpill_rx.resubscribe(), + ) + .await; + } + } + Err(err) => { + tracing::error!("Error fetching database triggers: {:?}", err); + } + }; +} + +pub async fn start_database(db: DB, mut killpill_rx: tokio::sync::broadcast::Receiver<()>) { + tokio::spawn(async move { + listen_to_unlistened_database_events(&db, &killpill_rx).await; + loop { + tokio::select! { + biased; + _ = killpill_rx.recv() => { + return; + } + _ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => { + listen_to_unlistened_database_events(&db, &killpill_rx).await + } + } + } + }); +} diff --git a/backend/windmill-api/src/lib.rs b/backend/windmill-api/src/lib.rs index 969c796c4c785..5ec20da95661b 100644 --- a/backend/windmill-api/src/lib.rs +++ b/backend/windmill-api/src/lib.rs @@ -59,6 +59,8 @@ mod auth; mod capture; mod concurrency_groups; mod configs; +#[cfg(feature = "database")] +mod database_triggers; mod db; mod drafts; pub mod ee; @@ -93,6 +95,7 @@ mod scim_ee; mod scripts; mod service_logs; mod settings; +mod slack_approvals; #[cfg(feature = "smtp")] mod smtp_server_ee; mod static_assets; @@ -109,7 +112,6 @@ mod websocket_triggers; mod workers; mod workspaces; mod workspaces_ee; -mod slack_approvals; mod workspaces_export; mod workspaces_extra; @@ -286,6 +288,11 @@ pub async fn run_server( let kafka_killpill_rx = rx.resubscribe(); kafka_triggers_ee::start_kafka_consumers(db.clone(), kafka_killpill_rx).await; } + #[cfg(feature = "database")] + { + let db_killpill_rx = rx.resubscribe(); + database_triggers::start_database(db.clone(), db_killpill_rx).await; + } } // build our application with a route @@ -354,7 +361,16 @@ pub async fn run_server( #[cfg(not(feature = "websocket"))] Router::new() }) - .nest("/kafka_triggers", kafka_triggers_service), + .nest("/kafka_triggers", kafka_triggers_service) + .nest("/database_triggers", { + #[cfg(feature = "database")] + { + database_triggers::workspaced_service() + } + + #[cfg(not(feature = "database"))] + Router::new() + }), ) .nest("/workspaces", workspaces::global_service()) .nest( @@ -414,7 +430,10 @@ pub async fn run_server( jobs::workspace_unauthed_service().layer(cors.clone()), ) .route("/slack", 
post(slack_approvals::slack_app_callback_handler)) - .route("/w/:workspace_id/jobs/slack_approval/:job_id", get(slack_approvals::request_slack_approval)) + .route( + "/w/:workspace_id/jobs/slack_approval/:job_id", + get(slack_approvals::request_slack_approval), + ) .nest( "/w/:workspace_id/resources_u", resources::public_service().layer(cors.clone()), diff --git a/backend/windmill-api/src/scripts.rs b/backend/windmill-api/src/scripts.rs index 15d95a031b3cd..2c8a2e5f7165b 100644 --- a/backend/windmill-api/src/scripts.rs +++ b/backend/windmill-api/src/scripts.rs @@ -28,6 +28,7 @@ use axum::{ Json, Router, }; use hyper::StatusCode; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use serde_json::json; use serde_json::value::RawValue; @@ -199,7 +200,6 @@ async fn list_scripts( Query(lq): Query, ) -> JsonResult> { let (per_page, offset) = paginate(pagination); - let mut sqlb = SqlBuilder::select_from("script as o") .fields(&[ "hash", @@ -286,6 +286,15 @@ async fn list_scripts( sqlb.and_where_in("kind", lowercased_kinds.as_slice()); } } + + if let Some(languages) = lq.languages { + let languages = languages + .into_iter() + .map(quote) + .collect_vec(); + sqlb.and_where_in("language", languages.as_slice()); + } + if lq.starred_only.unwrap_or(false) { sqlb.and_where_is_not_null("favorite.path"); } diff --git a/backend/windmill-api/src/triggers.rs b/backend/windmill-api/src/triggers.rs index bc734fc6ea0f6..4fea6d3ae2c86 100644 --- a/backend/windmill-api/src/triggers.rs +++ b/backend/windmill-api/src/triggers.rs @@ -19,6 +19,7 @@ pub struct TriggersCount { email_count: i64, websocket_count: i64, kafka_count: i64, + database_count: i64 } pub(crate) async fn get_triggers_count_internal( db: &DB, @@ -75,6 +76,16 @@ pub(crate) async fn get_triggers_count_internal( .await? .unwrap_or(0); + let database_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM database_trigger WHERE script_path = $1 AND is_flow = $2 AND workspace_id = $3", + path, + is_flow, + w_id + ) + .fetch_one(db) + .await? + .unwrap_or(0); + let webhook_count = (if is_flow { sqlx::query_scalar!( "SELECT COUNT(*) FROM token WHERE label LIKE 'webhook-%' AND workspace_id = $1 AND scopes @> ARRAY['run:flow/' || $2]::text[]", @@ -117,6 +128,7 @@ pub(crate) async fn get_triggers_count_internal( email_count, websocket_count, kafka_count, + database_count })) } diff --git a/backend/windmill-api/src/variables.rs b/backend/windmill-api/src/variables.rs index 3c51694b9e6fa..d688f50ccadd6 100644 --- a/backend/windmill-api/src/variables.rs +++ b/backend/windmill-api/src/variables.rs @@ -26,12 +26,12 @@ use windmill_common::{ error::{Error, JsonResult, Result}, utils::{not_found_if_none, paginate, Pagination, StripPath}, variables::{ - build_crypt, get_reserved_variables, ContextualVariable, CreateVariable, ListableVariable, + build_crypt, decrypt, encrypt, get_reserved_variables, ContextualVariable, CreateVariable, + ListableVariable, }, }; use lazy_static::lazy_static; -use magic_crypt::{MagicCrypt256, MagicCryptError, MagicCryptTrait}; use serde::Deserialize; use sqlx::{Postgres, Transaction}; use windmill_git_sync::{handle_deployment_metadata, DeployedObject}; @@ -676,17 +676,3 @@ pub async fn get_value_internal<'c>( Ok(r) } - -pub fn encrypt(mc: &MagicCrypt256, value: &str) -> String { - mc.encrypt_str_to_base64(value) -} - -pub fn decrypt(mc: &MagicCrypt256, value: String) -> Result { - mc.decrypt_base64_to_string(value).map_err(|e| match e { - MagicCryptError::DecryptError(_) => Error::InternalErr( - "Could not decrypt value. 
The value may have been encrypted with a different key." - .to_string(), - ), - _ => Error::InternalErr(e.to_string()), - }) -} diff --git a/backend/windmill-api/src/websocket_triggers.rs b/backend/windmill-api/src/websocket_triggers.rs index 6fbdff49caa6a..3c2fa6b37cc3b 100644 --- a/backend/windmill-api/src/websocket_triggers.rs +++ b/backend/windmill-api/src/websocket_triggers.rs @@ -380,7 +380,7 @@ async fn exists_websocket_trigger( async fn listen_to_unlistened_websockets( db: &DB, killpill_rx: &tokio::sync::broadcast::Receiver<()>, -) -> () { +) { match sqlx::query_as::<_, WebsocketTrigger>( r#"SELECT * FROM websocket_trigger diff --git a/backend/windmill-api/src/workspaces.rs b/backend/windmill-api/src/workspaces.rs index ea07df6171c66..f01ba255c1a1f 100644 --- a/backend/windmill-api/src/workspaces.rs +++ b/backend/windmill-api/src/workspaces.rs @@ -35,7 +35,7 @@ use windmill_audit::ActionKind; use windmill_common::db::UserDB; use windmill_common::s3_helpers::LargeFileStorage; use windmill_common::users::username_to_permissioned_as; -use windmill_common::variables::build_crypt; +use windmill_common::variables::{build_crypt, decrypt, encrypt}; use windmill_common::worker::to_raw_value; #[cfg(feature = "enterprise")] use windmill_common::workspaces::WorkspaceDeploymentUISettings; @@ -52,7 +52,6 @@ use windmill_git_sync::handle_deployment_metadata; #[cfg(feature = "enterprise")] use windmill_common::utils::require_admin_or_devops; -use crate::variables::{decrypt, encrypt}; use hyper::StatusCode; use serde::{Deserialize, Serialize}; use sqlx::{FromRow, Postgres, Transaction}; @@ -1303,6 +1302,7 @@ struct UsedTriggers { pub websocket_used: bool, pub http_routes_used: bool, pub kafka_used: bool, + pub database_used: bool, } async fn get_used_triggers( @@ -1313,11 +1313,16 @@ async fn get_used_triggers( let mut tx = user_db.begin(&authed).await?; let websocket_used = sqlx::query_as!( UsedTriggers, - r#"SELECT - EXISTS(SELECT 1 FROM websocket_trigger WHERE workspace_id = $1) as "websocket_used!", - EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) as "http_routes_used!", - EXISTS(SELECT 1 FROM kafka_trigger WHERE workspace_id = $1) as "kafka_used!""#, - w_id, + r#" + SELECT + + EXISTS(SELECT 1 FROM websocket_trigger WHERE workspace_id = $1) AS "websocket_used!", + + EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) AS "http_routes_used!", + EXISTS(SELECT 1 FROM kafka_trigger WHERE workspace_id = $1) as "kafka_used!", + EXISTS(SELECT 1 FROM database_trigger WHERE workspace_id = $1) AS "database_used!" 
+ "#, + w_id ) .fetch_one(&mut *tx) .await?; diff --git a/backend/windmill-api/src/workspaces_export.rs b/backend/windmill-api/src/workspaces_export.rs index 774bc97053e56..8c0cd1c85aefb 100644 --- a/backend/windmill-api/src/workspaces_export.rs +++ b/backend/windmill-api/src/workspaces_export.rs @@ -25,18 +25,15 @@ use axum::{ use http::HeaderName; use itertools::Itertools; -use windmill_common::db::UserDB; -use windmill_common::schedule::Schedule; -use windmill_common::variables::build_crypt; - use windmill_common::{ + db::UserDB, error::{to_anyhow, Error, Result}, flows::Flow, + schedule::Schedule, scripts::{Schema, Script, ScriptLang}, - variables::ExportableListableVariable, + variables::{build_crypt, decrypt, ExportableListableVariable}, }; -use crate::variables::decrypt; use hyper::header; use serde::{Deserialize, Serialize}; use serde_json::Value; diff --git a/backend/windmill-common/Cargo.toml b/backend/windmill-common/Cargo.toml index 2c689cecfbbbd..8f1bc299ea9d9 100644 --- a/backend/windmill-common/Cargo.toml +++ b/backend/windmill-common/Cargo.toml @@ -61,6 +61,7 @@ async-stream.workspace = true const_format.workspace = true crc.workspace = true windmill-macros.workspace = true + semver.workspace = true croner = "2.0.6" quick_cache.workspace = true diff --git a/backend/windmill-common/src/lib.rs b/backend/windmill-common/src/lib.rs index 4140f6e58b700..bbc252d80d7d7 100644 --- a/backend/windmill-common/src/lib.rs +++ b/backend/windmill-common/src/lib.rs @@ -247,18 +247,16 @@ pub async fn connect_db( Err(_) => { if server_mode { DEFAULT_MAX_CONNECTIONS_SERVER + } else if indexer_mode { + DEFAULT_MAX_CONNECTIONS_INDEXER } else { - if indexer_mode { - DEFAULT_MAX_CONNECTIONS_INDEXER - } else { - DEFAULT_MAX_CONNECTIONS_WORKER - + std::env::var("NUM_WORKERS") - .ok() - .map(|x| x.parse().ok()) - .flatten() - .unwrap_or(1) - - 1 - } + DEFAULT_MAX_CONNECTIONS_WORKER + + std::env::var("NUM_WORKERS") + .ok() + .map(|x| x.parse().ok()) + .flatten() + .unwrap_or(1) + - 1 } } }; diff --git a/backend/windmill-common/src/scripts.rs b/backend/windmill-common/src/scripts.rs index 0fb05e6f45c73..7b39b0ca00a43 100644 --- a/backend/windmill-common/src/scripts.rs +++ b/backend/windmill-common/src/scripts.rs @@ -19,6 +19,7 @@ use crate::{ use crate::worker::HUB_CACHE_DIR; use anyhow::Context; +use itertools::Itertools; use serde::de::Error as _; use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize}; @@ -73,6 +74,35 @@ impl ScriptLang { } } +impl TryFrom<&str> for ScriptLang { + type Error = String; + fn try_from(value: &str) -> Result { + let language = match value { + "bun" => Self::Bun, + "bunnative" => Self::Bunnative, + "nativets" => Self::Nativets, + "deno" => Self::Deno, + "python3" => Self::Python3, + "go" => Self::Go, + "bash" => Self::Bash, + "powershell" => Self::Powershell, + "postgresql" => Self::Postgresql, + "mysql" => Self::Mysql, + "bigquery" => Self::Bigquery, + "snowflake" => Self::Snowflake, + "mssql" => Self::Mssql, + "graphql" => Self::Graphql, + "php" => Self::Php, + "rust" => Self::Rust, + "ansible" => Self::Ansible, + "csharp" => Self::CSharp, + _ => return Err("Language not supported".to_string()), + }; + + Ok(language) + } +} + #[derive(Eq, PartialEq, Debug, Hash, Clone, Copy, sqlx::Type)] #[sqlx(transparent)] pub struct ScriptHash(pub i64); @@ -366,6 +396,36 @@ pub struct ListScriptQuery { pub include_without_main: Option, pub include_draft_only: Option, pub with_deployment_msg: Option, + #[serde(default, deserialize_with = "check_if_valid_language")] + 
pub languages: Option>, +} + +fn check_if_valid_language<'de, D>( + language: D, +) -> std::result::Result>, D::Error> +where + D: Deserializer<'de>, +{ + let languages: Option = Option::deserialize(language)?; + + let languages = languages.unwrap(); + let languages = languages + .split(",") + .to_owned() + .collect_vec() + .into_iter() + .map(|language| language.to_string()) + .collect_vec(); + + for language in languages.iter() { + if ScriptLang::try_from(language.as_str()).is_err() { + return Err(serde::de::Error::custom(format!( + "language {} is not supported", + language + ))); + } + } + Ok(Some(languages)) } pub fn to_i64(s: &str) -> crate::error::Result { diff --git a/backend/windmill-common/src/variables.rs b/backend/windmill-common/src/variables.rs index 26cbca7f08413..bf1a79d25a421 100644 --- a/backend/windmill-common/src/variables.rs +++ b/backend/windmill-common/src/variables.rs @@ -6,12 +6,12 @@ * LICENSE-AGPL for a copy of the license. */ +use crate::error::{Error, Result}; +use crate::{worker::WORKER_GROUP, BASE_URL, DB}; use chrono::{SecondsFormat, Utc}; use magic_crypt::{MagicCrypt256, MagicCryptError, MagicCryptTrait}; use serde::{Deserialize, Serialize}; -use crate::{worker::WORKER_GROUP, BASE_URL, DB}; - lazy_static::lazy_static! { pub static ref SECRET_SALT: Option = std::env::var("SECRET_SALT").ok(); } @@ -109,6 +109,47 @@ pub async fn get_workspace_key(w_id: &str, db: &DB) -> crate::error::Result String { + mc.encrypt_str_to_base64(value) +} + +pub fn decrypt(mc: &MagicCrypt256, value: String) -> Result { + mc.decrypt_base64_to_string(value).map_err(|e| match e { + MagicCryptError::DecryptError(_) => Error::InternalErr( + "Could not decrypt value. The value may have been encrypted with a different key." + .to_string(), + ), + _ => Error::InternalErr(e.to_string()), + }) +} + +struct Variable { + value: String, + is_secret: bool, +} + +pub async fn get_variable_or_self(path: String, db: &DB, w_id: &str) -> Result { + if !path.starts_with("$var:") { + return Ok(path); + } + let path = path.strip_prefix("$var:").unwrap().to_string(); + let mut variable = sqlx::query_as!( + Variable, + "SELECT value, is_secret + FROM variable + WHERE path = $1 AND workspace_id = $2", + &path, + &w_id + ) + .fetch_one(db) + .await?; + if variable.is_secret { + let mc = build_crypt(db, w_id).await?; + variable.value = decrypt(&mc, variable.value)?; + } + Ok(variable.value) +} + pub async fn get_secret_value_as_admin( db: &DB, w_id: &str, @@ -132,7 +173,7 @@ pub async fn get_secret_value_as_admin( let r = if variable.is_secret { let value = variable.value; if !value.is_empty() { - let mc = build_crypt(&db, &w_id).await?; + let mc = build_crypt(db, w_id).await?; decrypt_value_with_mc(value, mc).await? } else { "".to_string() @@ -144,17 +185,14 @@ pub async fn get_secret_value_as_admin( Ok(r) } -pub async fn decrypt_value_with_mc( - value: String, - mc: MagicCrypt256, -) -> Result { - Ok(mc.decrypt_base64_to_string(value).map_err(|e| match e { +pub async fn decrypt_value_with_mc(value: String, mc: MagicCrypt256) -> Result { + mc.decrypt_base64_to_string(value).map_err(|e| match e { MagicCryptError::DecryptError(_) => crate::error::Error::InternalErr( "Could not decrypt value. The value may have been encrypted with a different key." .to_string(), ), _ => crate::error::Error::InternalErr(e.to_string()), - })?) 
+ }) } pub const WM_SCHEDULED_FOR: &str = "WM_SCHEDULED_FOR"; diff --git a/backend/windmill-queue/Cargo.toml b/backend/windmill-queue/Cargo.toml index a89a9d56fac91..a90fa6ccf74b5 100644 --- a/backend/windmill-queue/Cargo.toml +++ b/backend/windmill-queue/Cargo.toml @@ -39,7 +39,6 @@ futures-core.workspace = true futures.workspace = true itertools.workspace = true async-recursion.workspace = true -bigdecimal.workspace = true axum.workspace = true serde_urlencoded.workspace = true regex.workspace = true diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 1cb3b75f390de..61da3386b03ac 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1,12 +1,12 @@ { "name": "windmill-components", - "version": "1.444.0", + "version": "1.444.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "windmill-components", - "version": "1.444.0", + "version": "1.444.2", "license": "AGPL-3.0", "dependencies": { "@anthropic-ai/sdk": "^0.32.1", diff --git a/frontend/src/lib/components/ApiConnectForm.svelte b/frontend/src/lib/components/ApiConnectForm.svelte index 680b54850e8c2..33c3ffb5e6a9d 100644 --- a/frontend/src/lib/components/ApiConnectForm.svelte +++ b/frontend/src/lib/components/ApiConnectForm.svelte @@ -24,8 +24,10 @@ let supabaseWizard = false async function isSupabaseAvailable() { - supabaseWizard = - ((await OauthService.listOauthConnects()) ?? {})['supabase_wizard'] != undefined + try { + supabaseWizard = + ((await OauthService.listOauthConnects()) ?? {})['supabase_wizard'] != undefined + } catch (error) {} } async function loadSchema() { if (!resourceTypeInfo) return diff --git a/frontend/src/lib/components/AppConnectInner.svelte b/frontend/src/lib/components/AppConnectInner.svelte index bd10721792e0b..e0109fb451f76 100644 --- a/frontend/src/lib/components/AppConnectInner.svelte +++ b/frontend/src/lib/components/AppConnectInner.svelte @@ -74,7 +74,7 @@ let scopes: string[] = [] let extra_params: [string, string][] = [] - + let oauthApi = true let path: string let description = '' @@ -107,8 +107,12 @@ } async function loadConnects() { - if (!connects) { - connects = (await OauthService.listOauthConnects()).filter((x) => x != 'supabase_wizard') + try { + if (!connects) { + connects = (await OauthService.listOauthConnects()).filter((x) => x != 'supabase_wizard') + } + } catch (error) { + oauthApi = false } } @@ -387,30 +391,32 @@ /> -

OAuth APIs

-
- {#if filteredConnects} - {#each filteredConnects as { key }} - - {/each} - {:else} - {#each new Array(3) as _} - - {/each} - {/if} -
+ {#if oauthApi} +

OAuth APIs

+
+ {#if filteredConnects} + {#each filteredConnects as { key }} + + {/each} + {:else} + {#each new Array(3) as _} + + {/each} + {/if} +
+ {/if} {#if connects && connects.length == 0}
No OAuth APIs has been setup on the instance. To add oauth APIs, first sync the resource diff --git a/frontend/src/lib/components/FlowBuilder.svelte b/frontend/src/lib/components/FlowBuilder.svelte index 97c0bce3504a9..e6d4002af2190 100644 --- a/frontend/src/lib/components/FlowBuilder.svelte +++ b/frontend/src/lib/components/FlowBuilder.svelte @@ -478,7 +478,7 @@ const selectedIdStore = writable(selectedId ?? 'settings-metadata') const selectedTriggerStore = writable< - 'webhooks' | 'emails' | 'schedules' | 'cli' | 'routes' | 'websockets' | 'scheduledPoll' + 'webhooks' | 'emails' | 'schedules' | 'cli' | 'routes' | 'websockets' | 'database' | 'scheduledPoll' >('webhooks') export function getSelectedId() { @@ -507,6 +507,7 @@ | 'cli' | 'routes' | 'websockets' + | 'database' | 'scheduledPoll' ) { selectedTriggerStore.set(selectedTrigger) diff --git a/frontend/src/lib/components/Path.svelte b/frontend/src/lib/components/Path.svelte index 0ff3ff56bc09c..c1aacf0ae806c 100644 --- a/frontend/src/lib/components/Path.svelte +++ b/frontend/src/lib/components/Path.svelte @@ -15,7 +15,10 @@ HttpTriggerService, VariableService, WebsocketTriggerService, - KafkaTriggerService + KafkaTriggerService, + + DatabaseTriggerService + } from '$lib/gen' import { superadmin, userStore, workspaceStore } from '$lib/stores' import { createEventDispatcher, getContext } from 'svelte' @@ -39,6 +42,7 @@ | 'http_trigger' | 'websocket_trigger' | 'kafka_trigger' + | 'database_trigger' let meta: Meta | undefined = undefined export let fullNamePlaceholder: string | undefined = undefined export let namePlaceholder = '' @@ -232,6 +236,11 @@ workspace: $workspaceStore!, path: path }) + } else if (kind == 'database_trigger') { + return await DatabaseTriggerService.existsDatabaseTrigger({ + workspace: $workspaceStore!, + path: path + }) } else { return false } diff --git a/frontend/src/lib/components/ScriptBuilder.svelte b/frontend/src/lib/components/ScriptBuilder.svelte index 85a928a2e81f8..6fc4de54b57e4 100644 --- a/frontend/src/lib/components/ScriptBuilder.svelte +++ b/frontend/src/lib/components/ScriptBuilder.svelte @@ -10,7 +10,13 @@ } from '$lib/gen' import { inferArgs } from '$lib/infer' import { initialCode } from '$lib/script_helpers' - import { defaultScripts, enterpriseLicense, userStore, workspaceStore } from '$lib/stores' + import { + databaseTrigger, + defaultScripts, + enterpriseLicense, + userStore, + workspaceStore + } from '$lib/stores' import { cleanValueProperties, emptySchema, @@ -228,10 +234,24 @@ function initContent( language: SupportedLanguage, kind: Script['kind'] | undefined, - template: 'pgsql' | 'mysql' | 'script' | 'docker' | 'powershell' | 'bunnative' + template: + | 'pgsql' + | 'mysql' + | 'flow' + | 'script' + | 'fetch' + | 'docker' + | 'powershell' + | 'bunnative' + | 'preprocessor' + | undefined ) { scriptEditor?.disableCollaboration() - script.content = initialCode(language, kind, template) + let getInitBlockTemplate = $databaseTrigger?.codeTemplate != undefined + script.content = initialCode(language, kind, template, getInitBlockTemplate) + if (getInitBlockTemplate) { + script.content += '\r\n' + $databaseTrigger?.codeTemplate + } scriptEditor?.inferSchema(script.content, language, true) if (script.content != editor?.getCode()) { setCode(script.content) diff --git a/frontend/src/lib/components/ScriptPicker.svelte b/frontend/src/lib/components/ScriptPicker.svelte index 80ecbe54728bf..1fabfd5abfe64 100644 --- a/frontend/src/lib/components/ScriptPicker.svelte +++ 
b/frontend/src/lib/components/ScriptPicker.svelte @@ -29,6 +29,7 @@ export let allowRefresh = false export let allowEdit = true export let allowView = true + export let languages: string | undefined = undefined let items: { value: string; label: string }[] = [] let drawerViewer: Drawer @@ -48,7 +49,11 @@ })) } else if (itemKind == 'script') { items = ( - await ScriptService.listScripts({ workspace: $workspaceStore!, kinds: kinds.join(',') }) + await ScriptService.listScripts({ + workspace: $workspaceStore!, + kinds: kinds.join(','), + languages + }) ).map((script) => ({ value: script.path, label: `${script.path}${script.summary ? ` | ${truncate(script.summary, 20)}` : ''}` diff --git a/frontend/src/lib/components/details/DetailPageDetailPanel.svelte b/frontend/src/lib/components/details/DetailPageDetailPanel.svelte index f9e4f927b9f1e..3f61a1b6b73a2 100644 --- a/frontend/src/lib/components/details/DetailPageDetailPanel.svelte +++ b/frontend/src/lib/components/details/DetailPageDetailPanel.svelte @@ -12,6 +12,7 @@ | 'cli' | 'routes' | 'websockets' + | 'database' | 'scheduledPoll' = 'webhooks' export let flow_json: any | undefined = undefined export let simplfiedPoll: boolean = false @@ -52,6 +53,7 @@ + diff --git a/frontend/src/lib/components/details/DetailPageLayout.svelte b/frontend/src/lib/components/details/DetailPageLayout.svelte index d951f757411c4..97458eefa6c10 100644 --- a/frontend/src/lib/components/details/DetailPageLayout.svelte +++ b/frontend/src/lib/components/details/DetailPageLayout.svelte @@ -20,7 +20,14 @@ const primaryScheduleStore = writable(undefined) const selectedTriggerStore = writable< - 'webhooks' | 'emails' | 'schedules' | 'cli' | 'routes' | 'websockets' | 'scheduledPoll' + | 'webhooks' + | 'emails' + | 'schedules' + | 'cli' + | 'routes' + | 'websockets' + | 'database' + | 'scheduledPoll' >('webhooks') const simplifiedPoll = writable(false) @@ -55,6 +62,7 @@ + @@ -100,6 +108,7 @@ + diff --git a/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte b/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte index e0a3d0b7f02d8..9eaf0dd54c199 100644 --- a/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte +++ b/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte @@ -13,6 +13,7 @@ | 'routes' | 'websockets' | 'kafka' + | 'database' | 'scheduledPoll' = 'webhooks' export let simplfiedPoll: boolean = false @@ -51,6 +52,12 @@ Kafka + + + + Database + + @@ -79,6 +86,8 @@ {:else if triggerSelected === 'kafka'} + {:else if triggerSelected === 'database'} + {:else if triggerSelected === 'cli'} {/if} diff --git a/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte b/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte index cb930bfdf81c2..677ec183caa13 100644 --- a/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte +++ b/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte @@ -1,5 +1,5 @@ diff --git a/frontend/src/lib/components/sidebar/SidebarContent.svelte b/frontend/src/lib/components/sidebar/SidebarContent.svelte index 1ce863ff32c25..3b4d989697070 100644 --- a/frontend/src/lib/components/sidebar/SidebarContent.svelte +++ b/frontend/src/lib/components/sidebar/SidebarContent.svelte @@ -34,7 +34,8 @@ UserCog, Plus, Unplug, - AlertCircle + AlertCircle, + Database } from 'lucide-svelte' import Menu from '../common/menu/MenuV2.svelte' import MenuButton from './MenuButton.svelte' @@ -79,7 +80,6 @@ (link) => $usedTriggerKinds.includes(link.kind) 
|| $page.url.pathname.includes(link.href) ) ] - async function leaveWorkspace() { await WorkspaceService.leaveWorkspace({ workspace: $workspaceStore ?? '' }) sendUserToast('You left the workspace') @@ -108,13 +108,19 @@ icon: KafkaIcon, disabled: $userStore?.operator || !$enterpriseLicense, kind: 'kafka' + }, + { + label: 'Database', + href: '/database_triggers', + icon: Database, + disabled: $userStore?.operator, + kind: 'database' } ] $: extraTriggerLinks = defaultExtraTriggerLinks.filter((link) => { return !$page.url.pathname.includes(link.href) && !$usedTriggerKinds.includes(link.kind) }) - $: secondaryMenuLinks = [ // { // label: 'Workspace', @@ -336,7 +342,6 @@ {#if subItem.icon} {/if} - {subItem.label}
diff --git a/frontend/src/lib/components/triggers.ts b/frontend/src/lib/components/triggers.ts index e585c7d989621..d69e2bde6b7e2 100644 --- a/frontend/src/lib/components/triggers.ts +++ b/frontend/src/lib/components/triggers.ts @@ -48,6 +48,7 @@ export type TriggerKind = | 'websockets' | 'scheduledPoll' | 'kafka' + | 'database' export function captureTriggerKindToTriggerKind(kind: CaptureTriggerKind): TriggerKind { switch (kind) { diff --git a/frontend/src/lib/components/triggers/CaptureWrapper.svelte b/frontend/src/lib/components/triggers/CaptureWrapper.svelte index 03625869a9f31..0614c2f596e59 100644 --- a/frontend/src/lib/components/triggers/CaptureWrapper.svelte +++ b/frontend/src/lib/components/triggers/CaptureWrapper.svelte @@ -5,11 +5,11 @@ import { capitalize, isObject, sendUserToast, sleep } from '$lib/utils' import { isCloudHosted } from '$lib/cloud' import Alert from '../common/alert/Alert.svelte' - import RouteEditorConfigSection from './RouteEditorConfigSection.svelte' - import WebsocketEditorConfigSection from './WebsocketEditorConfigSection.svelte' - import WebhooksConfigSection from './WebhooksConfigSection.svelte' + import RouteEditorConfigSection from './http/RouteEditorConfigSection.svelte' + import WebsocketEditorConfigSection from './websocket/WebsocketEditorConfigSection.svelte' + import WebhooksConfigSection from './webhook/WebhooksConfigSection.svelte' import EmailTriggerConfigSection from '../details/EmailTriggerConfigSection.svelte' - import KafkaTriggersConfigSection from './KafkaTriggersConfigSection.svelte' + import KafkaTriggersConfigSection from './kafka/KafkaTriggersConfigSection.svelte' import type { ConnectionInfo } from '../common/alert/ConnectionIndicator.svelte' import type { CaptureInfo } from './CaptureSection.svelte' import CaptureTable from './CaptureTable.svelte' diff --git a/frontend/src/lib/components/triggers/TriggersEditor.svelte b/frontend/src/lib/components/triggers/TriggersEditor.svelte index f82b1d0e4ac70..b2ae036d2b3d0 100644 --- a/frontend/src/lib/components/triggers/TriggersEditor.svelte +++ b/frontend/src/lib/components/triggers/TriggersEditor.svelte @@ -1,18 +1,19 @@ + + + +{#if open} + + + {#if drawerLoading} + + {:else} +
+
+

+ Choose which kinds of database transactions you want to track. The allowed operations are + Insert, Update, and Delete. + 
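On the PostgreSQL side, the set of tracked operations corresponds to the publication's publish parameter; a minimal SQL sketch, assuming a hypothetical publication name:
-- emit only INSERT and UPDATE events, dropping DELETE
ALTER PUBLICATION windmill_example_pub SET (publish = 'insert, update');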

+ + +
+ +
+

+ Enter an existing replication slot name or provide a new one to be created. A + replication slot persistently tracks database changes and ensures no data is missed + even when your application is offline. Each subscriber should use a unique slot to + prevent data loss. If you enter a new name, the slot will be created automatically + during connection. +
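A replication slot like the one described above can also be created and inspected by hand; a minimal SQL sketch, assuming a hypothetical slot name and the pgoutput plugin used for logical replication:
-- create a logical replication slot (normally done automatically during connection)
SELECT * FROM pg_create_logical_replication_slot('windmill_example_slot', 'pgoutput');
-- list existing slots and how far each consumer has confirmed
SELECT slot_name, plugin, confirmed_flush_lsn FROM pg_replication_slots;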

+ +
+ { + replication_slot_name = '' + }} + > + + + + {#if selectedSlotAction === 'create'} +
+ + +
+ {:else} + + {/if} +
+
+ +
+
+

+ Specify the PostgreSQL publication to track changes in your database.
A publication + defines which tables to monitor this can include specific tables, all tables in a schema, + or even all tables across the database. +

+

+ Specify the columns to track for the selected tables in the publication. If no + columns are specified, all columns will be tracked by default.
Ensure that any + specified columns are part of the table's replica identity when tracking UPDATE and DELETE + transactions. +
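Replica identity is configured on the table itself; a minimal SQL sketch, assuming a hypothetical table and unique index:
-- log all old column values for UPDATE/DELETE rows (simplest, but largest WAL footprint)
ALTER TABLE public.orders REPLICA IDENTITY FULL;
-- or reuse a unique, non-partial index on NOT NULL columns
-- ALTER TABLE public.orders REPLICA IDENTITY USING INDEX orders_id_key;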

+

+ Specify a condition to filter rows for the selected transaction types (INSERT, + UPDATE, DELETE) within the published tables. Note: do not include the WHERE keyword; + only write the condition (e.g., status = 'active' AND price >= 100).
The condition + allows only simple expressions and cannot include user-defined functions, operators, + types, or collations; system column references; or non-immutable built-in functions. + To filter UPDATE or DELETE transactions, ensure the table's replica identity is appropriately + configured (e.g., set to FULL or include the necessary columns). Use logical operators + like AND, OR, and NOT. Leave empty to track all rows. +
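Taken together, the table list, column list, and row filter describe a publication; a minimal hand-written SQL sketch of an equivalent publication, assuming hypothetical names and PostgreSQL 15+ for column lists and row filters:
CREATE PUBLICATION windmill_example_pub
  FOR TABLE public.orders (id, status, price)
  WITH (publish = 'insert, update, delete');
-- a row filter is declared the same way; with UPDATE/DELETE it needs the replica
-- identity to cover the filtered columns (e.g. FULL, as shown earlier):
-- CREATE PUBLICATION windmill_active_pub FOR TABLE public.orders WHERE (status = 'active' AND price >= 100);
-- or, without any restriction:
-- CREATE PUBLICATION windmill_example_all FOR ALL TABLES;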

+ { + publication_name = '' + relations = [] + transaction_to_track = [] + transaction_to_track = transaction_to_track + }} + > + + + + {#if selectedPublicAction === 'create'} +
+ + +
+ {:else} + + {/if} + + + + + + + {#if showAddSchema} +
+ {#if relations && relations.length > 0} + {#each relations as v, i} +
+
+ + {#each v.table_to_track as table_to_track, j} +
+
+ + + + + +
+
+ {/each} + +
+ +
+ {/each} + {/if} +
+ +
+
+ {/if} +
+
+
+ {/if} +
+
+{/if} diff --git a/frontend/src/lib/components/triggers/database/DatabaseTriggerEditor.svelte b/frontend/src/lib/components/triggers/database/DatabaseTriggerEditor.svelte new file mode 100644 index 0000000000000..55c13f949c8f9 --- /dev/null +++ b/frontend/src/lib/components/triggers/database/DatabaseTriggerEditor.svelte @@ -0,0 +1,23 @@ + + +{#if open} + +{/if} diff --git a/frontend/src/lib/components/triggers/database/DatabaseTriggerEditorInner.svelte b/frontend/src/lib/components/triggers/database/DatabaseTriggerEditorInner.svelte new file mode 100644 index 0000000000000..190d41c04fb97 --- /dev/null +++ b/frontend/src/lib/components/triggers/database/DatabaseTriggerEditorInner.svelte @@ -0,0 +1,332 @@ + + + + + + + + {#if !drawerLoading && can_write} + {#if edit} +
+ { + await DatabaseTriggerService.setDatabaseTriggerEnabled({ + path: initialPath, + workspace: $workspaceStore ?? '', + requestBody: { enabled: e.detail } + }) + sendUserToast( + `${e.detail ? 'enabled' : 'disabled'} database trigger ${initialPath}` + ) + }} + /> +
+ {/if} + + {/if} +
+ {#if drawerLoading} + + {:else} + + {#if edit} + Changes can take up to 30 seconds to take effect. + {:else} + New database triggers can take up to 30 seconds to start listening. + {/if} + +
+
+ +
+ +
+

+ Pick a database to connect to +
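One assumption worth stating about the picked database: logical replication only works if the server has wal_level set to logical and the role used by the resource has replication rights. A minimal SQL sketch, with a hypothetical role name:
SHOW wal_level;  -- should report 'logical' (set in postgresql.conf; changing it requires a restart)
ALTER ROLE windmill_example_user WITH REPLICATION LOGIN;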

+
+ +
+
+ +
+

+ Choose which tables of your database to track, as well as what kinds of transactions should + fire the script.
+ You must pick a database resource first before you can configure your trigger. + +

+ +
+ +
+

+ Pick a script or flow to be triggered +

+
+ + + {#if script_path === undefined && is_flow === false} +
+ + To enable this feature, select a database resource, and in the database config + create or pick a publication from your database. +
+ {/if} +
+
+
+ {/if} +
+
diff --git a/frontend/src/lib/components/triggers/database/DatabaseTriggersPanel.svelte b/frontend/src/lib/components/triggers/database/DatabaseTriggersPanel.svelte new file mode 100644 index 0000000000000..a7f92beb9f88c --- /dev/null +++ b/frontend/src/lib/components/triggers/database/DatabaseTriggersPanel.svelte @@ -0,0 +1,103 @@ + + + { + loadTriggers() + }} + bind:this={databaseTriggerEditor} +/> + +
+ {#if !newItem} + {#if isCloudHosted()} + + Database triggers are disabled in the multi-tenant cloud. + + {:else if $userStore?.is_admin || $userStore?.is_super_admin} + + {:else} + + {/if} + {/if} + + {#if databaseTriggers} + {#if databaseTriggers.length == 0} +
No Database triggers
+ {:else} +
+ {#each databaseTriggers as databaseTriggers (databaseTriggers.path)} +
+
{databaseTriggers.path}
+
+ +
+
+ {/each} +
+ {/if} + {:else} + + {/if} + + {#if newItem} + + Deploy the {isFlow ? 'flow' : 'script'} to add Database triggers. + + {/if} +
diff --git a/frontend/src/lib/components/triggers/database/PublicationPicker.svelte b/frontend/src/lib/components/triggers/database/PublicationPicker.svelte new file mode 100644 index 0000000000000..90307b3565a2e --- /dev/null +++ b/frontend/src/lib/components/triggers/database/PublicationPicker.svelte @@ -0,0 +1,131 @@ + + + + +
+ + +
diff --git a/frontend/src/lib/components/triggers/RouteEditor.svelte b/frontend/src/lib/components/triggers/http/RouteEditor.svelte similarity index 100% rename from frontend/src/lib/components/triggers/RouteEditor.svelte rename to frontend/src/lib/components/triggers/http/RouteEditor.svelte diff --git a/frontend/src/lib/components/triggers/RouteEditorConfigSection.svelte b/frontend/src/lib/components/triggers/http/RouteEditorConfigSection.svelte similarity index 95% rename from frontend/src/lib/components/triggers/RouteEditorConfigSection.svelte rename to frontend/src/lib/components/triggers/http/RouteEditorConfigSection.svelte index 7526dac0439f3..1f827bb6cc417 100644 --- a/frontend/src/lib/components/triggers/RouteEditorConfigSection.svelte +++ b/frontend/src/lib/components/triggers/http/RouteEditorConfigSection.svelte @@ -12,10 +12,10 @@ // import { page } from '$app/stores' import { isCloudHosted } from '$lib/cloud' import { base } from '$lib/base' - import type { CaptureInfo } from './CaptureSection.svelte' - import CaptureSection from './CaptureSection.svelte' - import CaptureTable from './CaptureTable.svelte' - import ClipboardPanel from '../details/ClipboardPanel.svelte' + import type { CaptureInfo } from '../CaptureSection.svelte' + import CaptureSection from '../CaptureSection.svelte' + import CaptureTable from '../CaptureTable.svelte' + import ClipboardPanel from '../../details/ClipboardPanel.svelte' export let dirtyRoutePath: boolean = false export let route_path = '' diff --git a/frontend/src/lib/components/triggers/RouteEditorInner.svelte b/frontend/src/lib/components/triggers/http/RouteEditorInner.svelte similarity index 95% rename from frontend/src/lib/components/triggers/RouteEditorInner.svelte rename to frontend/src/lib/components/triggers/http/RouteEditorInner.svelte index 14f7332bd35eb..47da62dfe6351 100644 --- a/frontend/src/lib/components/triggers/RouteEditorInner.svelte +++ b/frontend/src/lib/components/triggers/http/RouteEditorInner.svelte @@ -12,16 +12,16 @@ import Section from '$lib/components/Section.svelte' import { Loader2, Save, Pipette } from 'lucide-svelte' import Label from '$lib/components/Label.svelte' - import ToggleButton from '../common/toggleButton-v2/ToggleButton.svelte' - import ToggleButtonGroup from '../common/toggleButton-v2/ToggleButtonGroup.svelte' - import S3FilePicker from '../S3FilePicker.svelte' - import Toggle from '../Toggle.svelte' - import JsonEditor from '../apps/editor/settingsPanel/inputEditor/JsonEditor.svelte' - import FileUpload from '../common/fileUpload/FileUpload.svelte' - import SimpleEditor from '../SimpleEditor.svelte' import { json } from 'svelte-highlight/languages' import { Highlight } from 'svelte-highlight' + import JsonEditor from '$lib/components/apps/editor/settingsPanel/inputEditor/JsonEditor.svelte' + import FileUpload from '$lib/components/common/fileUpload/FileUpload.svelte' + import ToggleButton from '$lib/components/common/toggleButton-v2/ToggleButton.svelte' + import ToggleButtonGroup from '$lib/components/common/toggleButton-v2/ToggleButtonGroup.svelte' + import S3FilePicker from '$lib/components/S3FilePicker.svelte' + import Toggle from '$lib/components/Toggle.svelte' import RouteEditorConfigSection from './RouteEditorConfigSection.svelte' + import SimpleEditor from '$lib/components/SimpleEditor.svelte' let is_flow: boolean = false let initialPath = '' diff --git a/frontend/src/lib/components/triggers/RoutesPanel.svelte b/frontend/src/lib/components/triggers/http/RoutesPanel.svelte similarity index 91% 
rename from frontend/src/lib/components/triggers/RoutesPanel.svelte rename to frontend/src/lib/components/triggers/http/RoutesPanel.svelte index 4bb8f97d4dcad..b9e11b7be1fba 100644 --- a/frontend/src/lib/components/triggers/RoutesPanel.svelte +++ b/frontend/src/lib/components/triggers/http/RoutesPanel.svelte @@ -1,15 +1,14 @@ + + + + (x.summary ?? '') + ' ' + x.path + ' (' + x.script_path + ')'} +/> + + + + + + + {#if isCloudHosted()} + + Database triggers are disabled in the multi-tenant cloud. + +
+ {/if} +
+
+ +
+
Filter by path of
+ + + + +
+ + +
+ {#if $userStore?.is_super_admin && $userStore.username.includes('@')} + + {:else if $userStore?.is_admin || $userStore?.is_super_admin} + + {/if} +
+
+ {#if loading} + {#each new Array(6) as _} + + {/each} + {:else if !triggers?.length} +
No database triggers
+ {:else if items?.length} +
+ {#each items.slice(0, nbDisplayed) as { path, edited_by, error, edited_at, script_path, is_flow, extra_perms, canWrite, enabled } (path)} + {@const href = `${is_flow ? '/flows/get' : '/scripts/get'}/${script_path}`} + {@const ping = new Date()} + +
+
+ + + databaseTriggerEditor?.openEdit(path, is_flow)} + class="min-w-0 grow hover:underline decoration-gray-400" + > +
+ {path} +
+
+ runnable: {script_path} +
+
+ + + +
+ {#if (enabled && (!ping || ping.getTime() < new Date().getTime() - 15 * 1000 || error)) || (!enabled && error)} + + + + + +
+ {#if enabled} + Could not connect to database{error ? ': ' + error : ''} + {:else} + Disabled because of an error: {error} + {/if} +
+
+ {:else if enabled} + + + + +
Connected to database
+
+ {/if} +
+ + { + setTriggerEnabled(path, e.detail) + }} + /> + +
+ + { + goto(href) + } + }, + { + displayName: 'Delete', + type: 'delete', + icon: Trash, + disabled: !canWrite, + action: async () => { + await DatabaseTriggerService.deleteDatabaseTrigger({ + workspace: $workspaceStore ?? '', + path + }) + loadTriggers() + } + }, + { + displayName: canWrite ? 'Edit' : 'View', + icon: canWrite ? Pen : Eye, + action: () => { + databaseTriggerEditor?.openEdit(path, is_flow) + } + }, + { + displayName: 'Audit logs', + icon: Eye, + href: `${base}/audit_logs?resource=${path}` + }, + { + displayName: canWrite ? 'Share' : 'See Permissions', + icon: Share, + action: () => { + shareModal.openDrawer(path, 'websocket_trigger') + } + } + ]} + /> +
+
+
+
edited by {edited_by}
the {displayDate(edited_at)}
+
+ {/each} +
+ {:else} + + {/if} +
+ {#if items && items?.length > 15 && nbDisplayed < items.length} + {nbDisplayed} items out of {items.length} + + {/if} + + + { + loadTriggers() + }} +/> diff --git a/frontend/src/routes/(root)/(logged)/flows/get/[...path]/+page.svelte b/frontend/src/routes/(root)/(logged)/flows/get/[...path]/+page.svelte index cfc4c6b96f7ba..d2056b152cd36 100644 --- a/frontend/src/routes/(root)/(logged)/flows/get/[...path]/+page.svelte +++ b/frontend/src/routes/(root)/(logged)/flows/get/[...path]/+page.svelte @@ -42,7 +42,7 @@ } from 'lucide-svelte' import DetailPageHeader from '$lib/components/details/DetailPageHeader.svelte' - import WebhooksPanel from '$lib/components/triggers/WebhooksPanel.svelte' + import WebhooksPanel from '$lib/components/triggers/webhook/WebhooksPanel.svelte' import CliHelpBox from '$lib/components/CliHelpBox.svelte' import FlowGraphViewer from '$lib/components/FlowGraphViewer.svelte' import RunPageSchedules from '$lib/components/RunPageSchedules.svelte' @@ -55,13 +55,14 @@ import FlowHistory from '$lib/components/flows/FlowHistory.svelte' import EmailTriggerPanel from '$lib/components/details/EmailTriggerPanel.svelte' import Star from '$lib/components/Star.svelte' - import RoutesPanel from '$lib/components/triggers/RoutesPanel.svelte' + import RoutesPanel from '$lib/components/triggers/http/RoutesPanel.svelte' import { Highlight } from 'svelte-highlight' import json from 'svelte-highlight/languages/json' import { writable } from 'svelte/store' import TriggersBadge from '$lib/components/graph/renderers/triggers/TriggersBadge.svelte' - import WebsocketTriggersPanel from '$lib/components/triggers/WebsocketTriggersPanel.svelte' - import KafkaTriggersPanel from '$lib/components/triggers/KafkaTriggersPanel.svelte' + import WebsocketTriggersPanel from '$lib/components/triggers/websocket/WebsocketTriggersPanel.svelte' + import DatabaseTriggersPanel from '$lib/components/triggers/database/DatabaseTriggersPanel.svelte' + import KafkaTriggersPanel from '$lib/components/triggers/kafka/KafkaTriggersPanel.svelte' let flow: Flow | undefined let can_write = false @@ -547,6 +548,11 @@
+ +
+ +
+
+ +
+ +
+