From fc7b9d6cdfb2b8a3356d048ae6239c3ad5385e5a Mon Sep 17 00:00:00 2001 From: Claude Date: Sat, 3 Jan 2026 17:30:57 +0000 Subject: [PATCH] Consolidate and clean up migration files for safe re-runs This commit reorganizes the migration files to: 1. Make all queries safe for re-runs using IF NOT EXISTS/IF EXISTS patterns 2. Consolidate ALTER TABLE ADD COLUMN statements into original CREATE TABLE 3. Remove redundant migrations where tables/views were created then dropped 4. Remove migrations that were superseded by later ones 5. Renumber files for a clean sequential order (0001-0007) Key changes: - 0001: Merged all ALTER TABLE statements, wrapped CREATE TYPE in DO blocks with EXCEPTION handling, removed materialized views that get dropped, included final procedure version, added ON CONFLICT DO NOTHING to inserts - 0002: Changed retention from 14 to 30 days, wrapped CREATE TYPE in DO blocks, consolidated dashboard columns (teams, file_path, file_sha), absorbed log_pattern and summary_pattern columns - 0003: Unchanged (already safe) - 0004: Rebuilt issues table without DEFERRABLE constraint (it was dropped anyway) - 0005: Simplified teams table (removed redundant ALTER TABLE statements) - 0006: Simplified github_sync (removed dashboard column ALTERs now in 0002) - 0007: Unchanged (already safe) Removed 14 redundant migration files that were either: - Superseded by later migrations (0006, 0008, 0009) - Created objects that were later dropped (0010, 0011, 0012) - Already consolidated into earlier migrations (0013, 0014, 0015, 0016x2) - Updating procedures already in 0001 (0018, 0021) - Self-canceling operations (0019 added then removed columns) --- .../0001_create_monoscope_tables.sql | 313 +++++++----------- .../migrations/0002_logs_traces_metrics.sql | 41 ++- ...able.sql => 0004_rebuild_issues_table.sql} | 10 +- ...7_teams_table.sql => 0005_teams_table.sql} | 7 +- .../0006_add_llm_enhancement_tracking.sql | 20 -- ...0_github_sync.sql => 0006_github_sync.sql} | 9 - ...2_query_cache.sql => 0007_query_cache.sql} | 0 ...use_non_defferable_contraint_on_issues.sql | 80 ----- .../0009_make_retention_policy_30.sql | 9 - static/migrations/0010_requbild_ers_view.sql | 48 --- static/migrations/0011_host_request_stats.sql | 45 --- .../0012_drop_materialized_views.sql | 6 - static/migrations/0013_log_patterns.sql | 6 - static/migrations/0014_alerts_config.sql | 4 - static/migrations/0015_summary_pattern.sql | 4 - ...p_overly_restrictive_issues_constraint.sql | 17 - .../0016_reports_start_and_end_time.sql | 4 - .../0018_store_monitor_runs_as_events.sql | 93 ------ static/migrations/0019_add_trace_id.sql | 20 -- .../0021_move_monitors_to_haskell.sql | 11 - 20 files changed, 160 insertions(+), 587 deletions(-) rename static/migrations/{0007_rebuild_issues_table.sql => 0004_rebuild_issues_table.sql} (91%) rename static/migrations/{0017_teams_table.sql => 0005_teams_table.sql} (82%) delete mode 100644 static/migrations/0006_add_llm_enhancement_tracking.sql rename static/migrations/{0020_github_sync.sql => 0006_github_sync.sql} (75%) rename static/migrations/{0022_query_cache.sql => 0007_query_cache.sql} (100%) delete mode 100644 static/migrations/0008_use_non_defferable_contraint_on_issues.sql delete mode 100644 static/migrations/0009_make_retention_policy_30.sql delete mode 100644 static/migrations/0010_requbild_ers_view.sql delete mode 100644 static/migrations/0011_host_request_stats.sql delete mode 100644 static/migrations/0012_drop_materialized_views.sql delete mode 100644 
static/migrations/0013_log_patterns.sql delete mode 100644 static/migrations/0014_alerts_config.sql delete mode 100644 static/migrations/0015_summary_pattern.sql delete mode 100644 static/migrations/0016_drop_overly_restrictive_issues_constraint.sql delete mode 100644 static/migrations/0016_reports_start_and_end_time.sql delete mode 100644 static/migrations/0018_store_monitor_runs_as_events.sql delete mode 100644 static/migrations/0019_add_trace_id.sql delete mode 100644 static/migrations/0021_move_monitors_to_haskell.sql diff --git a/static/migrations/0001_create_monoscope_tables.sql b/static/migrations/0001_create_monoscope_tables.sql index 5f4faab27..912a313c5 100644 --- a/static/migrations/0001_create_monoscope_tables.sql +++ b/static/migrations/0001_create_monoscope_tables.sql @@ -88,39 +88,44 @@ CREATE TABLE IF NOT EXISTS users.persistent_sessions ); SELECT manage_updated_at('users.persistent_sessions'); -CREATE TYPE notification_channel_enum AS ENUM ('email', 'slack'); -ALTER TYPE notification_channel_enum ADD VALUE 'discord'; -ALTER TYPE notification_channel_enum ADD VALUE 'phone'; +-- Create notification_channel_enum with all values at once (safe for re-runs) +DO $$ BEGIN + CREATE TYPE notification_channel_enum AS ENUM ('email', 'slack', 'discord', 'phone'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; CREATE TABLE IF NOT EXISTS projects.projects ( - id UUID NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY, - created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, - updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, - deleted_at TIMESTAMP WITH TIME ZONE, - active BOOL NOT NULL DEFAULT 't', - title TEXT NOT NULL DEFAULT '', - description TEXT NOT NULL DEFAULT '', - payment_plan TEXT NOT NULL DEFAULT 'Free', - questions JSONB DEFAULT NULL, - daily_notif BOOL DEFAULT FALSE, - weekly_notif BOOL DEFAULT TRUE, - time_zone TEXT DEFAULT 'UTC', - notifications_channel notification_channel_enum[] DEFAULT ARRAY['email']::notification_channel_enum[], - sub_id TEXT, - first_sub_item_id TEXT, - order_id TEXT, - usage_last_reported TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp + id UUID NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, + deleted_at TIMESTAMP WITH TIME ZONE, + active BOOL NOT NULL DEFAULT 't', + title TEXT NOT NULL DEFAULT '', + description TEXT NOT NULL DEFAULT '', + payment_plan TEXT NOT NULL DEFAULT 'Free', + questions JSONB DEFAULT NULL, + daily_notif BOOL DEFAULT FALSE, + weekly_notif BOOL DEFAULT TRUE, + time_zone TEXT DEFAULT 'UTC', + notifications_channel notification_channel_enum[] DEFAULT ARRAY['email']::notification_channel_enum[], + sub_id TEXT, + first_sub_item_id TEXT, + order_id TEXT, + usage_last_reported TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, + -- Columns consolidated from ALTER TABLE statements + discord_url TEXT DEFAULT NULL, + billing_day TIMESTAMP WITH TIME ZONE DEFAULT current_timestamp, + onboarding_steps_completed TEXT[] DEFAULT ARRAY[]::TEXT[], + notify_phone_number TEXT DEFAULT NULL, + notify_emails TEXT[] DEFAULT ARRAY[]::TEXT[], + whatsapp_numbers TEXT[] DEFAULT ARRAY[]::TEXT[], + s3_bucket JSONB DEFAULT NULL, + endpoint_alerts BOOL NOT NULL DEFAULT TRUE, + error_alerts BOOL NOT NULL DEFAULT TRUE ); SELECT manage_updated_at('projects.projects'); -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS discord_url TEXT DEFAULT NULL; 
-ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS billing_day TIMESTAMP WITH TIME ZONE DEFAULT current_timestamp; -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS onboarding_steps_completed TEXT[] DEFAULT ARRAY[]::TEXT[]; -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS notify_phone_number TEXT DEFAULT NULL; -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS notify_emails TEXT[] DEFAULT ARRAY[]::TEXT[]; -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS whatsapp_numbers TEXT[] DEFAULT ARRAY[]::TEXT[]; -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS s3_bucket JSONB DEFAULT NULL; ----------------------------------------------------------------------- @@ -128,7 +133,11 @@ ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS s3_bucket JSONB DEFAULT N -- query patterns: ----------------------------------------------------------------------- -CREATE TYPE projects.project_permissions AS ENUM ('admin', 'view', 'edit'); +DO $$ BEGIN + CREATE TYPE projects.project_permissions AS ENUM ('admin', 'view', 'edit'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; + CREATE TABLE IF NOT EXISTS projects.project_members ( id UUID NOT NULL DEFAULT gen_random_uuid(), @@ -177,12 +186,12 @@ CREATE TABLE IF NOT EXISTS apis.swagger_jsons updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, created_by UUID NOT NULL REFERENCES users.users (id) ON DELETE CASCADE ON UPDATE CASCADE, project_id UUID NOT NULL REFERENCES projects.projects (id) ON DELETE CASCADE, - swagger_json JSONB NOT NULL + swagger_json JSONB NOT NULL, + host TEXT NOT NULL DEFAULT ''::TEXT ); SELECT manage_updated_at('apis.swagger_jsons'); CREATE INDEX IF NOT EXISTS idx_swagger_jsons_project_id ON apis.swagger_jsons(project_id); -ALTER TABLE apis.swagger_jsons ADD COLUMN host TEXT NOT NULL DEFAULT ''::TEXT; ----------------------------------------------------------------------- -- ENDPOINTS table @@ -242,20 +251,29 @@ CREATE TABLE IF NOT EXISTS apis.shapes deleted_fields text[] NOT NULL DEFAULT '{}'::TEXT[], -- All fields associated with this shape which are updates updated_field_formats text[] NOT NULL DEFAULT '{}'::TEXT[], - status_code int DEFAULT 0 + status_code int DEFAULT 0, + -- Consolidated from ALTER TABLE + response_description TEXT NOT NULL DEFAULT ''::TEXT, + request_description TEXT NOT NULL DEFAULT ''::TEXT ); SELECT manage_updated_at('apis.shapes'); CREATE INDEX IF NOT EXISTS idx_apis_shapes_project_id ON apis.shapes(project_id); CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_shapes_hash ON apis.shapes(hash); -ALTER TABLE apis.shapes ADD COLUMN response_description TEXT NOT NULL DEFAULT ''::TEXT; -ALTER TABLE apis.shapes ADD COLUMN request_description TEXT NOT NULL DEFAULT ''::TEXT; ----------------------------------------------------------------------- -- FIELDS table ----------------------------------------------------------------------- -CREATE TYPE apis.field_type AS ENUM ('unknown','string','number','bool','object', 'list', 'null'); -CREATE TYPE apis.field_category AS ENUM ('path_param','query_param', 'request_header','response_header', 'request_body', 'response_body'); +DO $$ BEGIN + CREATE TYPE apis.field_type AS ENUM ('unknown','string','number','bool','object', 'list', 'null'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; + +DO $$ BEGIN + CREATE TYPE apis.field_category AS ENUM ('path_param','query_param', 'request_header','response_header', 'request_body', 'response_body'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; + CREATE TABLE IF NOT EXISTS 
apis.fields ( id uuid NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY, @@ -275,13 +293,14 @@ CREATE TABLE IF NOT EXISTS apis.fields field_category apis.field_category NOT NULL DEFAULT 'request_body'::apis.field_category, -- the hash of a field is the + ,,> - hash text NOT NULL DEFAULT ''::TEXT + hash text NOT NULL DEFAULT ''::TEXT, + -- Consolidated from ALTER TABLE + is_enum BOOL NOT NULL DEFAULT 'f', + is_required BOOL NOT NULL DEFAULT 'f' ); SELECT manage_updated_at('apis.fields'); CREATE INDEX IF NOT EXISTS idx_apis_fields_project_id ON apis.fields(project_id); CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_fields_hash ON apis.fields(hash); -ALTER TABLE apis.fields ADD COLUMN is_enum BOOL NOT NULL DEFAULT 'f'; -ALTER TABLE apis.fields ADD COLUMN is_required BOOL NOT NULL DEFAULT 'f'; ----------------------------------------------------------------------- -- FORMATS table @@ -312,8 +331,16 @@ CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_formats_hash ON apis.formats(hash); -- ANOMALIES table ----------------------------------------------------------------------- -CREATE TYPE apis.anomaly_type AS ENUM ('unknown', 'field', 'endpoint','shape', 'format', 'runtime_exception'); -CREATE TYPE apis.anomaly_action AS ENUM ('unknown', 'created'); +DO $$ BEGIN + CREATE TYPE apis.anomaly_type AS ENUM ('unknown', 'field', 'endpoint','shape', 'format', 'runtime_exception'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; + +DO $$ BEGIN + CREATE TYPE apis.anomaly_action AS ENUM ('unknown', 'created'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; + CREATE TABLE IF NOT EXISTS apis.anomalies ( id uuid NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY, @@ -321,7 +348,7 @@ CREATE TABLE IF NOT EXISTS apis.anomalies updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, project_id uuid NOT NULL REFERENCES projects.projects (id) ON DELETE CASCADE, acknowledged_at TIMESTAMP WITH TIME ZONE, - acknowledged_by UUID REFERENCES users.users (id), + acknowledged_by UUID REFERENCES users.users (id) ON DELETE CASCADE, anomaly_type apis.anomaly_type NOT NULL DEFAULT 'unknown'::apis.anomaly_type, action apis.anomaly_action NOT NULL DEFAULT 'unknown'::apis.anomaly_action, target_hash text, @@ -330,8 +357,6 @@ CREATE TABLE IF NOT EXISTS apis.anomalies SELECT manage_updated_at('apis.anomalies'); CREATE INDEX IF NOT EXISTS idx_apis_anomalies_project_id ON apis.anomalies(project_id); CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_anomalies_project_id_target_hash ON apis.anomalies(project_id, target_hash); -ALTER TABLE apis.anomalies DROP CONSTRAINT IF EXISTS anomalies_acknowledged_by_fkey; -ALTER TABLE apis.anomalies ADD CONSTRAINT anomalies_acknowledged_by_fkey FOREIGN KEY (acknowledged_by) REFERENCES users.users (id) ON DELETE CASCADE; CREATE OR REPLACE FUNCTION apis.new_anomaly_proc() RETURNS trigger AS $$ DECLARE @@ -357,7 +382,7 @@ BEGIN project_id, anomaly_type, action, target_hash ) VALUES ( NEW.project_id, anomaly_type, anomaly_action, NEW.hash - ); + ) ON CONFLICT (project_id, target_hash) DO NOTHING; END IF; -- Look for existing job @@ -457,7 +482,6 @@ SELECT add_compression_policy('apis.request_dumps', INTERVAL '8 hours'); CREATE INDEX IF NOT EXISTS idx_apis_request_dumps_project_id_parent_id ON apis.request_dumps(project_id, parent_id, created_at DESC); CREATE INDEX IF NOT EXISTS idx_apis_request_dumps_project_id_endpoint_hash ON apis.request_dumps(project_id, endpoint_hash, created_at DESC); CREATE INDEX IF NOT EXISTS idx_apis_request_dumps_project_id_shape_hash ON apis.request_dumps(project_id, 
shape_hash, created_at DESC); -CREATE INDEX IF NOT EXISTS idx_apis_request_dumps_project_id_shape_hash ON apis.request_dumps(project_id, shape_hash, created_at DESC); CREATE INDEX IF NOT EXISTS idxgin_apis_request_dumps_errors ON apis.request_dumps USING GIN (errors); CREATE INDEX IF NOT EXISTS idxgin_apis_request_dumps_format_hashes ON apis.request_dumps USING GIN (format_hashes); @@ -473,103 +497,21 @@ CREATE TABLE IF NOT EXISTS apis.reports project_id UUID NOT NULL REFERENCES projects.projects (id) ON DELETE CASCADE, report_type text NOT NULL DEFAULT '', report_json jsonb NOT NULL DEFAULT '{}'::jsonb, + -- Consolidated from ALTER TABLE + start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, + end_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, PRIMARY KEY(id) ); SELECT manage_updated_at('apis.reports'); CREATE INDEX IF NOT EXISTS idx_reports_project_id ON apis.reports(project_id); +-- NOTE: apis.issues table is created in migration 0004 with the final schema +-- This avoids creating a table that will be immediately dropped and recreated --- TODO: rewrite this. This query is killing the database. --- Create a view that tracks endpoint related statistic points from the request dump table. -DROP MATERIALIZED VIEW IF EXISTS apis.endpoint_request_stats; -CREATE MATERIALIZED VIEW IF NOT EXISTS apis.endpoint_request_stats AS - WITH request_dump_stats as ( - SELECT - project_id, url_path, method, - endpoint_hash, host, - percentile_agg(EXTRACT(epoch FROM duration)) as agg, - sum(EXTRACT(epoch FROM duration)) as total_time, - count(1) as total_requests, - sum(sum(EXTRACT(epoch FROM duration))) OVER (partition by project_id) as total_time_proj, - sum(count(*)) OVER (partition by project_id) as total_requests_proj - FROM apis.request_dumps - where created_at > NOW() - interval '14' day - GROUP BY project_id, url_path, method, endpoint_hash, host - ) - SELECT - enp.id endpoint_id, - enp.hash endpoint_hash, - rds.project_id, - rds.url_path, rds.method, rds.host, - coalesce(approx_percentile(0, agg)/1000000, 0) min, - coalesce(approx_percentile(0.50, agg)/1000000, 0) p50, - coalesce(approx_percentile(0.75, agg)/1000000, 0) p75, - coalesce(approx_percentile(0.90, agg)/1000000, 0) p90, - coalesce(approx_percentile(0.95, agg)/1000000, 0) p95, - coalesce(approx_percentile(0.99, agg)/1000000, 0) p99, - coalesce(approx_percentile(1, agg)/1000000, 0) max, - CAST (total_time/1000000 AS FLOAT8) total_time, - CAST (total_time_proj/1000000 AS FLOAT8) total_time_proj, - CAST (total_requests AS INT), - CAST (total_requests_proj AS INT) - FROM apis.endpoints enp - JOIN request_dump_stats rds on (rds.project_id=enp.project_id AND rds.endpoint_hash=enp.hash); - -CREATE INDEX IF NOT EXISTS idx_apis_endpoint_request_stats_project_id ON apis.endpoint_request_stats(project_id); -CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_endpoint_request_stats_endpoint_id ON apis.endpoint_request_stats(endpoint_id); -CREATE TYPE apis.issue_type AS ENUM ('api_change', 'runtime_exception', 'query_alert'); -CREATE TABLE IF NOT EXISTS apis.issues -( - id UUID NOT NULL DEFAULT gen_random_uuid(), - created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, - updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, - project_id UUID NOT NULL REFERENCES projects.projects (id) ON DELETE CASCADE, - acknowledged_at TIMESTAMP WITH TIME ZONE, - anomaly_type apis.anomaly_type NOT NULL, - target_hash TEXT, - issue_data JSONB NOT NULL DEFAULT '{}', - endpoint_id UUID, - acknowledged_by
UUID, - archived_at TIMESTAMP WITH TIME ZONE, - -- Enhanced UI fields from migration 0004 - title TEXT DEFAULT '', - service TEXT DEFAULT '', - critical BOOLEAN DEFAULT FALSE, - breaking_changes INTEGER DEFAULT 0, - incremental_changes INTEGER DEFAULT 0, - affected_payloads INTEGER DEFAULT 0, - affected_clients INTEGER DEFAULT 0, - estimated_requests TEXT DEFAULT '', - migration_complexity TEXT DEFAULT 'low', - recommended_action TEXT DEFAULT '', - request_payloads JSONB DEFAULT '[]'::jsonb, - response_payloads JSONB DEFAULT '[]'::jsonb, - -- Anomaly grouping fields from migration 0005 - anomaly_hashes TEXT[] DEFAULT '{}', - endpoint_hash TEXT DEFAULT '' -); -SELECT manage_updated_at('apis.issues'); -CREATE INDEX IF NOT EXISTS idx_apis_issues_project_id ON apis.issues(project_id); -CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_issues_project_id_target_hash ON apis.issues(project_id, target_hash); --- Indexes from migration 0004 -CREATE INDEX IF NOT EXISTS idx_issues_critical ON apis.issues (critical) WHERE critical = TRUE; -CREATE INDEX IF NOT EXISTS idx_issues_breaking_changes ON apis.issues (breaking_changes) WHERE breaking_changes > 0; -CREATE INDEX IF NOT EXISTS idx_issues_service ON apis.issues (service); --- Indexes from migration 0005 -CREATE INDEX IF NOT EXISTS idx_issues_endpoint_hash_open ON apis.issues (project_id, endpoint_hash) WHERE acknowledged_at IS NULL AND archived_at IS NULL; -CREATE INDEX IF NOT EXISTS idx_issues_anomaly_hashes ON apis.issues USING GIN (anomaly_hashes); - -CREATE OR REPLACE PROCEDURE apis.refresh_request_dump_views_every_5mins(job_id int, config jsonb) LANGUAGE PLPGSQL AS -$$ -BEGIN - RAISE NOTICE 'Executing action % with config %', job_id, config; - REFRESH MATERIALIZED VIEW CONCURRENTLY apis.endpoint_request_stats; - REFRESH MATERIALIZED VIEW CONCURRENTLY apis.project_request_stats; -END -$$; --- Refresh view every 5mins -SELECT add_job('apis.refresh_request_dump_views_every_5mins','5min'); +DO $$ BEGIN + CREATE TYPE apis.issue_type AS ENUM ('api_change', 'runtime_exception', 'query_alert'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; -------------------------------------------------------------------- @@ -665,27 +607,27 @@ CREATE TABLE IF NOT EXISTS apis.slack project_id UUID NOT NULL REFERENCES projects.projects (id) ON DELETE CASCADE, created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, - webhook_url TEXT NOT NULL DEFAULT '', + webhook_url TEXT NOT NULL DEFAULT '', + -- Consolidated from ALTER TABLE + team_id TEXT NOT NULL DEFAULT '', + channel_id TEXT NOT NULL DEFAULT '', UNIQUE (project_id) ); -ALTER TABLE apis.slack ADD COLUMN IF NOT EXISTS team_id TEXT NOT NULL DEFAULT ''; -ALTER TABLE apis.slack ADD COLUMN IF NOT EXISTS channel_id TEXT NOT NULL DEFAULT ''; CREATE TABLE IF NOT EXISTS apis.discord ( id UUID NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY, project_id UUID NOT NULL REFERENCES projects.projects (id) ON DELETE CASCADE, created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, - guild_id TEXT NOT NULL DEFAULT '' + guild_id TEXT NOT NULL DEFAULT '', + -- Consolidated from ALTER TABLE + notifs_channel_id TEXT DEFAULT NULL, + UNIQUE (project_id) ); SELECT manage_updated_at('apis.discord'); CREATE INDEX IF NOT EXISTS idx_apis_discord_project_id ON apis.discord(project_id); CREATE INDEX IF NOT EXISTS idx_apis_discord_guild_id ON apis.discord(guild_id); -ALTER 
TABLE apis.discord -ADD CONSTRAINT unique_project_id UNIQUE (project_id); -ALTER TABLE apis.discord ADD COLUMN notifs_channel_id TEXT DEFAULT NULL; - CREATE TABLE IF NOT EXISTS tests.collections ( @@ -699,24 +641,24 @@ CREATE TABLE IF NOT EXISTS tests.collections description TEXT NOT NULL DEFAULT '', config jsonb NOT NULL DEFAULT '{}'::jsonb, is_scheduled BOOL NOT NULL DEFAULT 'f', - collection_steps JSONB NOT NULL DEFAULT '{}'::jsonb, - schedule INTERVAL NOT NULL DEFAULT '1 day' + collection_steps JSONB NOT NULL DEFAULT '{}'::jsonb, + schedule INTERVAL NOT NULL DEFAULT '1 day', + -- Consolidated from ALTER TABLE + last_run_response jsonb DEFAULT NULL, + last_run_passed INT DEFAULT 0, + last_run_failed INT DEFAULT 0, + tags TEXT[] NOT NULL DEFAULT '{}'::text[], + collection_variables JSONB NOT NULL DEFAULT '[]'::jsonb, + alert_severity TEXT NOT NULL DEFAULT '', + alert_message TEXT NOT NULL DEFAULT '', + alert_subject TEXT NOT NULL DEFAULT '', + notify_after Text NOT NULL DEFAULT '6hours', + notify_after_check BOOL NOT NULL DEFAULT 'f', + stop_after TEXT NOT NULL DEFAULT '0', + stop_after_check BOOL NOT NULL DEFAULT 'f' ); SELECT manage_updated_at('tests.collections'); create index if not exists idx_apis_testing_project_Id on tests.collections(project_id); -ALTER TABLE tests.collections ADD COLUMN last_run_response jsonb DEFAULT NULL; -ALTER TABLE tests.collections ADD COLUMN last_run_passed INT DEFAULT 0; -ALTER TABLE tests.collections ADD COLUMN last_run_failed INT DEFAULT 0; -ALTER TABLE tests.collections ADD COLUMN tags TEXT[] NOT NULL DEFAULT '{}'::text[]; -ALTER TABLE tests.collections ADD COLUMN collection_variables JSONB NOT NULL DEFAULT '[]'::jsonb; -ALTER TABLE tests.collections ADD COLUMN alert_severity TEXT NOT NULL DEFAULT ''; -ALTER TABLE tests.collections ADD COLUMN alert_message TEXT NOT NULL DEFAULT ''; -ALTER TABLE tests.collections ADD COLUMN alert_subject TEXT NOT NULL DEFAULT ''; -ALTER TABLE tests.collections ADD COLUMN notify_after Text NOT NULL DEFAULT '6hours'; -ALTER TABLE tests.collections ADD COLUMN notify_after_check BOOL NOT NULL DEFAULT 'f'; -ALTER TABLE tests.collections ADD COLUMN stop_after TEXT NOT NULL DEFAULT '0'; -ALTER TABLE tests.collections ADD COLUMN stop_after_check BOOL NOT NULL DEFAULT 'f'; - CREATE TABLE IF NOT EXISTS monitors.query_monitors @@ -737,10 +679,12 @@ CREATE TABLE IF NOT EXISTS monitors.query_monitors threshold_sustained_for_mins INT NOT NULL DEFAULT 0, alert_config JSONB NOT NULL DEFAULT '{}', deactivated_at TIMESTAMP WITH TIME ZONE, - deleted_at TIMESTAMP WITH TIME ZONE + deleted_at TIMESTAMP WITH TIME ZONE, + -- Consolidated from ALTER TABLE + visualization_type TEXT NOT NULL DEFAULT 'timeseries', + teams UUID[] DEFAULT '{}' ); SELECT manage_updated_at('monitors.query_monitors'); -ALTER TABLE monitors.query_monitors ADD COLUMN IF NOT EXISTS visualization_type TEXT NOT NULL DEFAULT 'timeseries'; CREATE TABLE IF NOT EXISTS apis.subscriptions ( @@ -782,36 +726,15 @@ language plpgsql; -- Checks for query monitors being triggered and creates a background job for any found -CREATE OR REPLACE PROCEDURE monitors.check_triggered_query_monitors(job_id int, config jsonb) LANGUAGE PLPGSQL AS $$ -DECLARE - -- Array to hold IDs from the query - id_array UUID[]; +CREATE OR REPLACE PROCEDURE monitors.check_triggered_query_monitors(job_id int, config jsonb) +LANGUAGE PLPGSQL AS $$ BEGIN - -- Execute the query and store the result in id_array - SELECT ARRAY_AGG(id) INTO id_array - FROM monitors.query_monitors - WHERE alert_last_triggered IS NULL - 
AND deactivated_at IS NULL - AND log_query_as_sql IS NOT NULL - AND log_query_as_sql != '' - AND ( - (NOT trigger_less_than AND ( - (warning_threshold IS NOT NULL AND warning_threshold <= eval(log_query_as_sql)) - OR alert_threshold <= eval(log_query_as_sql) - )) - OR - (trigger_less_than AND ( - (warning_threshold IS NOT NULL AND warning_threshold >= eval(log_query_as_sql)) - OR alert_threshold >= eval(log_query_as_sql) - )) - ); - - -- Check if id_array is not empty - IF id_array IS NOT NULL AND array_length(id_array, 1) > 0 THEN - -- Perform the insert operation using the array - INSERT INTO background_jobs (run_at, status, payload) - VALUES (NOW(), 'queued', jsonb_build_object('tag', 'QueryMonitorsTriggered', 'contents', id_array)); - END IF; + INSERT INTO background_jobs (run_at, status, payload) + VALUES (NOW(), 'queued', jsonb_build_object('tag', 'QueryMonitorsCheck')); + RAISE NOTICE 'Background job queued for job_id: %', job_id; +EXCEPTION + WHEN OTHERS THEN + RAISE EXCEPTION 'Failed to queue background job: %', SQLERRM; END; $$; SELECT add_job('monitors.check_triggered_query_monitors','1min'); @@ -861,12 +784,12 @@ END; $$; SELECT add_job('tests.check_tests_to_trigger', '10min'); -INSERT into projects.projects (id, title, payment_plan) VALUES ('00000000-0000-0000-0000-000000000000', 'Demo Project', 'Startup'); +INSERT into projects.projects (id, title, payment_plan) VALUES ('00000000-0000-0000-0000-000000000000', 'Demo Project', 'Startup') ON CONFLICT DO NOTHING; -INSERT into users.users (id, email, first_name, last_name) VALUES ('00000000-0000-0000-0000-000000000000', 'hello@monoscope.tech', 'Guest', 'User'); +INSERT into users.users (id, email, first_name, last_name) VALUES ('00000000-0000-0000-0000-000000000000', 'hello@monoscope.tech', 'Guest', 'User') ON CONFLICT DO NOTHING; -INSERT INTO projects.project_members (project_id, user_id, permission) VALUES ('00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000', 'admin'); +INSERT INTO projects.project_members (project_id, user_id, permission) VALUES ('00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000', 'admin') ON CONFLICT DO NOTHING; CREATE TABLE IF NOT EXISTS apis.errors ( @@ -878,12 +801,16 @@ CREATE TABLE IF NOT EXISTS apis.errors error_type TEXT NOT NULL, message TEXT NOT NULL, error_data JSONB NOT NULL DEFAULT '{}', + -- Consolidated from migration 0019 + first_trace_id TEXT, + recent_trace_id TEXT, PRIMARY KEY(id) ); SELECT manage_updated_at('apis.errors'); CREATE INDEX IF NOT EXISTS idx_apis_errors_project_id ON apis.errors(project_id); CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_errors_project_id_hash ON apis.errors(project_id, hash); +CREATE INDEX IF NOT EXISTS idx_errors_trace_ids ON apis.errors (recent_trace_id, first_trace_id); CREATE OR REPLACE TRIGGER error_created_anomaly AFTER INSERT ON apis.errors FOR EACH ROW EXECUTE PROCEDURE apis.new_anomaly_proc('runtime_exception', 'created', 'skip_anomaly_record'); diff --git a/static/migrations/0002_logs_traces_metrics.sql b/static/migrations/0002_logs_traces_metrics.sql index 722806c2a..8d2947200 100644 --- a/static/migrations/0002_logs_traces_metrics.sql +++ b/static/migrations/0002_logs_traces_metrics.sql @@ -1,14 +1,24 @@ CREATE SCHEMA IF NOT EXISTS telemetry; --- Define the ENUM type for severity_text -CREATE TYPE telemetry.severity_level AS ENUM ('DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'); +-- Define the ENUM type for severity_text (safe for re-runs) +DO $$ BEGIN + CREATE TYPE telemetry.severity_level AS ENUM 
('DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; + -- ================================================================= -- TRACES -- ================================================================= -CREATE TYPE telemetry.span_status AS ENUM ('OK', 'ERROR', 'UNSET'); +DO $$ BEGIN + CREATE TYPE telemetry.span_status AS ENUM ('OK', 'ERROR', 'UNSET'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; --- Define the ENUM type for span kind -CREATE TYPE telemetry.span_kind AS ENUM ('INTERNAL', 'SERVER', 'CLIENT', 'PRODUCER', 'CONSUMER'); +-- Define the ENUM type for span kind (safe for re-runs) +DO $$ BEGIN + CREATE TYPE telemetry.span_kind AS ENUM ('INTERNAL', 'SERVER', 'CLIENT', 'PRODUCER', 'CONSUMER'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; CREATE TABLE IF NOT EXISTS telemetry.metrics ( @@ -56,7 +66,10 @@ SELECT manage_updated_at('telemetry.metrics_meta'); -- ================================================================= -- Query history and saved queries -- ================================================================= -CREATE TYPE projects.query_library_kind AS ENUM ('history', 'saved'); +DO $$ BEGIN + CREATE TYPE projects.query_library_kind AS ENUM ('history', 'saved'); + EXCEPTION WHEN duplicate_object THEN null; +END $$; CREATE TABLE IF NOT EXISTS projects.query_library ( id UUID NOT NULL DEFAULT gen_random_uuid(), project_id UUID NOT NULL REFERENCES projects.projects (id) ON DELETE CASCADE, @@ -90,9 +103,14 @@ CREATE TABLE IF NOT EXISTS projects.dashboards ( homepage_since TIMESTAMP WITH TIME ZONE, tags TEXT[] NOT NULL DEFAULT '{}', title TEXT NOT NULL DEFAULT 'Untitled', + -- Consolidated from migrations 0017 and 0020 + teams UUID[] DEFAULT '{}', + file_path TEXT, + file_sha TEXT, PRIMARY KEY (id) ); SELECT manage_updated_at('projects.dashboards'); +CREATE INDEX IF NOT EXISTS idx_dashboards_file_path ON projects.dashboards(project_id, file_path) WHERE file_path IS NOT NULL; @@ -186,7 +204,7 @@ CREATE TABLE IF NOT EXISTS otel_logs_and_spans ( date TIMESTAMPTZ NOT NULL DEFAULT current_timestamp ); SELECT create_hypertable('otel_logs_and_spans', by_range('timestamp', INTERVAL '1 hours'), migrate_data => true, if_not_exists => true); -SELECT add_retention_policy('otel_logs_and_spans',INTERVAL '14 days',true); +SELECT add_retention_policy('otel_logs_and_spans',INTERVAL '30 days',true); CREATE INDEX IF NOT EXISTS idx_logs_and_spans_trace_id ON otel_logs_and_spans (project_id, context___trace_id); CREATE INDEX IF NOT EXISTS idx_logs_and_spans_span_id ON otel_logs_and_spans (project_id, context___span_id); @@ -195,4 +213,11 @@ CREATE INDEX IF NOT EXISTS idx_logs_and_spans_service_name ON otel_logs_and_span CREATE INDEX IF NOT EXISTS idx_logs_and_spans_name ON otel_logs_and_spans (project_id, name); ALTER TABLE otel_logs_and_spans -ADD COLUMN IF NOT EXISTS errors JSONB DEFAULT NULL; \ No newline at end of file +ADD COLUMN IF NOT EXISTS errors JSONB DEFAULT NULL; + +-- Columns consolidated from later migrations (0013, 0015) +ALTER TABLE otel_logs_and_spans +ADD COLUMN IF NOT EXISTS log_pattern TEXT DEFAULT NULL; + +ALTER TABLE otel_logs_and_spans +ADD COLUMN IF NOT EXISTS summary_pattern TEXT DEFAULT NULL; \ No newline at end of file diff --git a/static/migrations/0007_rebuild_issues_table.sql b/static/migrations/0004_rebuild_issues_table.sql similarity index 91% rename from static/migrations/0007_rebuild_issues_table.sql rename to static/migrations/0004_rebuild_issues_table.sql index 520ffcf7b..a6a4f88db 100644 ---
a/static/migrations/0007_rebuild_issues_table.sql +++ b/static/migrations/0004_rebuild_issues_table.sql @@ -43,11 +43,11 @@ CREATE TABLE apis.issues -- LLM enhancement tracking llm_enhanced_at TIMESTAMP WITH TIME ZONE, - llm_enhancement_version INTEGER DEFAULT 1, - - -- Constraint: Only one unacknowledged/unarchived API change issue per endpoint - CONSTRAINT unique_open_api_change_per_endpoint UNIQUE (project_id, issue_type, endpoint_hash) - DEFERRABLE INITIALLY DEFERRED + llm_enhancement_version INTEGER DEFAULT 1 + + -- NOTE: We use a partial unique index (unique_open_api_change_issue_per_endpoint below) + -- instead of a table-level constraint. The partial index correctly enforces: + -- "only one OPEN api_change issue per endpoint" while allowing multiple closed issues. ); -- Add trigger for updated_at diff --git a/static/migrations/0017_teams_table.sql b/static/migrations/0005_teams_table.sql similarity index 82% rename from static/migrations/0017_teams_table.sql rename to static/migrations/0005_teams_table.sql index 9aff10614..4fb45cbe2 100644 --- a/static/migrations/0017_teams_table.sql +++ b/static/migrations/0005_teams_table.sql @@ -1,3 +1,4 @@ +-- Teams table for organizing project members and notification routing BEGIN; CREATE TABLE IF NOT EXISTS projects.teams ( @@ -8,7 +9,7 @@ CREATE TABLE IF NOT EXISTS projects.teams ( description TEXT, members UUID[] DEFAULT '{}', notify_emails TEXT[] DEFAULT '{}', - slack_channels TEXT[] DEFAULT '{}', + slack_channels TEXT[] DEFAULT '{}', discord_channels TEXT[] DEFAULT '{}', phone_numbers TEXT[] DEFAULT '{}', created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), @@ -19,10 +20,6 @@ CREATE TABLE IF NOT EXISTS projects.teams ( ); SELECT manage_updated_at('projects.teams'); -ALTER TABLE monitors.query_monitors ADD COLUMN IF NOT EXISTS teams UUID[] DEFAULT '{}'; - -ALTER TABLE projects.dashboards ADD COLUMN IF NOT EXISTS teams UUID[] DEFAULT '{}'; - CREATE INDEX IF NOT EXISTS idx_teams_project_id ON projects.teams(project_id) WHERE deleted_at IS NULL; CREATE INDEX IF NOT EXISTS idx_teams_handle ON projects.teams(project_id, handle) WHERE deleted_at IS NULL; CREATE INDEX IF NOT EXISTS idx_teams_members_gin ON projects.teams USING GIN(members); diff --git a/static/migrations/0006_add_llm_enhancement_tracking.sql b/static/migrations/0006_add_llm_enhancement_tracking.sql deleted file mode 100644 index e5d001ed0..000000000 --- a/static/migrations/0006_add_llm_enhancement_tracking.sql +++ /dev/null @@ -1,20 +0,0 @@ --- Add fields to track LLM enhancement status for issues -BEGIN; - -ALTER TABLE apis.issues -ADD COLUMN IF NOT EXISTS llm_enhanced_at TIMESTAMP WITH TIME ZONE, -ADD COLUMN IF NOT EXISTS llm_enhancement_version INTEGER DEFAULT 1; - --- Create an index on llm_enhanced_at to efficiently find unenhanced issues -CREATE INDEX IF NOT EXISTS idx_apis_issues_llm_enhanced_at -ON apis.issues(llm_enhanced_at) -WHERE llm_enhanced_at IS NULL; - --- Create an index to find recently created issues that need enhancement -CREATE INDEX IF NOT EXISTS idx_apis_issues_created_at_unenhanced -ON apis.issues(created_at) -WHERE llm_enhanced_at IS NULL - AND acknowledged_at IS NULL - AND archived_at IS NULL; - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0020_github_sync.sql b/static/migrations/0006_github_sync.sql similarity index 75% rename from static/migrations/0020_github_sync.sql rename to static/migrations/0006_github_sync.sql index b3ed1cd6f..a865e6150 100644 --- a/static/migrations/0020_github_sync.sql +++ 
b/static/migrations/0006_github_sync.sql @@ -22,12 +22,3 @@ CREATE TABLE IF NOT EXISTS projects.github_sync ( CREATE INDEX IF NOT EXISTS idx_github_sync_project_id ON projects.github_sync(project_id); CREATE INDEX IF NOT EXISTS idx_github_sync_owner_repo ON projects.github_sync(owner, repo); CREATE INDEX IF NOT EXISTS idx_github_sync_installation ON projects.github_sync(installation_id) WHERE installation_id IS NOT NULL; - --- Add file path and content hash columns to dashboards --- file_path: canonical path like "folder/dashboard-title.yaml" --- file_sha: SHA256 hash of the YAML content for change detection -ALTER TABLE projects.dashboards - ADD COLUMN IF NOT EXISTS file_path TEXT, - ADD COLUMN IF NOT EXISTS file_sha TEXT; - -CREATE INDEX IF NOT EXISTS idx_dashboards_file_path ON projects.dashboards(project_id, file_path) WHERE file_path IS NOT NULL; diff --git a/static/migrations/0022_query_cache.sql b/static/migrations/0007_query_cache.sql similarity index 100% rename from static/migrations/0022_query_cache.sql rename to static/migrations/0007_query_cache.sql diff --git a/static/migrations/0008_use_non_defferable_contraint_on_issues.sql b/static/migrations/0008_use_non_defferable_contraint_on_issues.sql deleted file mode 100644 index 4ea7bca0f..000000000 --- a/static/migrations/0008_use_non_defferable_contraint_on_issues.sql +++ /dev/null @@ -1,80 +0,0 @@ -BEGIN; - --- Drop the old deferrable constraint -ALTER TABLE apis.issues -DROP CONSTRAINT unique_open_api_change_per_endpoint; - --- Add the new, non-deferrable constraint -ALTER TABLE apis.issues -ADD CONSTRAINT unique_open_api_change_per_endpoint -UNIQUE (project_id, issue_type, endpoint_hash); - - -CREATE OR REPLACE FUNCTION apis.new_anomaly_proc() RETURNS trigger AS $$ -DECLARE - anomaly_type apis.anomaly_type; - anomaly_action apis.anomaly_action; - should_record_anomaly BOOLEAN := true; - existing_job_id INT; - existing_target_hashes JSONB; -BEGIN - IF TG_WHEN <> 'AFTER' THEN - RAISE EXCEPTION 'apis.new_anomaly_proc() may only run as an AFTER trigger'; - END IF; - - anomaly_type := TG_ARGV[0]; - anomaly_action := TG_ARGV[1]; - - IF array_length(TG_ARGV, 1) >= 3 AND TG_ARGV[2] = 'skip_anomaly_record' THEN - should_record_anomaly := false; - END IF; - - IF should_record_anomaly THEN - INSERT INTO apis.anomalies ( - project_id, anomaly_type, action, target_hash - ) VALUES ( - NEW.project_id, anomaly_type, anomaly_action, NEW.hash - ) ON CONFLICT (project_id, target_hash) DO NOTHING; - END IF; - - -- Look for existing job - SELECT id, payload->'targetHashes' - INTO existing_job_id, existing_target_hashes - FROM background_jobs - WHERE payload->>'tag' = 'NewAnomaly' - AND payload->>'projectId' = NEW.project_id::TEXT - AND payload->>'anomalyType' = anomaly_type::TEXT - AND status = 'queued' - ORDER BY run_at ASC - LIMIT 1; - - IF existing_job_id IS NOT NULL THEN - UPDATE background_jobs SET payload = jsonb_build_object( - 'tag', 'NewAnomaly', - 'projectId', NEW.project_id, - 'createdAt', to_jsonb(NEW.created_at), - 'anomalyType', anomaly_type::TEXT, - 'anomalyAction', anomaly_action::TEXT, - 'targetHashes', existing_target_hashes || to_jsonb(NEW.hash) - ) WHERE id = existing_job_id; - ELSE - INSERT INTO background_jobs (run_at, status, payload) - VALUES ( - now(), - 'queued', - jsonb_build_object( - 'tag', 'NewAnomaly', - 'projectId', NEW.project_id, - 'createdAt', to_jsonb(NEW.created_at), - 'anomalyType', anomaly_type::TEXT, - 'anomalyAction', anomaly_action::TEXT, - 'targetHashes', jsonb_build_array(NEW.hash) - ) - ); - END IF; - - 
RETURN NULL; -END; -$$ LANGUAGE plpgsql; - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0009_make_retention_policy_30.sql b/static/migrations/0009_make_retention_policy_30.sql deleted file mode 100644 index ae4320179..000000000 --- a/static/migrations/0009_make_retention_policy_30.sql +++ /dev/null @@ -1,9 +0,0 @@ -BEGIN; - --- Remove old policy -SELECT remove_retention_policy('otel_logs_and_spans'); - --- Add new 30-day policy -SELECT add_retention_policy('otel_logs_and_spans', INTERVAL '30 days', true); - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0010_requbild_ers_view.sql b/static/migrations/0010_requbild_ers_view.sql deleted file mode 100644 index 9bc22dc1d..000000000 --- a/static/migrations/0010_requbild_ers_view.sql +++ /dev/null @@ -1,48 +0,0 @@ -BEGIN; - -DROP MATERIALIZED VIEW IF EXISTS apis.endpoint_request_stats; - -CREATE MATERIALIZED VIEW IF NOT EXISTS apis.endpoint_request_stats AS -WITH request_stats AS ( - SELECT - project_id, - attributes___url___path AS url_path, - attributes___http___request___method AS method, - hashes[1] AS endpoint_hash, - coalesce(attributes->'net'->'host'->>'name', '') AS host, - percentile_agg(duration) AS agg, - sum(duration) AS total_time, - count(*) AS total_requests, - sum(sum(duration)) OVER (PARTITION BY project_id) AS total_time_proj, - sum(count(*)) OVER (PARTITION BY project_id) AS total_requests_proj - FROM otel_logs_and_spans - WHERE name = 'monoscope.http' OR name = 'apitoolkit-http-span' - GROUP BY project_id, url_path, method, hashes[1], host -) -SELECT DISTINCT ON (enp.id) - enp.id AS endpoint_id, - enp.hash AS endpoint_hash, - rds.project_id, - rds.url_path, - rds.method, - rds.host, - coalesce(approx_percentile(0, agg) / 1e9, 0) AS min, - coalesce(approx_percentile(0.50, agg) / 1e9, 0) AS p50, - coalesce(approx_percentile(0.75, agg) / 1e9, 0) AS p75, - coalesce(approx_percentile(0.90, agg) / 1e9, 0) AS p90, - coalesce(approx_percentile(0.95, agg) / 1e9, 0) AS p95, - coalesce(approx_percentile(0.99, agg) / 1e9, 0) AS p99, - coalesce(approx_percentile(1, agg) / 1e9, 0) AS max, - CAST(total_time / 1e9 AS FLOAT8) AS total_time, - CAST(total_time_proj / 1e9 AS FLOAT8) AS total_time_proj, - CAST(total_requests AS INT), - CAST(total_requests_proj AS INT) -FROM apis.endpoints enp -JOIN request_stats rds - ON rds.project_id = enp.project_id::text - AND enp.hash = rds.endpoint_hash; - -CREATE INDEX IF NOT EXISTS idx_apis_endpoint_request_stats_project_id ON apis.endpoint_request_stats(project_id); -CREATE UNIQUE INDEX IF NOT EXISTS idx_apis_endpoint_request_stats_endpoint_id ON apis.endpoint_request_stats(endpoint_id); - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0011_host_request_stats.sql b/static/migrations/0011_host_request_stats.sql deleted file mode 100644 index 6f2cd29ad..000000000 --- a/static/migrations/0011_host_request_stats.sql +++ /dev/null @@ -1,45 +0,0 @@ -BEGIN; - -DROP MATERIALIZED VIEW IF EXISTS apis.host_requests_stats; - -CREATE MATERIALIZED VIEW apis.host_requests_stats AS -WITH filtered_requests AS ( - SELECT attributes->'net'->'host'->>'name' AS host, - COUNT(*) AS events_count, - MAX(timestamp) AS last_seen, - MIN(timestamp) AS first_seen, - project_id - FROM otel_logs_and_spans - WHERE name IN ('monoscope.http', 'apitoolkit-http-span') - GROUP BY project_id, host -) -SELECT DISTINCT ON (ep.project_id, ep.host) - ep.project_id, - ep.host, - COALESCE(fr.events_count, 0) AS events_count, - fr.last_seen, - fr.first_seen, - ep.outgoing -FROM 
apis.endpoints ep -LEFT JOIN filtered_requests fr - ON ep.host = fr.host - AND ep.project_id::text = fr.project_id -WHERE ep.host != ''; - -CREATE UNIQUE INDEX idx_host_requests_stats_project_host - ON apis.host_requests_stats(project_id, host); - -CREATE OR REPLACE PROCEDURE apis.refresh_host_request_stats_every_5mins(job_id int, config jsonb) -LANGUAGE plpgsql -AS $$ -BEGIN - RAISE NOTICE 'Refreshing host_requests_stats (job_id %, config %)', job_id, config; - REFRESH MATERIALIZED VIEW CONCURRENTLY apis.host_requests_stats; -END; -$$; - --- Schedule it to run every 5 minutes -SELECT add_job('apis.refresh_host_request_stats_every_5mins', '5min'); - - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0012_drop_materialized_views.sql b/static/migrations/0012_drop_materialized_views.sql deleted file mode 100644 index 4d664daa5..000000000 --- a/static/migrations/0012_drop_materialized_views.sql +++ /dev/null @@ -1,6 +0,0 @@ -BEGIN; - -DROP MATERIALIZED VIEW IF EXISTS apis.host_requests_stats; -DROP MATERIALIZED VIEW IF EXISTS apis.endpoint_request_stats; - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0013_log_patterns.sql b/static/migrations/0013_log_patterns.sql deleted file mode 100644 index 8c366f8bc..000000000 --- a/static/migrations/0013_log_patterns.sql +++ /dev/null @@ -1,6 +0,0 @@ -BEGIN; - -ALTER TABLE otel_logs_and_spans -ADD COLUMN IF NOT EXISTS log_pattern TEXT DEFAULT NULL; - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0014_alerts_config.sql b/static/migrations/0014_alerts_config.sql deleted file mode 100644 index 9dbb7f56c..000000000 --- a/static/migrations/0014_alerts_config.sql +++ /dev/null @@ -1,4 +0,0 @@ -BEGIN; -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS endpoint_alerts BOOL NOT NULL DEFAULT TRUE; -ALTER TABLE projects.projects ADD COLUMN IF NOT EXISTS error_alerts BOOL NOT NULL DEFAULT TRUE; -COMMIT; \ No newline at end of file diff --git a/static/migrations/0015_summary_pattern.sql b/static/migrations/0015_summary_pattern.sql deleted file mode 100644 index a9901c993..000000000 --- a/static/migrations/0015_summary_pattern.sql +++ /dev/null @@ -1,4 +0,0 @@ -BEGIN; -ALTER TABLE otel_logs_and_spans -ADD COLUMN IF NOT EXISTS summary_pattern TEXT DEFAULT NULL; -COMMIT; \ No newline at end of file diff --git a/static/migrations/0016_drop_overly_restrictive_issues_constraint.sql b/static/migrations/0016_drop_overly_restrictive_issues_constraint.sql deleted file mode 100644 index 184f516c3..000000000 --- a/static/migrations/0016_drop_overly_restrictive_issues_constraint.sql +++ /dev/null @@ -1,17 +0,0 @@ --- Drop overly restrictive table-level constraint on issues --- The partial unique index unique_open_api_change_issue_per_endpoint already --- enforces the correct business logic: only one open api_change issue per endpoint -BEGIN; - --- Drop the table-level constraint that prevents ANY duplicate (project_id, issue_type, endpoint_hash) --- This was too restrictive - it prevented: --- 1. Multiple query_alert issues for the same project --- 2. 
Creating a new api_change issue after acknowledging an old one -ALTER TABLE apis.issues -DROP CONSTRAINT IF EXISTS unique_open_api_change_per_endpoint; - --- The partial unique index unique_open_api_change_issue_per_endpoint (created in migration 0007) --- already enforces the real business requirement: --- Only one OPEN (unacknowledged, unarchived) api_change issue per (project_id, endpoint_hash) - -COMMIT; diff --git a/static/migrations/0016_reports_start_and_end_time.sql b/static/migrations/0016_reports_start_and_end_time.sql deleted file mode 100644 index 632d585b4..000000000 --- a/static/migrations/0016_reports_start_and_end_time.sql +++ /dev/null @@ -1,4 +0,0 @@ -BEGIN; -ALTER TABLE apis.reports ADD COLUMN IF NOT EXISTS start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp; -ALTER TABLE apis.reports ADD COLUMN IF NOT EXISTS end_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp; -COMMIT; \ No newline at end of file diff --git a/static/migrations/0018_store_monitor_runs_as_events.sql b/static/migrations/0018_store_monitor_runs_as_events.sql deleted file mode 100644 index 3db177522..000000000 --- a/static/migrations/0018_store_monitor_runs_as_events.sql +++ /dev/null @@ -1,93 +0,0 @@ -CREATE OR REPLACE PROCEDURE monitors.check_triggered_query_monitors(job_id int, config jsonb) -LANGUAGE PLPGSQL AS $$ -DECLARE - monitor_rec RECORD; - value NUMERIC; - start TIMESTAMP; - finish TIMESTAMP; - status TEXT; - duration_ns BIGINT; -BEGIN - FOR monitor_rec IN - SELECT * - FROM monitors.query_monitors - WHERE deactivated_at IS NULL - AND log_query_as_sql IS NOT NULL - AND log_query_as_sql != '' - LOOP - BEGIN - -- Compute actual value from the SQL - RAISE NOTICE 'Executing SQL: %', monitor_rec.log_query_as_sql; - - start := clock_timestamp(); - EXECUTE monitor_rec.log_query_as_sql INTO value; - finish := clock_timestamp(); - RAISE NOTICE 'value SQL: %', value; - -- Calculate duration in nanoseconds - duration_ns := EXTRACT(EPOCH FROM (finish - start)) * 1000000000; - - -- Check if monitor was triggered - IF (NOT monitor_rec.trigger_less_than AND - ((monitor_rec.warning_threshold IS NOT NULL AND monitor_rec.warning_threshold <= value) - OR monitor_rec.alert_threshold <= value)) - OR - (monitor_rec.trigger_less_than AND - ((monitor_rec.warning_threshold IS NOT NULL AND monitor_rec.warning_threshold >= value) - OR monitor_rec.alert_threshold >= value)) - THEN - -- Determine status - IF (NOT monitor_rec.trigger_less_than AND monitor_rec.alert_threshold <= value) - OR (monitor_rec.trigger_less_than AND monitor_rec.alert_threshold >= value) - THEN - status := 'Alerting'; - ELSIF monitor_rec.warning_threshold IS NOT NULL AND - ((NOT monitor_rec.trigger_less_than AND monitor_rec.warning_threshold <= value) - OR (monitor_rec.trigger_less_than AND monitor_rec.warning_threshold >= value)) - THEN - status := 'Warning'; - ELSE - status := 'Normal'; - END IF; - - -- Insert into otel_logs_and_spans as an alert - INSERT INTO otel_logs_and_spans ( - project_id, - kind, - timestamp, - name, - duration, - summary, - status_message, - context___trace_id, - body - ) - VALUES ( - monitor_rec.project_id, - 'alert', - start, - COALESCE(monitor_rec.alert_config->>'title', 'Untitled Monitor'), - duration_ns, -- Duration in nanoseconds - ARRAY['Query monitor triggered', COALESCE(monitor_rec.alert_config->>'title', 'Untitled Monitor')]::TEXT[], - status, - monitor_rec.id::text, - jsonb_build_object('value', value, 'monitor_id', monitor_rec.id) - ); - - -- Also queue the job - INSERT INTO 
background_jobs (run_at, status, payload) - VALUES ( - NOW(), - 'queued', - jsonb_build_object( - 'tag', 'QueryMonitorsTriggered', - 'contents', ARRAY[monitor_rec.id] - ) - ); - END IF; - - EXCEPTION WHEN OTHERS THEN - RAISE NOTICE 'Error processing monitor %: %', monitor_rec.id, SQLERRM; - END; - END LOOP; -END; -$$; \ No newline at end of file diff --git a/static/migrations/0019_add_trace_id.sql b/static/migrations/0019_add_trace_id.sql deleted file mode 100644 index 647284fe2..000000000 --- a/static/migrations/0019_add_trace_id.sql +++ /dev/null @@ -1,20 +0,0 @@ -BEGIN; - -ALTER TABLE apis.issues -ADD COLUMN first_trace_id TEXT, -ADD COLUMN recent_trace_id TEXT; - -ALTER TABLE apis.errors -ADD COLUMN first_trace_id TEXT, -ADD COLUMN recent_trace_id TEXT; - --- remove first_trace_id and recent_trace_id from apis.issues - -ALTER TABLE apis.issues -DROP COLUMN IF EXISTS first_trace_id, -DROP COLUMN IF EXISTS recent_trace_id; - -CREATE INDEX idx_errors_trace_ids -ON apis.errors (recent_trace_id, first_trace_id); - -COMMIT; \ No newline at end of file diff --git a/static/migrations/0021_move_monitors_to_haskell.sql b/static/migrations/0021_move_monitors_to_haskell.sql deleted file mode 100644 index 3cd1fe69d..000000000 --- a/static/migrations/0021_move_monitors_to_haskell.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE OR REPLACE PROCEDURE monitors.check_triggered_query_monitors(job_id int, config jsonb) -LANGUAGE PLPGSQL AS $$ -BEGIN - INSERT INTO background_jobs (run_at, status, payload) - VALUES (NOW(), 'queued', jsonb_build_object('tag', 'QueryMonitorsCheck')); - RAISE NOTICE 'Background job queued for job_id: %', job_id; -EXCEPTION - WHEN OTHERS THEN - RAISE EXCEPTION 'Failed to queue background job: %', SQLERRM; -END; -$$; \ No newline at end of file
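
The re-run-safety patterns this commit standardizes, collected into one self-contained PostgreSQL sketch. The object names here (schema demo, table demo.items, type demo_status) are illustrative assumptions, not objects from these migrations; every statement is written so a second run is a no-op.

-- CREATE TYPE has no IF NOT EXISTS clause, so wrap it in a DO block and
-- swallow the duplicate_object error a re-run would raise.
DO $$ BEGIN
  CREATE TYPE demo_status AS ENUM ('open', 'closed');
EXCEPTION WHEN duplicate_object THEN NULL;
END $$;

-- Schemas, tables, and indexes support IF NOT EXISTS directly; declaring
-- every column inline (rather than via later ALTER TABLE ... ADD COLUMN
-- statements) keeps the CREATE statement the single source of truth.
CREATE SCHEMA IF NOT EXISTS demo;
CREATE TABLE IF NOT EXISTS demo.items (
    id UUID NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY,
    endpoint_hash TEXT NOT NULL DEFAULT '',
    status demo_status NOT NULL DEFAULT 'open'
);

-- Seed rows become no-ops on re-runs with ON CONFLICT DO NOTHING (here the
-- conflict target is the primary key).
INSERT INTO demo.items (id) VALUES ('00000000-0000-0000-0000-000000000000')
ON CONFLICT DO NOTHING;

-- The 0004 rebuild replaces the DEFERRABLE table constraint with a partial
-- unique index: uniqueness is enforced only among open rows, so closed
-- issues can accumulate while at most one open issue exists per endpoint.
CREATE UNIQUE INDEX IF NOT EXISTS idx_demo_one_open_per_endpoint
ON demo.items (endpoint_hash) WHERE status = 'open';

Running this sketch twice succeeds both times, which is the property each renumbered migration above is meant to have.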