diff --git a/CHANGELOG.md b/CHANGELOG.md index e761d4c..41a8e04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,51 @@ # Changelog +## Unreleased + +**Phase 1: JSON Contracts Foundation** + +### Breaking Changes + +- JSON output now nests command-specific data under a `data` field instead of flattening at the top level +- Added new envelope fields: `tool_version`, `generated_at`, `severity` +- Consumers must update to read from `response.data` instead of directly from `response` + +### New Commands + +- **`pgcrate context`**: Connection context, server info, extensions, and privileges +- **`pgcrate capabilities`**: Permission-aware feature discovery + +### JSON Envelope v2.0.0 + +All diagnostic commands now use a consistent envelope: + +```json +{ + "ok": true, + "schema_id": "pgcrate.diagnostics.triage", + "schema_version": "2.0.0", + "tool_version": "0.3.0", + "generated_at": "2026-01-19T12:00:00Z", + "severity": "healthy", + "warnings": [], + "errors": [], + "data": { ... } +} +``` + +### Reason Codes + +- Added 27 stable reason codes across 3 categories (operational, policy, capability) +- Skipped checks now use `reason_code` from the ReasonCode enum for automation +- Error responses use the same envelope structure as success responses + +### Improvements + +- `data_directory` in context output now gated behind `--no-redact` flag +- SQLSTATE 57014 correctly disambiguates `statement_timeout` vs `query_cancelled` +- `indexes` command returns `severity: warning` when findings exist (was always `healthy`) +- JSON schemas use `$ref` composition with `envelope.schema.json` for consistency + ## v0.3.0 Production-safe diagnostic commands with safety rails. diff --git a/README.md b/README.md index 32e299a..4499702 100644 --- a/README.md +++ b/README.md @@ -129,6 +129,26 @@ pgcrate extension list # Installed extensions pgcrate extension list --available # Extensions available to install ``` +### Diagnostics + +Agent-friendly health checks with JSON output for automation: + +```bash +pgcrate triage # Quick health overview (locks, xid, sequences) +pgcrate triage --json # Machine-readable JSON output +pgcrate context --json # Connection context, server info, privileges +pgcrate capabilities --json # What can this connection do? +pgcrate locks # Blocking locks and long transactions +pgcrate xid # Transaction ID wraparound analysis +pgcrate sequences # Sequence exhaustion check +pgcrate indexes # Missing, unused, duplicate indexes +``` + +All diagnostic commands support timeout flags for production safety: +- `--connect-timeout ` - Connection timeout (default: 5000ms) +- `--statement-timeout ` - Query timeout (default: 30000ms) +- `--lock-timeout ` - Lock wait timeout (default: 500ms) + ### Data Operations ```bash @@ -145,17 +165,31 @@ pgcrate doctor # Health checks for CI ### CI/CD Integration -Query commands support `--json` for machine-readable output: +Commands support `--json` for machine-readable output with versioned schemas: ```bash pgcrate migrate status --json # JSON migration status -pgcrate doctor --json # JSON health report +pgcrate triage --json # JSON health check with severity +pgcrate context --json # JSON connection/server info +pgcrate capabilities --json # JSON capability discovery pgcrate diff --from $PROD --to $DEV --json # JSON diff pgcrate snapshot list --json # JSON snapshot list -pgcrate snapshot info dev --json # JSON snapshot details ``` -Exit codes: `0` = success, `1` = action needed (e.g., pending migrations), `2` = error. 
+JSON output uses a consistent envelope: +```json +{ + "ok": true, + "schema_id": "pgcrate.diagnostics.triage", + "schema_version": "2.0.0", + "tool_version": "0.3.0", + "generated_at": "2026-01-19T12:00:00Z", + "severity": "warning", + "data": { ... } +} +``` + +Exit codes for diagnostics: `0` = healthy, `1` = warning, `2` = critical, `10+` = operational failure. ## Why pgcrate? @@ -231,6 +265,13 @@ DROP TABLE users; | `pgcrate sql` | Run ad-hoc SQL (alias: `query`) | | `pgcrate seed ` | List, run, validate, or diff seed data | | `pgcrate model ` | Run, compile, test, lint, graph, new, or show models | +| `pgcrate triage` | Quick health check (locks, xid, sequences) | +| `pgcrate context` | Connection context, server info, privileges | +| `pgcrate capabilities` | What can this connection do? | +| `pgcrate locks` | Blocking locks and long transactions | +| `pgcrate xid` | Transaction ID wraparound analysis | +| `pgcrate sequences` | Sequence exhaustion check | +| `pgcrate indexes` | Missing, unused, duplicate indexes | | `pgcrate doctor` | Run health checks | | `pgcrate bootstrap` | Setup environment with anonymized data from source | | `pgcrate snapshot ` | Save (with profiles), restore, list, or delete snapshots | diff --git a/schemas/action.schema.json b/schemas/action.schema.json new file mode 100644 index 0000000..1bf7e0a --- /dev/null +++ b/schemas/action.schema.json @@ -0,0 +1,145 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "pgcrate.diagnostics.action", + "title": "pgcrate action schema", + "description": "Schema for structured actions in diagnostic outputs (used with --include-fixes)", + "type": "object", + "additionalProperties": false, + "required": [ + "action_id", + "action_type", + "command", + "args", + "command_string", + "description", + "available", + "mutates", + "risk", + "gates" + ], + "properties": { + "action_id": { + "type": "string", + "pattern": "^[a-z0-9-]+", + "description": "Unique action identifier" + }, + "action_type": { + "$ref": "#/$defs/actionType" + }, + "command": { + "type": "string", + "description": "Command to run (e.g., pgcrate)" + }, + "args": { + "type": "array", + "items": { "type": "string" }, + "description": "Command arguments" + }, + "command_string": { + "type": "string", + "description": "Full command string for display" + }, + "description": { + "type": "string", + "description": "Human-readable description of what this action does" + }, + "requires_capability_id": { + "type": "string", + "description": "Which capability is required" + }, + "available": { + "type": "boolean", + "description": "Whether this action is available in the current context" + }, + "unavailable_reason": { + "type": "string", + "description": "Reason why action is unavailable" + }, + "mutates": { + "type": "boolean", + "description": "Whether this action mutates data" + }, + "sql_preview": { + "type": "array", + "items": { "type": "string" }, + "description": "SQL statements that would be executed" + }, + "lock_risk": { + "$ref": "#/$defs/lockRisk" + }, + "risk": { + "type": "string", + "enum": ["none", "low", "medium", "high", "extreme"], + "description": "Overall risk level" + }, + "evidence": { + "type": "object", + "description": "Evidence that led to this recommendation" + }, + "gates": { + "$ref": "#/$defs/actionGates" + }, + "verify": { + "type": "array", + "items": { "$ref": "#/$defs/verifyStep" }, + "description": "Steps to verify success after execution" + } + }, + "$defs": { + "actionType": { + "type": "string", + "enum": 
["investigate", "fix", "monitor"], + "description": "Type of action" + }, + "lockRisk": { + "type": "string", + "enum": ["none", "low", "medium", "high", "extreme"], + "description": "Risk level for lock acquisition" + }, + "actionGates": { + "type": "object", + "additionalProperties": false, + "properties": { + "requires_write": { + "type": "boolean", + "description": "Requires --read-write mode" + }, + "requires_primary": { + "type": "boolean", + "description": "Requires --primary flag" + }, + "requires_confirmation": { + "type": "boolean", + "description": "Requires --yes flag for confirmation" + }, + "requires_capability": { + "type": "string", + "description": "Required capability ID" + }, + "min_pg_version": { + "type": "integer", + "description": "Minimum PostgreSQL version required" + } + } + }, + "verifyStep": { + "type": "object", + "additionalProperties": false, + "required": ["description", "command", "expected"], + "properties": { + "description": { + "type": "string", + "description": "Human-readable description of what to verify" + }, + "command": { + "type": "string", + "description": "Command to run for verification" + }, + "expected": { + "type": "string", + "description": "Expected outcome" + } + } + } + } +} diff --git a/schemas/capabilities.schema.json b/schemas/capabilities.schema.json new file mode 100644 index 0000000..80bba2f --- /dev/null +++ b/schemas/capabilities.schema.json @@ -0,0 +1,111 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "pgcrate.diagnostics.capabilities", + "title": "pgcrate capabilities output", + "description": "Available capabilities based on privileges and connection mode", + "allOf": [ + { "$ref": "envelope.schema.json" } + ], + "properties": { + "schema_id": { "const": "pgcrate.diagnostics.capabilities" }, + "data": { "$ref": "#/$defs/capabilitiesData" } + }, + "$defs": { + "capabilitiesData": { + "type": "object", + "additionalProperties": false, + "required": ["capabilities", "summary"], + "properties": { + "capabilities": { + "type": "array", + "items": { "$ref": "#/$defs/capabilityInfo" }, + "description": "List of all capabilities and their status" + }, + "summary": { "$ref": "#/$defs/capabilitySummary" } + } + }, + "capabilityInfo": { + "type": "object", + "additionalProperties": false, + "required": ["id", "name", "description", "status", "requirements"], + "properties": { + "id": { + "type": "string", + "pattern": "^[a-z]+\\.[a-z_]+$", + "description": "Capability identifier (e.g., diagnostics.triage)" + }, + "name": { + "type": "string", + "description": "Human-readable name" + }, + "description": { + "type": "string", + "description": "Brief description of the capability" + }, + "status": { "$ref": "#/$defs/capabilityStatus" }, + "reasons": { + "type": "array", + "items": { "$ref": "envelope.schema.json#/$defs/reasonInfo" }, + "description": "Reasons for degraded/unavailable status" + }, + "requirements": { + "type": "array", + "items": { "$ref": "#/$defs/requirement" }, + "description": "Requirements and their status" + }, + "limitations": { + "type": "array", + "items": { "type": "string" }, + "description": "Limitations when degraded" + } + } + }, + "capabilityStatus": { + "type": "string", + "enum": ["available", "degraded", "unavailable", "unknown"], + "description": "Availability status of the capability" + }, + "requirement": { + "type": "object", + "additionalProperties": false, + "required": ["what", "met"], + "properties": { + "what": { + "type": "string", + "description": "What is required" 
+ }, + "met": { + "type": "boolean", + "description": "Whether the requirement is met" + } + } + }, + "capabilitySummary": { + "type": "object", + "additionalProperties": false, + "required": ["available", "degraded", "unavailable", "unknown"], + "properties": { + "available": { + "type": "integer", + "minimum": 0, + "description": "Count of available capabilities" + }, + "degraded": { + "type": "integer", + "minimum": 0, + "description": "Count of degraded capabilities" + }, + "unavailable": { + "type": "integer", + "minimum": 0, + "description": "Count of unavailable capabilities" + }, + "unknown": { + "type": "integer", + "minimum": 0, + "description": "Count of capabilities with unknown status" + } + } + } + } +} diff --git a/schemas/context.schema.json b/schemas/context.schema.json new file mode 100644 index 0000000..93cb13d --- /dev/null +++ b/schemas/context.schema.json @@ -0,0 +1,125 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "pgcrate.diagnostics.context", + "title": "pgcrate context output", + "description": "Connection context, server information, extensions, and privileges", + "allOf": [ + { "$ref": "envelope.schema.json" } + ], + "properties": { + "schema_id": { "const": "pgcrate.diagnostics.context" }, + "data": { "$ref": "#/$defs/contextData" } + }, + "$defs": { + "contextData": { + "type": "object", + "additionalProperties": false, + "required": ["target", "server", "extensions", "privileges"], + "properties": { + "target": { "$ref": "#/$defs/targetInfo" }, + "server": { "$ref": "#/$defs/serverInfo" }, + "extensions": { + "type": "object", + "additionalProperties": { "type": "string" }, + "description": "Installed extensions (name -> version)" + }, + "privileges": { "$ref": "#/$defs/privilegeInfo" } + } + }, + "targetInfo": { + "type": "object", + "additionalProperties": false, + "required": ["host", "port", "database", "user", "readonly"], + "properties": { + "host": { + "type": "string", + "description": "Host (redacted by default)" + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535, + "description": "Port number" + }, + "database": { + "type": "string", + "description": "Database name" + }, + "user": { + "type": "string", + "description": "Connected user" + }, + "readonly": { + "type": "boolean", + "description": "Whether connection is read-only" + } + } + }, + "serverInfo": { + "type": "object", + "additionalProperties": false, + "required": ["version", "version_num", "version_major", "in_recovery"], + "properties": { + "version": { + "type": "string", + "description": "Full version string" + }, + "version_num": { + "type": "integer", + "description": "Numeric version (e.g., 160001)" + }, + "version_major": { + "type": "integer", + "description": "Major version (e.g., 16)" + }, + "in_recovery": { + "type": "boolean", + "description": "Whether server is a replica" + }, + "data_directory": { + "type": "string", + "description": "Data directory path (only with --no-redact)" + } + } + }, + "privilegeInfo": { + "type": "object", + "additionalProperties": false, + "required": [ + "pg_stat_activity_select", + "pg_cancel_backend_execute", + "pg_terminate_backend_execute", + "pg_stat_statements_select", + "is_superuser", + "roles" + ], + "properties": { + "pg_stat_activity_select": { + "type": "boolean", + "description": "Can read pg_stat_activity" + }, + "pg_cancel_backend_execute": { + "type": "boolean", + "description": "Can call pg_cancel_backend" + }, + "pg_terminate_backend_execute": { + "type": "boolean", + 
"description": "Can call pg_terminate_backend" + }, + "pg_stat_statements_select": { + "type": "boolean", + "description": "Can read pg_stat_statements" + }, + "is_superuser": { + "type": "boolean", + "description": "Whether user is superuser" + }, + "roles": { + "type": "array", + "items": { "type": "string" }, + "description": "Granted roles" + } + } + } + } +} diff --git a/schemas/envelope.schema.json b/schemas/envelope.schema.json new file mode 100644 index 0000000..6ca74ef --- /dev/null +++ b/schemas/envelope.schema.json @@ -0,0 +1,139 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "pgcrate.diagnostics.envelope", + "title": "pgcrate diagnostic envelope", + "description": "Common envelope structure for all diagnostic command outputs (v2.0.0)", + "type": "object", + "additionalProperties": true, + "required": ["ok", "schema_id", "schema_version", "tool_version", "generated_at", "severity", "data"], + "properties": { + "ok": { + "type": "boolean", + "description": "True if command succeeded, false if errors occurred" + }, + "schema_id": { + "type": "string", + "pattern": "^pgcrate\\..+$", + "description": "Identifier for the schema of this output" + }, + "schema_version": { + "type": "string", + "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$", + "description": "Semver version of the schema" + }, + "tool_version": { + "type": "string", + "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+", + "description": "Version of pgcrate that generated this output" + }, + "generated_at": { + "type": "string", + "format": "date-time", + "description": "ISO 8601 timestamp when this output was generated" + }, + "severity": { + "$ref": "#/$defs/severity", + "description": "Overall severity of findings" + }, + "partial": { + "type": "boolean", + "description": "True if some checks were skipped (output is incomplete)" + }, + "warnings": { + "type": "array", + "items": { "$ref": "#/$defs/reasonInfo" }, + "description": "Non-fatal issues encountered during execution" + }, + "errors": { + "type": "array", + "items": { "$ref": "#/$defs/reasonInfo" }, + "description": "Fatal issues that prevented full execution" + }, + "timeouts": { + "$ref": "#/$defs/timeouts", + "description": "Effective timeout configuration used" + }, + "data": { + "type": "object", + "description": "Command-specific data payload" + } + }, + "$defs": { + "severity": { + "type": "string", + "enum": ["healthy", "warning", "critical", "error"], + "description": "Overall severity: healthy (no issues), warning (attention needed), critical (urgent), error (execution failed)" + }, + "reasonCode": { + "type": "string", + "enum": [ + "connection_timeout", + "statement_timeout", + "lock_timeout", + "connection_failed", + "query_cancelled", + "server_shutdown", + "too_many_connections", + "out_of_memory", + "disk_full", + "internal_error", + "primary_requires_ack", + "requires_read_write", + "dangerous_operation", + "replica_not_allowed", + "primary_not_allowed", + "feature_disabled", + "missing_extension", + "missing_privilege", + "missing_role", + "missing_table", + "missing_schema", + "missing_function", + "unsupported_version", + "not_applicable", + "missing_config", + "requires_superuser", + "requires_replication" + ], + "description": "Stable reason code for automation" + }, + "reasonInfo": { + "type": "object", + "additionalProperties": false, + "required": ["code", "message"], + "properties": { + "code": { "$ref": "#/$defs/reasonCode" }, + "message": { + "type": "string", + "description": "Human-readable message" + }, + "details": { + 
"type": "object", + "description": "Optional structured details" + } + } + }, + "timeouts": { + "type": "object", + "additionalProperties": false, + "required": ["connect_ms", "statement_ms", "lock_ms"], + "properties": { + "connect_ms": { + "type": "integer", + "minimum": 0, + "description": "Connection timeout in milliseconds" + }, + "statement_ms": { + "type": "integer", + "minimum": 0, + "description": "Statement timeout in milliseconds" + }, + "lock_ms": { + "type": "integer", + "minimum": 0, + "description": "Lock timeout in milliseconds" + } + } + } + } +} diff --git a/schemas/indexes.schema.json b/schemas/indexes.schema.json index 1e0a733..4856934 100644 --- a/schemas/indexes.schema.json +++ b/schemas/indexes.schema.json @@ -3,40 +3,37 @@ "$id": "pgcrate.diagnostics.indexes", "title": "pgcrate indexes output", "description": "Index health analysis: missing, unused, and duplicate indexes", - "type": "object", - "additionalProperties": false, - "required": ["ok", "schema_id", "schema_version", "missing", "unused", "duplicates", "total_unused_bytes", "total_unused_size", "total_duplicate_bytes", "total_duplicate_size"], + "allOf": [ + { "$ref": "envelope.schema.json" } + ], "properties": { - "ok": { - "type": "boolean", - "const": true - }, - "schema_id": { - "type": "string", - "const": "pgcrate.diagnostics.indexes" - }, - "schema_version": { - "type": "string", - "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$" - }, - "missing": { - "type": "array", - "items": { "$ref": "#/$defs/missingIndexCandidate" } - }, - "unused": { - "type": "array", - "items": { "$ref": "#/$defs/unusedIndex" } - }, - "duplicates": { - "type": "array", - "items": { "$ref": "#/$defs/duplicateIndexSet" } - }, - "total_unused_bytes": { "type": "integer" }, - "total_unused_size": { "type": "string" }, - "total_duplicate_bytes": { "type": "integer" }, - "total_duplicate_size": { "type": "string" } + "schema_id": { "const": "pgcrate.diagnostics.indexes" }, + "data": { "$ref": "#/$defs/indexesData" } }, "$defs": { + "indexesData": { + "type": "object", + "additionalProperties": false, + "required": ["missing", "unused", "duplicates", "total_unused_bytes", "total_unused_size", "total_duplicate_bytes", "total_duplicate_size"], + "properties": { + "missing": { + "type": "array", + "items": { "$ref": "#/$defs/missingIndexCandidate" } + }, + "unused": { + "type": "array", + "items": { "$ref": "#/$defs/unusedIndex" } + }, + "duplicates": { + "type": "array", + "items": { "$ref": "#/$defs/duplicateIndexSet" } + }, + "total_unused_bytes": { "type": "integer" }, + "total_unused_size": { "type": "string" }, + "total_duplicate_bytes": { "type": "integer" }, + "total_duplicate_size": { "type": "string" } + } + }, "missingIndexCandidate": { "type": "object", "additionalProperties": false, diff --git a/schemas/locks.schema.json b/schemas/locks.schema.json index aad904b..5d8b0e0 100644 --- a/schemas/locks.schema.json +++ b/schemas/locks.schema.json @@ -3,36 +3,33 @@ "$id": "pgcrate.diagnostics.locks", "title": "pgcrate locks output", "description": "Blocking locks and long-running transaction analysis", - "type": "object", - "additionalProperties": false, - "required": ["ok", "schema_id", "schema_version", "blocking_chains", "long_transactions", "idle_in_transaction"], + "allOf": [ + { "$ref": "envelope.schema.json" } + ], "properties": { - "ok": { - "type": "boolean", - "const": true - }, - "schema_id": { - "type": "string", - "const": "pgcrate.diagnostics.locks" - }, - "schema_version": { - "type": "string", - "pattern": 
"^[0-9]+\\.[0-9]+\\.[0-9]+$" - }, - "blocking_chains": { - "type": "array", - "items": { "$ref": "#/$defs/blockingChain" } - }, - "long_transactions": { - "type": "array", - "items": { "$ref": "#/$defs/lockProcess" } - }, - "idle_in_transaction": { - "type": "array", - "items": { "$ref": "#/$defs/lockProcess" } - } + "schema_id": { "const": "pgcrate.diagnostics.locks" }, + "data": { "$ref": "#/$defs/locksData" } }, "$defs": { + "locksData": { + "type": "object", + "additionalProperties": false, + "required": ["blocking_chains", "long_transactions", "idle_in_transaction"], + "properties": { + "blocking_chains": { + "type": "array", + "items": { "$ref": "#/$defs/blockingChain" } + }, + "long_transactions": { + "type": "array", + "items": { "$ref": "#/$defs/lockProcess" } + }, + "idle_in_transaction": { + "type": "array", + "items": { "$ref": "#/$defs/lockProcess" } + } + } + }, "lockProcess": { "type": "object", "additionalProperties": false, diff --git a/schemas/sequences.schema.json b/schemas/sequences.schema.json index 2823000..c0d2fd8 100644 --- a/schemas/sequences.schema.json +++ b/schemas/sequences.schema.json @@ -3,31 +3,28 @@ "$id": "pgcrate.diagnostics.sequences", "title": "pgcrate sequences output", "description": "Sequence exhaustion analysis", - "type": "object", - "additionalProperties": false, - "required": ["ok", "schema_id", "schema_version", "sequences", "overall_status", "warning_threshold", "critical_threshold"], + "allOf": [ + { "$ref": "envelope.schema.json" } + ], "properties": { - "ok": { - "type": "boolean", - "const": true - }, - "schema_id": { - "type": "string", - "const": "pgcrate.diagnostics.sequences" - }, - "schema_version": { - "type": "string", - "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$" - }, - "overall_status": { "$ref": "#/$defs/seqStatus" }, - "warning_threshold": { "type": "integer" }, - "critical_threshold": { "type": "integer" }, - "sequences": { - "type": "array", - "items": { "$ref": "#/$defs/sequenceInfo" } - } + "schema_id": { "const": "pgcrate.diagnostics.sequences" }, + "data": { "$ref": "#/$defs/sequencesData" } }, "$defs": { + "sequencesData": { + "type": "object", + "additionalProperties": false, + "required": ["sequences", "overall_status", "warning_threshold", "critical_threshold"], + "properties": { + "overall_status": { "$ref": "#/$defs/seqStatus" }, + "warning_threshold": { "type": "integer" }, + "critical_threshold": { "type": "integer" }, + "sequences": { + "type": "array", + "items": { "$ref": "#/$defs/sequenceInfo" } + } + } + }, "seqStatus": { "type": "string", "enum": ["healthy", "warning", "critical"] diff --git a/schemas/triage.schema.json b/schemas/triage.schema.json index 3a7275a..d86220d 100644 --- a/schemas/triage.schema.json +++ b/schemas/triage.schema.json @@ -3,57 +3,39 @@ "$id": "pgcrate.diagnostics.triage", "title": "pgcrate triage output", "description": "Quick health check showing critical issues first, with graceful degradation", - "type": "object", - "additionalProperties": false, - "required": ["ok", "schema_id", "schema_version", "checks", "skipped_checks", "overall_status"], + "allOf": [ + { "$ref": "envelope.schema.json" } + ], "properties": { - "ok": { - "type": "boolean", - "const": true, - "description": "Always true for successful output" - }, - "schema_id": { - "type": "string", - "const": "pgcrate.diagnostics.triage" - }, - "schema_version": { - "type": "string", - "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$", - "description": "Semver version of this schema" - }, - "overall_status": { - "$ref": 
"#/$defs/checkStatus", - "description": "Worst status across all checks" - }, - "checks": { - "type": "array", - "items": { "$ref": "#/$defs/checkResult" }, - "description": "Individual health checks, sorted by severity (critical first)" - }, - "skipped_checks": { - "type": "array", - "items": { "$ref": "#/$defs/skippedCheck" }, - "description": "Checks that could not run due to permissions, timeouts, etc." - } + "schema_id": { "const": "pgcrate.diagnostics.triage" }, + "data": { "$ref": "#/$defs/triageData" } }, "$defs": { + "triageData": { + "type": "object", + "additionalProperties": false, + "required": ["checks", "skipped_checks", "overall_status"], + "properties": { + "overall_status": { + "$ref": "#/$defs/checkStatus", + "description": "Worst status across all checks" + }, + "checks": { + "type": "array", + "items": { "$ref": "#/$defs/checkResult" }, + "description": "Individual health checks, sorted by severity (critical first)" + }, + "skipped_checks": { + "type": "array", + "items": { "$ref": "#/$defs/skippedCheck" }, + "description": "Checks that could not run due to permissions, timeouts, etc." + } + } + }, "checkStatus": { "type": "string", "enum": ["healthy", "warning", "critical"] }, - "skipReason": { - "type": "string", - "enum": [ - "insufficient_privilege", - "missing_extension", - "unsupported_postgres_version", - "statement_timeout", - "lock_timeout", - "not_applicable", - "internal_error" - ], - "description": "Why a check was skipped (stable enum for automation)" - }, "checkResult": { "type": "object", "additionalProperties": false, @@ -92,7 +74,7 @@ "type": "string", "description": "Identifier of the skipped check" }, - "reason_code": { "$ref": "#/$defs/skipReason" }, + "reason_code": { "$ref": "envelope.schema.json#/$defs/reasonCode" }, "reason_human": { "type": "string", "description": "Human-readable explanation of why the check was skipped" diff --git a/schemas/xid.schema.json b/schemas/xid.schema.json index 9d317a9..503c312 100644 --- a/schemas/xid.schema.json +++ b/schemas/xid.schema.json @@ -3,37 +3,34 @@ "$id": "pgcrate.diagnostics.xid", "title": "pgcrate xid output", "description": "Transaction ID wraparound analysis", - "type": "object", - "additionalProperties": false, - "required": ["ok", "schema_id", "schema_version", "databases", "tables", "vacuum_progress", "overall_status"], + "allOf": [ + { "$ref": "envelope.schema.json" } + ], "properties": { - "ok": { - "type": "boolean", - "const": true - }, - "schema_id": { - "type": "string", - "const": "pgcrate.diagnostics.xid" - }, - "schema_version": { - "type": "string", - "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$" - }, - "overall_status": { "$ref": "#/$defs/xidStatus" }, - "databases": { - "type": "array", - "items": { "$ref": "#/$defs/databaseXid" } - }, - "tables": { - "type": "array", - "items": { "$ref": "#/$defs/tableXid" } - }, - "vacuum_progress": { - "type": "array", - "items": { "$ref": "#/$defs/vacuumProgress" } - } + "schema_id": { "const": "pgcrate.diagnostics.xid" }, + "data": { "$ref": "#/$defs/xidData" } }, "$defs": { + "xidData": { + "type": "object", + "additionalProperties": false, + "required": ["databases", "tables", "vacuum_progress", "overall_status"], + "properties": { + "overall_status": { "$ref": "#/$defs/xidStatus" }, + "databases": { + "type": "array", + "items": { "$ref": "#/$defs/databaseXid" } + }, + "tables": { + "type": "array", + "items": { "$ref": "#/$defs/tableXid" } + }, + "vacuum_progress": { + "type": "array", + "items": { "$ref": "#/$defs/vacuumProgress" } + } + } + }, 
"xidStatus": { "type": "string", "enum": ["healthy", "warning", "critical"] diff --git a/src/commands/capabilities.rs b/src/commands/capabilities.rs new file mode 100644 index 0000000..6c35c97 --- /dev/null +++ b/src/commands/capabilities.rs @@ -0,0 +1,606 @@ +//! Capabilities command: Permission-aware feature discovery. +//! +//! Reports which pgcrate diagnostic and fix capabilities are available +//! in the current environment based on privileges, extensions, and +//! connection mode. + +use anyhow::Result; +use serde::Serialize; +use tokio_postgres::Client; + +use crate::reason_codes::{ReasonCode, ReasonInfo}; + +/// Status of a capability +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum CapabilityStatus { + /// Capability is fully available + Available, + /// Capability works but with limitations + Degraded, + /// Capability is not available + Unavailable, + /// Could not determine availability + Unknown, +} + +/// A requirement for a capability +#[derive(Debug, Clone, Serialize)] +pub struct Requirement { + /// What is required (e.g., "pg_stat_activity SELECT") + pub what: String, + /// Whether this requirement is met + pub met: bool, +} + +/// Information about a single capability +#[derive(Debug, Clone, Serialize)] +pub struct CapabilityInfo { + /// Capability identifier (e.g., "diagnostics.triage") + pub id: &'static str, + /// Human-readable name + pub name: &'static str, + /// Brief description + pub description: &'static str, + /// Availability status + pub status: CapabilityStatus, + /// Reasons for degraded/unavailable status + #[serde(skip_serializing_if = "Vec::is_empty")] + pub reasons: Vec, + /// Requirements and their status + pub requirements: Vec, + /// Limitations when degraded + #[serde(skip_serializing_if = "Vec::is_empty")] + pub limitations: Vec, +} + +/// Full capabilities results +#[derive(Debug, Serialize)] +pub struct CapabilitiesResult { + pub capabilities: Vec, + /// Count of capabilities by status + pub summary: CapabilitySummary, +} + +#[derive(Debug, Serialize)] +pub struct CapabilitySummary { + pub available: usize, + pub degraded: usize, + pub unavailable: usize, + pub unknown: usize, +} + +/// Check capabilities based on privileges and connection mode +pub async fn run_capabilities(client: &Client, read_only: bool) -> Result { + // Probe privileges + let has_pg_stat_activity = check_privilege(client, "pg_stat_activity", "SELECT").await; + let has_pg_stat_user_tables = check_privilege(client, "pg_stat_user_tables", "SELECT").await; + let has_pg_stat_user_indexes = check_privilege(client, "pg_stat_user_indexes", "SELECT").await; + let has_pg_sequences = check_privilege(client, "pg_sequences", "SELECT").await; + let has_pg_database = check_privilege(client, "pg_database", "SELECT").await; + let has_pg_cancel = check_function_privilege(client, "pg_cancel_backend(int)").await; + let has_pg_terminate = check_function_privilege(client, "pg_terminate_backend(int)").await; + let has_pg_stat_statements = check_extension_and_privilege(client, "pg_stat_statements").await; + + let capabilities = vec![ + // diagnostics.triage - always available (uses minimal queries) + CapabilityInfo { + id: "diagnostics.triage", + name: "Triage", + description: "Quick database health check", + status: CapabilityStatus::Available, + reasons: vec![], + requirements: vec![], + limitations: vec![], + }, + // diagnostics.locks - needs pg_stat_activity + check_locks_capability( + has_pg_stat_activity, + has_pg_cancel, + 
has_pg_terminate, + read_only, + ), + // diagnostics.sequences - needs pg_sequences + check_sequences_capability(has_pg_sequences, read_only), + // diagnostics.indexes - needs pg_stat_user_indexes + check_indexes_capability(has_pg_stat_user_indexes, has_pg_stat_user_tables), + // diagnostics.xid - needs pg_database + check_xid_capability(has_pg_database), + // diagnostics.context - always available + CapabilityInfo { + id: "diagnostics.context", + name: "Context", + description: "Connection and server information", + status: CapabilityStatus::Available, + reasons: vec![], + requirements: vec![], + limitations: vec![], + }, + // diagnostics.queries - needs pg_stat_statements (Phase 3, not yet implemented) + check_queries_capability(has_pg_stat_statements), + // fix.sequence - needs write access and pg_sequences + check_fix_sequence_capability(has_pg_sequences, read_only), + // fix.cancel - needs pg_cancel_backend + check_fix_cancel_capability(has_pg_cancel, read_only), + // fix.terminate - needs pg_terminate_backend + check_fix_terminate_capability(has_pg_terminate, read_only), + ]; + + // Calculate summary + let summary = CapabilitySummary { + available: capabilities + .iter() + .filter(|c| c.status == CapabilityStatus::Available) + .count(), + degraded: capabilities + .iter() + .filter(|c| c.status == CapabilityStatus::Degraded) + .count(), + unavailable: capabilities + .iter() + .filter(|c| c.status == CapabilityStatus::Unavailable) + .count(), + unknown: capabilities + .iter() + .filter(|c| c.status == CapabilityStatus::Unknown) + .count(), + }; + + Ok(CapabilitiesResult { + capabilities, + summary, + }) +} + +async fn check_privilege(client: &Client, table: &str, privilege: &str) -> bool { + client + .query_one("SELECT has_table_privilege($1, $2)", &[&table, &privilege]) + .await + .map(|r| r.get::<_, bool>(0)) + .unwrap_or(false) +} + +async fn check_function_privilege(client: &Client, function: &str) -> bool { + client + .query_one("SELECT has_function_privilege($1, 'EXECUTE')", &[&function]) + .await + .map(|r| r.get::<_, bool>(0)) + .unwrap_or(false) +} + +async fn check_extension_and_privilege(client: &Client, extension: &str) -> bool { + // Check if extension exists and we can read from it + client + .query_one( + r#" + SELECT EXISTS ( + SELECT 1 FROM pg_extension WHERE extname = $1 + ) AND has_table_privilege($1, 'SELECT') + "#, + &[&extension], + ) + .await + .map(|r| r.get::<_, bool>(0)) + .unwrap_or(false) +} + +fn check_locks_capability( + has_pg_stat_activity: bool, + has_pg_cancel: bool, + has_pg_terminate: bool, + read_only: bool, +) -> CapabilityInfo { + let mut requirements = vec![Requirement { + what: "pg_stat_activity SELECT".to_string(), + met: has_pg_stat_activity, + }]; + + let mut reasons = vec![]; + let mut limitations = vec![]; + + let status = if !has_pg_stat_activity { + reasons.push(ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot read pg_stat_activity", + )); + CapabilityStatus::Unavailable + } else { + // Check if cancel/terminate are available + if !has_pg_cancel || !has_pg_terminate || read_only { + if read_only { + limitations + .push("Cancel/terminate actions not available in read-only mode".to_string()); + } + if !has_pg_cancel { + limitations.push("pg_cancel_backend not available".to_string()); + } + if !has_pg_terminate { + limitations.push("pg_terminate_backend not available".to_string()); + } + CapabilityStatus::Degraded + } else { + CapabilityStatus::Available + } + }; + + requirements.push(Requirement { + what: "pg_cancel_backend 
EXECUTE".to_string(), + met: has_pg_cancel, + }); + requirements.push(Requirement { + what: "pg_terminate_backend EXECUTE".to_string(), + met: has_pg_terminate, + }); + if !read_only { + requirements.push(Requirement { + what: "read-write mode".to_string(), + met: true, + }); + } + + CapabilityInfo { + id: "diagnostics.locks", + name: "Locks", + description: "Blocking lock detection and analysis", + status, + reasons, + requirements, + limitations, + } +} + +fn check_sequences_capability(has_pg_sequences: bool, read_only: bool) -> CapabilityInfo { + let mut requirements = vec![Requirement { + what: "pg_sequences SELECT".to_string(), + met: has_pg_sequences, + }]; + + let mut reasons = vec![]; + let mut limitations = vec![]; + + let status = if !has_pg_sequences { + reasons.push(ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot read pg_sequences", + )); + CapabilityStatus::Unavailable + } else { + if read_only { + limitations.push("Cannot suggest ALTER SEQUENCE in read-only mode".to_string()); + } + CapabilityStatus::Available + }; + + if read_only { + requirements.push(Requirement { + what: "read-write mode for fixes".to_string(), + met: false, + }); + } + + CapabilityInfo { + id: "diagnostics.sequences", + name: "Sequences", + description: "Sequence exhaustion monitoring", + status, + reasons, + requirements, + limitations, + } +} + +fn check_indexes_capability( + has_pg_stat_user_indexes: bool, + has_pg_stat_user_tables: bool, +) -> CapabilityInfo { + let requirements = vec![ + Requirement { + what: "pg_stat_user_indexes SELECT".to_string(), + met: has_pg_stat_user_indexes, + }, + Requirement { + what: "pg_stat_user_tables SELECT".to_string(), + met: has_pg_stat_user_tables, + }, + ]; + + let mut reasons = vec![]; + let mut limitations = vec![]; + + let status = if !has_pg_stat_user_indexes || !has_pg_stat_user_tables { + if !has_pg_stat_user_indexes { + reasons.push(ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot read pg_stat_user_indexes", + )); + } + if !has_pg_stat_user_tables { + reasons.push(ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot read pg_stat_user_tables", + )); + limitations.push("Missing index detection not available".to_string()); + } + if has_pg_stat_user_indexes { + CapabilityStatus::Degraded + } else { + CapabilityStatus::Unavailable + } + } else { + CapabilityStatus::Available + }; + + CapabilityInfo { + id: "diagnostics.indexes", + name: "Indexes", + description: "Index health analysis", + status, + reasons, + requirements, + limitations, + } +} + +fn check_xid_capability(has_pg_database: bool) -> CapabilityInfo { + let requirements = vec![Requirement { + what: "pg_database SELECT".to_string(), + met: has_pg_database, + }]; + + let (status, reasons) = if !has_pg_database { + ( + CapabilityStatus::Unavailable, + vec![ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot read pg_database", + )], + ) + } else { + (CapabilityStatus::Available, vec![]) + }; + + CapabilityInfo { + id: "diagnostics.xid", + name: "XID Age", + description: "Transaction ID wraparound monitoring", + status, + reasons, + requirements, + limitations: vec![], + } +} + +fn check_queries_capability(has_pg_stat_statements: bool) -> CapabilityInfo { + let requirements = vec![Requirement { + what: "pg_stat_statements extension".to_string(), + met: has_pg_stat_statements, + }]; + + let (status, reasons) = if !has_pg_stat_statements { + ( + CapabilityStatus::Unavailable, + vec![ReasonInfo::new( + ReasonCode::MissingExtension, + "pg_stat_statements extension not 
installed or accessible", + )], + ) + } else { + // Even with the extension, this capability is not yet implemented + ( + CapabilityStatus::Unavailable, + vec![ReasonInfo::new( + ReasonCode::NotApplicable, + "Query analysis not yet implemented (planned for Phase 3)", + )], + ) + }; + + CapabilityInfo { + id: "diagnostics.queries", + name: "Query Analysis", + description: "Slow query identification (pg_stat_statements)", + status, + reasons, + requirements, + limitations: vec!["Not yet implemented".to_string()], + } +} + +fn check_fix_sequence_capability(has_pg_sequences: bool, read_only: bool) -> CapabilityInfo { + let requirements = vec![ + Requirement { + what: "pg_sequences SELECT".to_string(), + met: has_pg_sequences, + }, + Requirement { + what: "read-write mode".to_string(), + met: !read_only, + }, + ]; + + let mut reasons = vec![]; + + let status = if !has_pg_sequences { + reasons.push(ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot read pg_sequences", + )); + CapabilityStatus::Unavailable + } else if read_only { + reasons.push(ReasonInfo::new( + ReasonCode::RequiresReadWrite, + "Sequence fixes require read-write mode", + )); + CapabilityStatus::Unavailable + } else { + // Not yet implemented + reasons.push(ReasonInfo::new( + ReasonCode::NotApplicable, + "Sequence fixes not yet implemented", + )); + CapabilityStatus::Unavailable + }; + + CapabilityInfo { + id: "fix.sequence", + name: "Fix Sequence", + description: "Upgrade sequences to bigint", + status, + reasons, + requirements, + limitations: vec!["Not yet implemented".to_string()], + } +} + +fn check_fix_cancel_capability(has_pg_cancel: bool, read_only: bool) -> CapabilityInfo { + let requirements = vec![ + Requirement { + what: "pg_cancel_backend EXECUTE".to_string(), + met: has_pg_cancel, + }, + Requirement { + what: "read-write mode".to_string(), + met: !read_only, + }, + ]; + + let mut reasons = vec![]; + + let status = if !has_pg_cancel { + reasons.push(ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot execute pg_cancel_backend", + )); + CapabilityStatus::Unavailable + } else if read_only { + reasons.push(ReasonInfo::new( + ReasonCode::RequiresReadWrite, + "Query cancellation requires read-write mode", + )); + CapabilityStatus::Unavailable + } else { + CapabilityStatus::Available + }; + + CapabilityInfo { + id: "fix.cancel", + name: "Cancel Query", + description: "Cancel a running query by PID", + status, + reasons, + requirements, + limitations: vec![], + } +} + +fn check_fix_terminate_capability(has_pg_terminate: bool, read_only: bool) -> CapabilityInfo { + let requirements = vec![ + Requirement { + what: "pg_terminate_backend EXECUTE".to_string(), + met: has_pg_terminate, + }, + Requirement { + what: "read-write mode".to_string(), + met: !read_only, + }, + ]; + + let mut reasons = vec![]; + + let status = if !has_pg_terminate { + reasons.push(ReasonInfo::new( + ReasonCode::MissingPrivilege, + "Cannot execute pg_terminate_backend", + )); + CapabilityStatus::Unavailable + } else if read_only { + reasons.push(ReasonInfo::new( + ReasonCode::RequiresReadWrite, + "Connection termination requires read-write mode", + )); + CapabilityStatus::Unavailable + } else { + CapabilityStatus::Available + }; + + CapabilityInfo { + id: "fix.terminate", + name: "Terminate Connection", + description: "Terminate a connection by PID", + status, + reasons, + requirements, + limitations: vec![], + } +} + +/// Print capabilities in human-readable format +pub fn print_human(result: &CapabilitiesResult) { + 
println!("CAPABILITIES:"); + println!(); + + for cap in &result.capabilities { + let status_str = match cap.status { + CapabilityStatus::Available => "✓ available", + CapabilityStatus::Degraded => "⚠ degraded", + CapabilityStatus::Unavailable => "✗ unavailable", + CapabilityStatus::Unknown => "? unknown", + }; + + println!(" {:25} {}", cap.id, status_str); + + if !cap.limitations.is_empty() { + for lim in &cap.limitations { + println!(" - {}", lim); + } + } + + if !cap.reasons.is_empty() && cap.status != CapabilityStatus::Available { + for reason in &cap.reasons { + println!(" - {}", reason.message); + } + } + } + + println!(); + println!( + "SUMMARY: {} available, {} degraded, {} unavailable", + result.summary.available, result.summary.degraded, result.summary.unavailable + ); +} + +/// Print capabilities as JSON with schema versioning +pub fn print_json( + result: &CapabilitiesResult, + timeouts: Option, +) -> Result<()> { + use crate::output::{schema, DiagnosticOutput, Severity}; + + // Derive severity from capability status + let severity = if result.summary.unavailable > result.summary.available { + Severity::Warning + } else { + Severity::Healthy + }; + + let output = match timeouts { + Some(t) => DiagnosticOutput::with_timeouts(schema::CAPABILITIES, result, severity, t), + None => DiagnosticOutput::new(schema::CAPABILITIES, result, severity), + }; + output.print()?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_capability_status_serialization() { + let json = serde_json::to_string(&CapabilityStatus::Available).unwrap(); + assert_eq!(json, "\"available\""); + + let json = serde_json::to_string(&CapabilityStatus::Degraded).unwrap(); + assert_eq!(json, "\"degraded\""); + } +} diff --git a/src/commands/context.rs b/src/commands/context.rs new file mode 100644 index 0000000..8bf8fda --- /dev/null +++ b/src/commands/context.rs @@ -0,0 +1,415 @@ +//! Context command: Show connection context and server information. +//! +//! Provides information about the current connection, server capabilities, +//! installed extensions, and effective privileges. Useful for understanding +//! what pgcrate can do in the current environment. 
+
+use anyhow::Result;
+use serde::Serialize;
+use std::collections::HashMap;
+use tokio_postgres::Client;
+
+/// Connection target information
+#[derive(Debug, Serialize)]
+pub struct TargetInfo {
+    /// Host (redacted by default for security)
+    pub host: String,
+    /// Port number
+    pub port: u16,
+    /// Database name
+    pub database: String,
+    /// Connected user
+    pub user: String,
+    /// Whether connection is read-only
+    pub readonly: bool,
+}
+
+/// Server information
+#[derive(Debug, Serialize)]
+pub struct ServerInfo {
+    /// Full version string (e.g., "PostgreSQL 16.1 on x86_64-linux")
+    pub version: String,
+    /// Numeric version (e.g., 160001 for 16.0.1)
+    pub version_num: i32,
+    /// Major version (e.g., 16)
+    pub version_major: i32,
+    /// Whether server is a replica (in recovery mode)
+    pub in_recovery: bool,
+    /// Data directory (if readable)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub data_directory: Option<String>,
+}
+
+/// Privilege information for diagnostic capabilities
+#[derive(Debug, Serialize)]
+pub struct PrivilegeInfo {
+    /// Can read pg_stat_activity (for locks, sessions)
+    pub pg_stat_activity_select: bool,
+    /// Can call pg_cancel_backend (for canceling queries)
+    pub pg_cancel_backend_execute: bool,
+    /// Can call pg_terminate_backend (for killing connections)
+    pub pg_terminate_backend_execute: bool,
+    /// Can read pg_stat_statements (for query analysis)
+    pub pg_stat_statements_select: bool,
+    /// Whether user is superuser
+    pub is_superuser: bool,
+    /// Current user roles
+    pub roles: Vec<String>,
+}
+
+/// Full context data
+#[derive(Debug, Serialize)]
+pub struct ContextData {
+    pub target: TargetInfo,
+    pub server: ServerInfo,
+    /// Installed extensions (name -> version available)
+    pub extensions: HashMap<String, String>,
+    /// Effective privileges
+    pub privileges: PrivilegeInfo,
+}
+
+/// Context results wrapper
+#[derive(Debug, Serialize)]
+pub struct ContextResult {
+    #[serde(flatten)]
+    pub context: ContextData,
+}
+
+/// Query target information from the connection
+pub async fn get_target_info(
+    client: &Client,
+    connection_url: &str,
+    read_only: bool,
+    no_redact: bool,
+) -> Result<TargetInfo> {
+    // Parse connection URL to get host/port
+    let url = url::Url::parse(connection_url)?;
+    let host = if no_redact {
+        url.host_str().unwrap_or("localhost").to_string()
+    } else {
+        // Redact to just show it's configured
+        url.host_str()
+            .map(|h| {
+                if h == "localhost" || h == "127.0.0.1" {
+                    h.to_string()
+                } else {
+                    format!("{}...", &h.chars().take(4).collect::<String>())
+                }
+            })
+            .unwrap_or_else(|| "***".to_string())
+    };
+    let port = url.port().unwrap_or(5432);
+
+    let row = client
+        .query_one("SELECT current_database(), current_user", &[])
+        .await?;
+    let database: String = row.get(0);
+    let user: String = row.get(1);
+
+    Ok(TargetInfo {
+        host,
+        port,
+        database,
+        user,
+        readonly: read_only,
+    })
+}
+
+/// Query server information
+pub async fn get_server_info(client: &Client, no_redact: bool) -> Result<ServerInfo> {
+    let row = client
+        .query_one(
+            r#"
+            SELECT
+                version(),
+                current_setting('server_version_num')::int,
+                pg_is_in_recovery()
+            "#,
+            &[],
+        )
+        .await?;
+
+    let version: String = row.get(0);
+    let version_num: i32 = row.get(1);
+    let in_recovery: bool = row.get(2);
+
+    // Extract major version from version_num (e.g., 160001 -> 16)
+    let version_major = version_num / 10000;
+
+    // Data directory is sensitive (reveals filesystem paths) - only show with --no-redact
+    let data_directory = if no_redact {
+        match client
+            .query_one("SELECT current_setting('data_directory')", &[])
+            .await
+        {
+            Ok(row) => {
+                let dir: String = row.get(0);
+                Some(dir)
+            }
+            Err(_) => None,
+        }
+    } else {
+        None
+    };
+
+    Ok(ServerInfo {
+        version,
+        version_num,
+        version_major,
+        in_recovery,
+        data_directory,
+    })
+}
+
+/// Query installed extensions
+pub async fn get_extensions(client: &Client) -> Result<HashMap<String, String>> {
+    let rows = client
+        .query(
+            "SELECT extname, extversion FROM pg_extension ORDER BY extname",
+            &[],
+        )
+        .await?;
+
+    let mut extensions = HashMap::new();
+    for row in rows {
+        let name: String = row.get(0);
+        let version: String = row.get(1);
+        extensions.insert(name, version);
+    }
+
+    Ok(extensions)
+}
+
+/// Query privilege information
+pub async fn get_privileges(client: &Client) -> Result<PrivilegeInfo> {
+    // Check various privileges in parallel
+    let pg_stat_activity = client
+        .query_one(
+            "SELECT has_table_privilege('pg_stat_activity', 'SELECT')",
+            &[],
+        )
+        .await
+        .map(|r| r.get(0))
+        .unwrap_or(false);
+
+    let pg_cancel = client
+        .query_one(
+            "SELECT has_function_privilege('pg_cancel_backend(int)', 'EXECUTE')",
+            &[],
+        )
+        .await
+        .map(|r| r.get(0))
+        .unwrap_or(false);
+
+    let pg_terminate = client
+        .query_one(
+            "SELECT has_function_privilege('pg_terminate_backend(int)', 'EXECUTE')",
+            &[],
+        )
+        .await
+        .map(|r| r.get(0))
+        .unwrap_or(false);
+
+    // Check if pg_stat_statements extension exists and is accessible
+    let pg_stat_statements = client
+        .query_one(
+            r#"
+            SELECT EXISTS (
+                SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'
+            ) AND has_table_privilege('pg_stat_statements', 'SELECT')
+            "#,
+            &[],
+        )
+        .await
+        .map(|r| r.get(0))
+        .unwrap_or(false);
+
+    // Check superuser status
+    let is_superuser: bool = client
+        .query_one("SELECT current_setting('is_superuser') = 'on'", &[])
+        .await
+        .map(|r| r.get(0))
+        .unwrap_or(false);
+
+    // Get user roles
+    let role_rows = client
+        .query(
+            r#"
+            SELECT r.rolname
+            FROM pg_roles r
+            JOIN pg_auth_members m ON r.oid = m.roleid
+            JOIN pg_roles u ON m.member = u.oid
+            WHERE u.rolname = current_user
+            ORDER BY r.rolname
+            "#,
+            &[],
+        )
+        .await
+        .unwrap_or_default();
+
+    let roles: Vec<String> = role_rows.iter().map(|r| r.get(0)).collect();
+
+    Ok(PrivilegeInfo {
+        pg_stat_activity_select: pg_stat_activity,
+        pg_cancel_backend_execute: pg_cancel,
+        pg_terminate_backend_execute: pg_terminate,
+        pg_stat_statements_select: pg_stat_statements,
+        is_superuser,
+        roles,
+    })
+}
+
+/// Run full context analysis
+pub async fn run_context(
+    client: &Client,
+    connection_url: &str,
+    read_only: bool,
+    no_redact: bool,
+) -> Result<ContextResult> {
+    let target = get_target_info(client, connection_url, read_only, no_redact).await?;
+    let server = get_server_info(client, no_redact).await?;
+    let extensions = get_extensions(client).await?;
+    let privileges = get_privileges(client).await?;
+
+    Ok(ContextResult {
+        context: ContextData {
+            target,
+            server,
+            extensions,
+            privileges,
+        },
+    })
+}
+
+/// Print context in human-readable format
+pub fn print_human(result: &ContextResult) {
+    let ctx = &result.context;
+
+    println!("CONNECTION:");
+    println!("  Host:     {}", ctx.target.host);
+    println!("  Port:     {}", ctx.target.port);
+    println!("  Database: {}", ctx.target.database);
+    println!("  User:     {}", ctx.target.user);
+    println!(
+        "  Mode:     {}",
+        if ctx.target.readonly {
+            "read-only"
+        } else {
+            "read-write"
+        }
+    );
+
+    println!();
+    println!("SERVER:");
+    println!(
+        "  Version:  {} ({})",
+        ctx.server.version_major, ctx.server.version_num
+    );
+    println!(
+        "  Recovery: {}",
+        if ctx.server.in_recovery {
+            "yes (replica)"
+        } else {
+ "no (primary)" + } + ); + if let Some(ref dir) = ctx.server.data_directory { + println!(" Data dir: {}", dir); + } + + println!(); + println!("EXTENSIONS ({}):", ctx.extensions.len()); + let mut exts: Vec<_> = ctx.extensions.iter().collect(); + exts.sort_by_key(|(name, _)| name.as_str()); + for (name, version) in exts.iter().take(10) { + println!(" {} ({})", name, version); + } + if ctx.extensions.len() > 10 { + println!(" ... and {} more", ctx.extensions.len() - 10); + } + + println!(); + println!("PRIVILEGES:"); + println!( + " Superuser: {}", + if ctx.privileges.is_superuser { + "yes" + } else { + "no" + } + ); + println!( + " pg_stat_activity: {}", + if ctx.privileges.pg_stat_activity_select { + "✓" + } else { + "✗" + } + ); + println!( + " pg_cancel_backend: {}", + if ctx.privileges.pg_cancel_backend_execute { + "✓" + } else { + "✗" + } + ); + println!( + " pg_terminate: {}", + if ctx.privileges.pg_terminate_backend_execute { + "✓" + } else { + "✗" + } + ); + println!( + " pg_stat_statements: {}", + if ctx.privileges.pg_stat_statements_select { + "✓" + } else { + "✗" + } + ); + + if !ctx.privileges.roles.is_empty() { + println!(); + println!("ROLES:"); + for role in &ctx.privileges.roles { + println!(" {}", role); + } + } +} + +/// Print context as JSON with schema versioning +pub fn print_json( + result: &ContextResult, + timeouts: Option, +) -> Result<()> { + use crate::output::{schema, DiagnosticOutput, Severity}; + + // Context is informational, always healthy unless there's an error + let severity = Severity::Healthy; + + let output = match timeouts { + Some(t) => DiagnosticOutput::with_timeouts(schema::CONTEXT, result, severity, t), + None => DiagnosticOutput::new(schema::CONTEXT, result, severity), + }; + output.print()?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_server_version_major() { + // 160001 should be major version 16 + let major = 160001 / 10000; + assert_eq!(major, 16); + + // 150005 should be major version 15 + let major = 150005 / 10000; + assert_eq!(major, 15); + } +} diff --git a/src/commands/indexes.rs b/src/commands/indexes.rs index 4e18e4a..a61a40c 100644 --- a/src/commands/indexes.rs +++ b/src/commands/indexes.rs @@ -554,10 +554,36 @@ pub fn print_json( result: &IndexesResult, timeouts: Option, ) -> Result<()> { - use crate::output::{schema, DiagnosticOutput}; + use crate::output::{schema, DiagnosticOutput, Severity}; + + // Derive severity from findings + // Missing indexes with high seq scans are concerning + // Large amounts of wasted space from unused/duplicates also warrant attention + let severity = if result + .missing + .iter() + .any(|m| m.seq_scan > 10000 && m.scan_ratio > 100.0) + { + // High sequential scans with very poor index coverage + Severity::Warning + } else if result.total_unused_bytes > 1_000_000_000 + || result.total_duplicate_bytes > 500_000_000 + { + // Over 1GB wasted on unused indexes or 500MB on duplicates + Severity::Warning + } else if !result.missing.is_empty() + || !result.unused.is_empty() + || !result.duplicates.is_empty() + { + // Some findings - report as warning so automation knows there's something to review + Severity::Warning + } else { + Severity::Healthy + }; + let output = match timeouts { - Some(t) => DiagnosticOutput::with_timeouts(schema::INDEXES, result, t), - None => DiagnosticOutput::new(schema::INDEXES, result), + Some(t) => DiagnosticOutput::with_timeouts(schema::INDEXES, result, severity, t), + None => DiagnosticOutput::new(schema::INDEXES, result, severity), }; 
output.print()?; Ok(()) diff --git a/src/commands/locks.rs b/src/commands/locks.rs index 946fc56..c83fcc3 100644 --- a/src/commands/locks.rs +++ b/src/commands/locks.rs @@ -554,10 +554,33 @@ pub fn print_json( result: &LocksResult, timeouts: Option, ) -> Result<()> { - use crate::output::{schema, DiagnosticOutput}; + use crate::output::{schema, DiagnosticOutput, Severity}; + + // Derive severity from findings + let severity = if result + .blocking_chains + .iter() + .any(|c| c.oldest_blocked_seconds > 1800) + { + // Any lock blocked > 30 min is critical + Severity::Critical + } else if !result.blocking_chains.is_empty() + || result + .long_transactions + .iter() + .any(|t| t.duration_seconds > 1800) + { + // Blocking locks or transactions > 30 min are warnings + Severity::Warning + } else if !result.long_transactions.is_empty() || !result.idle_in_transaction.is_empty() { + Severity::Warning + } else { + Severity::Healthy + }; + let output = match timeouts { - Some(t) => DiagnosticOutput::with_timeouts(schema::LOCKS, result, t), - None => DiagnosticOutput::new(schema::LOCKS, result), + Some(t) => DiagnosticOutput::with_timeouts(schema::LOCKS, result, severity, t), + None => DiagnosticOutput::new(schema::LOCKS, result, severity), }; output.print()?; Ok(()) diff --git a/src/commands/mod.rs b/src/commands/mod.rs index 03eb2ce..64e7f08 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -4,6 +4,8 @@ mod anonymize; mod bootstrap; +pub mod capabilities; +pub mod context; mod db; mod doctor; mod extension; diff --git a/src/commands/sequences.rs b/src/commands/sequences.rs index a1e73c3..1ebccb2 100644 --- a/src/commands/sequences.rs +++ b/src/commands/sequences.rs @@ -257,10 +257,18 @@ pub fn print_json( result: &SequencesResult, timeouts: Option, ) -> Result<()> { - use crate::output::{schema, DiagnosticOutput}; + use crate::output::{schema, DiagnosticOutput, Severity}; + + // Convert SeqStatus to Severity + let severity = match result.overall_status { + SeqStatus::Healthy => Severity::Healthy, + SeqStatus::Warning => Severity::Warning, + SeqStatus::Critical => Severity::Critical, + }; + let output = match timeouts { - Some(t) => DiagnosticOutput::with_timeouts(schema::SEQUENCES, result, t), - None => DiagnosticOutput::new(schema::SEQUENCES, result), + Some(t) => DiagnosticOutput::with_timeouts(schema::SEQUENCES, result, severity, t), + None => DiagnosticOutput::new(schema::SEQUENCES, result, severity), }; output.print()?; Ok(()) diff --git a/src/commands/triage.rs b/src/commands/triage.rs index 5923882..a0fb572 100644 --- a/src/commands/triage.rs +++ b/src/commands/triage.rs @@ -7,40 +7,7 @@ use anyhow::Result; use serde::Serialize; use tokio_postgres::Client; -/// Why a check was skipped (stable enum for automation). -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] -#[serde(rename_all = "snake_case")] -pub enum SkipReason { - /// Insufficient database privileges to run the check - InsufficientPrivilege, - /// Required PostgreSQL extension not installed - MissingExtension, - /// PostgreSQL version too old for this check - UnsupportedPostgresVersion, - /// Check timed out (statement_timeout hit) - StatementTimeout, - /// Could not acquire lock (lock_timeout hit) - LockTimeout, - /// Check not applicable to this database configuration - NotApplicable, - /// Unexpected error during check execution - InternalError, -} - -impl SkipReason { - /// Human-readable description of the skip reason. 
- pub fn description(&self) -> &'static str { - match self { - SkipReason::InsufficientPrivilege => "insufficient privileges", - SkipReason::MissingExtension => "required extension not installed", - SkipReason::UnsupportedPostgresVersion => "PostgreSQL version not supported", - SkipReason::StatementTimeout => "statement timeout exceeded", - SkipReason::LockTimeout => "lock timeout exceeded", - SkipReason::NotApplicable => "not applicable to this configuration", - SkipReason::InternalError => "internal error", - } - } -} +use crate::reason_codes::ReasonCode; /// A check that could not be executed. #[derive(Debug, Clone, Serialize)] @@ -48,7 +15,7 @@ pub struct SkippedCheck { /// Check identifier (e.g., "blocking_locks") pub check_id: &'static str, /// Why the check was skipped (stable enum for automation) - pub reason_code: SkipReason, + pub reason_code: ReasonCode, /// Human-readable explanation pub reason_human: String, } @@ -173,39 +140,10 @@ impl TriageResults { } } -/// Classify error message using heuristics (fallback when SQLSTATE unavailable). -fn classify_error_message(msg: &str) -> SkipReason { - let lower = msg.to_lowercase(); - if lower.contains("permission denied") || lower.contains("must be superuser") { - SkipReason::InsufficientPrivilege - } else if lower.contains("statement timeout") || lower.contains("canceling statement") { - SkipReason::StatementTimeout - } else if lower.contains("lock timeout") || lower.contains("could not obtain lock") { - SkipReason::LockTimeout - } else if lower.contains("does not exist") && lower.contains("extension") { - SkipReason::MissingExtension - } else { - SkipReason::InternalError - } -} - -fn classify_error(err: &tokio_postgres::Error) -> (SkipReason, String) { +fn classify_error(err: &tokio_postgres::Error) -> (ReasonCode, String) { let msg = err.to_string(); - - // Check for SQLSTATE codes first (most reliable) - if let Some(db_err) = err.as_db_error() { - let code = db_err.code().code(); - let reason = match code { - "42501" => SkipReason::InsufficientPrivilege, - "57014" => SkipReason::StatementTimeout, // query_canceled - "55P03" => SkipReason::LockTimeout, // lock_not_available - _ => classify_error_message(&msg), - }; - return (reason, msg); - } - - // Fallback to message heuristics - (classify_error_message(&msg), msg) + let code = ReasonCode::from_postgres_error(err); + (code, msg) } /// Result of running a single check: either success or skip. 
@@ -821,12 +759,33 @@ pub fn print_json( results: &TriageResults, timeouts: Option, ) -> Result<()> { - use crate::output::{schema, DiagnosticOutput}; + use crate::output::{schema, DiagnosticOutput, Severity}; + use crate::reason_codes::ReasonInfo; + + // Derive severity from overall status + let severity = Severity::from_check_status(&results.overall_status); + + // Convert skipped checks to warnings + let warnings: Vec = results + .skipped_checks + .iter() + .map(|skip| { + ReasonInfo::new( + skip.reason_code, + format!("{}: {}", skip.check_id, skip.reason_human), + ) + }) + .collect(); + + let partial = !results.skipped_checks.is_empty(); + let output = match timeouts { - Some(t) => DiagnosticOutput::with_timeouts(schema::TRIAGE, results, t), - None => DiagnosticOutput::new(schema::TRIAGE, results), + Some(t) => DiagnosticOutput::with_timeouts(schema::TRIAGE, results, severity, t), + None => DiagnosticOutput::new(schema::TRIAGE, results, severity), }; + let output = output.with_partial(partial).with_warnings(warnings); output.print()?; + Ok(()) } @@ -908,7 +867,7 @@ mod tests { vec![check("test1", "TEST1", CheckStatus::Healthy)], vec![SkippedCheck { check_id: "replication", - reason_code: SkipReason::InsufficientPrivilege, + reason_code: ReasonCode::MissingPrivilege, reason_human: "permission denied".to_string(), }], ); @@ -921,56 +880,4 @@ mod tests { let action = NextAction::pgcrate(&["locks", "--blocking"], "test"); assert_eq!(action.to_command_string(), "pgcrate locks --blocking"); } - - #[test] - fn test_classify_error_message_permission_denied() { - assert_eq!( - classify_error_message("permission denied for table foo"), - SkipReason::InsufficientPrivilege - ); - assert_eq!( - classify_error_message("must be superuser to do this"), - SkipReason::InsufficientPrivilege - ); - } - - #[test] - fn test_classify_error_message_timeout() { - assert_eq!( - classify_error_message("canceling statement due to statement timeout"), - SkipReason::StatementTimeout - ); - assert_eq!( - classify_error_message("statement timeout expired"), - SkipReason::StatementTimeout - ); - } - - #[test] - fn test_classify_error_message_lock() { - assert_eq!( - classify_error_message("could not obtain lock on relation"), - SkipReason::LockTimeout - ); - assert_eq!( - classify_error_message("lock timeout"), - SkipReason::LockTimeout - ); - } - - #[test] - fn test_classify_error_message_extension() { - assert_eq!( - classify_error_message("extension \"pg_stat_statements\" does not exist"), - SkipReason::MissingExtension - ); - } - - #[test] - fn test_classify_error_message_unknown() { - assert_eq!( - classify_error_message("some random error"), - SkipReason::InternalError - ); - } } diff --git a/src/commands/xid.rs b/src/commands/xid.rs index 1796c19..e157fec 100644 --- a/src/commands/xid.rs +++ b/src/commands/xid.rs @@ -204,7 +204,7 @@ fn format_xid(age: i64) -> String { } /// Print XID results in human-readable format -pub fn print_human(result: &XidResult, _quiet: bool) { +pub fn print_human(result: &XidResult) { // Database-level XID println!("DATABASE XID AGE:"); println!(); @@ -283,10 +283,18 @@ pub fn print_json( result: &XidResult, timeouts: Option, ) -> Result<()> { - use crate::output::{schema, DiagnosticOutput}; + use crate::output::{schema, DiagnosticOutput, Severity}; + + // Convert XidStatus to Severity + let severity = match result.overall_status { + XidStatus::Healthy => Severity::Healthy, + XidStatus::Warning => Severity::Warning, + XidStatus::Critical => Severity::Critical, + }; + let output = 
match timeouts { - Some(t) => DiagnosticOutput::with_timeouts(schema::XID, result, t), - None => DiagnosticOutput::new(schema::XID, result), + Some(t) => DiagnosticOutput::with_timeouts(schema::XID, result, severity, t), + None => DiagnosticOutput::new(schema::XID, result, severity), }; output.print()?; Ok(()) diff --git a/src/diagnostic.rs b/src/diagnostic.rs index c429768..cc9a17f 100644 --- a/src/diagnostic.rs +++ b/src/diagnostic.rs @@ -176,12 +176,12 @@ pub fn parse_duration(s: &str) -> Result { } // Try to find the unit suffix - let (num_part, unit) = if s.ends_with("ms") { - (&s[..s.len() - 2], "ms") - } else if s.ends_with('s') { - (&s[..s.len() - 1], "s") - } else if s.ends_with('m') { - (&s[..s.len() - 1], "m") + let (num_part, unit) = if let Some(stripped) = s.strip_suffix("ms") { + (stripped, "ms") + } else if let Some(stripped) = s.strip_suffix('s') { + (stripped, "s") + } else if let Some(stripped) = s.strip_suffix('m') { + (stripped, "m") } else { // Default to seconds if no unit (s, "s") diff --git a/src/exit_codes.rs b/src/exit_codes.rs index b80bfc2..abe3d5a 100644 --- a/src/exit_codes.rs +++ b/src/exit_codes.rs @@ -1,5 +1,7 @@ //! Exit code policy for pgcrate. //! +#![allow(dead_code)] // Constants defined for policy documentation, used selectively +//! //! ## Findings (0-2) //! //! Diagnostic commands return exit codes based on findings: diff --git a/src/main.rs b/src/main.rs index 9c75f16..10af13f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -16,6 +16,7 @@ mod introspect; mod migrations; mod model; mod output; +mod reason_codes; mod redact; mod seed; mod snapshot; @@ -74,6 +75,8 @@ fn json_supported(command: &Commands) -> bool { Commands::Xid { .. } => true, Commands::Sequences { .. } => true, Commands::Indexes { .. } => true, + Commands::Context => true, + Commands::Capabilities => true, Commands::Sql { .. } => true, Commands::Snapshot { command } => matches!( command, @@ -331,6 +334,10 @@ enum Commands { #[arg(long, default_value = "20")] unused_limit: usize, }, + /// Show connection context, server info, extensions, and privileges + Context, + /// Show available capabilities based on privileges and connection mode + Capabilities, /// Run arbitrary SQL against the database (alias: query) #[command(alias = "query")] Sql { @@ -1204,11 +1211,11 @@ async fn run(cli: Cli, output: &Output) -> Result<()> { eprintln!("pgcrate: WARNING: --no-redact disables credential redaction. 
Output may contain sensitive data."); } if let Some(pid) = cancel { - commands::locks::cancel_query(&client, pid, execute, should_redact).await?; + commands::locks::cancel_query(client, pid, execute, should_redact).await?; return Ok(()); } if let Some(pid) = kill { - commands::locks::terminate_connection(&client, pid, execute, should_redact).await?; + commands::locks::terminate_connection(client, pid, execute, should_redact).await?; return Ok(()); } @@ -1224,16 +1231,16 @@ async fn run(cli: Cli, output: &Output) -> Result<()> { }; if show_blocking { - result.blocking_chains = commands::locks::get_blocking_chains(&client).await?; + result.blocking_chains = commands::locks::get_blocking_chains(client).await?; } if show_long_tx { let min_minutes = long_tx.unwrap_or(5); result.long_transactions = - commands::locks::get_long_transactions(&client, min_minutes).await?; + commands::locks::get_long_transactions(client, min_minutes).await?; } if show_idle { result.idle_in_transaction = - commands::locks::get_idle_in_transaction(&client).await?; + commands::locks::get_idle_in_transaction(client).await?; } // Apply redaction unless explicitly disabled (warning already printed above) @@ -1296,7 +1303,7 @@ async fn run(cli: Cli, output: &Output) -> Result<()> { if cli.json { commands::xid::print_json(&result, Some(session.effective_timeouts()))?; } else { - commands::xid::print_human(&result, cli.quiet); + commands::xid::print_human(&result); } // Exit with appropriate code @@ -1384,6 +1391,82 @@ async fn run(cli: Cli, output: &Output) -> Result<()> { commands::indexes::print_human(&result, cli.verbose); } } + Commands::Context => { + let config = + Config::load(cli.config_path.as_deref()).context("Failed to load configuration")?; + let conn_result = connection::resolve_and_validate( + &config, + cli.database_url.as_deref(), + cli.connection.as_deref(), + cli.env_var.as_deref(), + cli.allow_primary, + cli.read_write, + cli.quiet, + )?; + + // Use DiagnosticSession with timeout enforcement + let timeout_config = parse_timeout_config(&cli)?; + let session = DiagnosticSession::connect(&conn_result.url, timeout_config).await?; + + // Set up Ctrl+C handler to cancel queries gracefully + setup_ctrlc_handler(session.cancel_token()); + + // Show effective timeouts unless quiet + if !cli.quiet && !cli.json { + eprintln!("pgcrate: timeouts: {}", session.effective_timeouts()); + } + + let result = commands::context::run_context( + session.client(), + &conn_result.url, + !cli.read_write, // read_only is the inverse of read_write flag + cli.no_redact, + ) + .await?; + + if cli.json { + commands::context::print_json(&result, Some(session.effective_timeouts()))?; + } else { + commands::context::print_human(&result); + } + } + Commands::Capabilities => { + let config = + Config::load(cli.config_path.as_deref()).context("Failed to load configuration")?; + let conn_result = connection::resolve_and_validate( + &config, + cli.database_url.as_deref(), + cli.connection.as_deref(), + cli.env_var.as_deref(), + cli.allow_primary, + cli.read_write, + cli.quiet, + )?; + + // Use DiagnosticSession with timeout enforcement + let timeout_config = parse_timeout_config(&cli)?; + let session = DiagnosticSession::connect(&conn_result.url, timeout_config).await?; + + // Set up Ctrl+C handler to cancel queries gracefully + setup_ctrlc_handler(session.cancel_token()); + + // Show effective timeouts unless quiet + if !cli.quiet && !cli.json { + eprintln!("pgcrate: timeouts: {}", session.effective_timeouts()); + } + + let result = 
commands::capabilities::run_capabilities(
+                session.client(),
+                !cli.read_write, // read_only is the inverse of read_write flag
+            )
+            .await?;
+
+            if cli.json {
+                commands::capabilities::print_json(&result, Some(session.effective_timeouts()))?;
+            } else {
+                commands::capabilities::print_human(&result);
+            }
+        }
         Commands::Sql {
             command,
             allow_write,
@@ -1755,6 +1838,8 @@ async fn run(cli: Cli, output: &Output) -> Result<()> {
             | Commands::Xid { .. }
             | Commands::Sequences { .. }
             | Commands::Indexes { .. }
+            | Commands::Context
+            | Commands::Capabilities
             | Commands::Sql { .. }
             | Commands::Db { .. }
             | Commands::Snapshot { .. }
diff --git a/src/output.rs b/src/output.rs
index 1ca6434..a7795b6 100644
--- a/src/output.rs
+++ b/src/output.rs
@@ -100,38 +100,64 @@ impl Output {
 // JSON Response Types
 // =============================================================================
 
-/// JSON error response (written to stdout with non-zero exit)
+/// JSON error response using envelope structure (written to stdout with non-zero exit).
+/// Matches DiagnosticOutput structure so consumers get consistent envelope format.
 #[derive(Debug, Serialize)]
 pub struct JsonError {
     pub ok: bool,
-    pub error: JsonErrorDetails,
+    pub schema_id: &'static str,
+    pub schema_version: &'static str,
+    pub tool_version: &'static str,
+    pub generated_at: String,
+    pub severity: &'static str,
+    pub errors: Vec<JsonErrorInfo>,
+    /// Always null for error responses
+    pub data: Option<()>,
 }
 
 #[derive(Debug, Serialize)]
-pub struct JsonErrorDetails {
+pub struct JsonErrorInfo {
+    pub code: &'static str,
     pub message: String,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub details: Option<String>,
 }
 
 impl JsonError {
+    /// Generic error schema for non-diagnostic failures
+    pub const SCHEMA_ID: &'static str = "pgcrate.error";
+
     pub fn new(message: impl Into<String>) -> Self {
         Self {
             ok: false,
-            error: JsonErrorDetails {
+            schema_id: Self::SCHEMA_ID,
+            schema_version: DIAGNOSTIC_SCHEMA_VERSION,
+            tool_version: TOOL_VERSION,
+            generated_at: chrono::Utc::now().to_rfc3339(),
+            severity: "error",
+            errors: vec![JsonErrorInfo {
+                code: "internal_error",
                 message: message.into(),
                 details: None,
-            },
+            }],
+            data: None,
         }
     }
 
     pub fn with_details(message: impl Into<String>, details: impl Into<String>) -> Self {
         Self {
             ok: false,
-            error: JsonErrorDetails {
+            schema_id: Self::SCHEMA_ID,
+            schema_version: DIAGNOSTIC_SCHEMA_VERSION,
+            tool_version: TOOL_VERSION,
+            generated_at: chrono::Utc::now().to_rfc3339(),
+            severity: "error",
+            errors: vec![JsonErrorInfo {
+                code: "internal_error",
                 message: message.into(),
                 details: Some(details.into()),
-            },
+            }],
+            data: None,
         }
     }
 
@@ -231,19 +257,83 @@ pub struct DescribeResponse {
 
 /// Schema version for diagnostic JSON outputs.
 /// Follows semver: breaking=major, additive=minor, bugfix=patch.
-pub const DIAGNOSTIC_SCHEMA_VERSION: &str = "1.0.0";
+/// v2.0.0: Breaking change - data field is no longer flattened, added severity/warnings/errors.
+pub const DIAGNOSTIC_SCHEMA_VERSION: &str = "2.0.0";
+
+/// Tool version from Cargo.toml.
+pub const TOOL_VERSION: &str = env!("CARGO_PKG_VERSION");
+
+/// Overall severity level for diagnostic output.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
+#[serde(rename_all = "lowercase")]
+#[allow(dead_code)] // Error variant and worst() designed for future use
+pub enum Severity {
+    /// All checks healthy, no issues found
+    Healthy,
+    /// Some issues found that warrant attention
+    Warning,
+    /// Critical issues found that need immediate attention
+    Critical,
+    /// Error during diagnostic execution (not a finding, but a failure)
+    Error,
+}
+
+impl Severity {
+    /// Combine two severities, returning the worst.
+    #[allow(dead_code)]
+    pub fn worst(self, other: Self) -> Self {
+        use Severity::*;
+        match (self, other) {
+            (Error, _) | (_, Error) => Error,
+            (Critical, _) | (_, Critical) => Critical,
+            (Warning, _) | (_, Warning) => Warning,
+            (Healthy, Healthy) => Healthy,
+        }
+    }
+
+    /// Convert from triage CheckStatus.
+    pub fn from_check_status(status: &crate::commands::triage::CheckStatus) -> Self {
+        match status {
+            crate::commands::triage::CheckStatus::Healthy => Severity::Healthy,
+            crate::commands::triage::CheckStatus::Warning => Severity::Warning,
+            crate::commands::triage::CheckStatus::Critical => Severity::Critical,
+        }
+    }
+}
 
 /// Wrapper for diagnostic command JSON output.
 /// Includes schema metadata for stable automation and versioning.
+///
+/// v2.0.0 changes:
+/// - Added `tool_version` and `generated_at` for traceability
+/// - Added `severity` to indicate overall health
+/// - Added `partial` flag when some checks were skipped
+/// - Added `warnings` and `errors` arrays with structured reason codes
+/// - Removed `#[serde(flatten)]` from `data` - data is now a nested field
 #[derive(Debug, Serialize)]
 pub struct DiagnosticOutput<T> {
     pub ok: bool,
     pub schema_id: &'static str,
     pub schema_version: &'static str,
+    /// Tool version (pgcrate version that generated this output)
+    pub tool_version: &'static str,
+    /// ISO 8601 timestamp when this output was generated
+    pub generated_at: String,
+    /// Overall severity: healthy, warning, critical, or error
+    pub severity: Severity,
+    /// Whether this output is partial (some checks were skipped)
+    #[serde(skip_serializing_if = "std::ops::Not::not")]
+    pub partial: bool,
+    /// Warnings encountered during execution (non-fatal issues)
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub warnings: Vec<crate::reason_codes::ReasonInfo>,
+    /// Errors encountered during execution (fatal issues that prevented checks)
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub errors: Vec<crate::reason_codes::ReasonInfo>,
     /// Effective timeout configuration used for this diagnostic
     #[serde(skip_serializing_if = "Option::is_none")]
     pub timeouts: Option<TimeoutsJson>,
-    #[serde(flatten)]
+    /// Command-specific data payload
    pub data: T,
 }
 
@@ -257,11 +347,17 @@ pub struct TimeoutsJson {
 impl<T: Serialize> DiagnosticOutput<T> {
     /// Create a new diagnostic output with the given schema ID and data.
-    pub fn new(schema_id: &'static str, data: T) -> Self {
+    pub fn new(schema_id: &'static str, data: T, severity: Severity) -> Self {
         Self {
             ok: true,
             schema_id,
             schema_version: DIAGNOSTIC_SCHEMA_VERSION,
+            tool_version: TOOL_VERSION,
+            generated_at: chrono::Utc::now().to_rfc3339(),
+            severity,
+            partial: false,
+            warnings: Vec::new(),
+            errors: Vec::new(),
             timeouts: None,
             data,
         }
     }
@@ -271,12 +367,19 @@ impl<T: Serialize> DiagnosticOutput<T> {
     pub fn with_timeouts(
         schema_id: &'static str,
         data: T,
+        severity: Severity,
         timeouts: crate::diagnostic::EffectiveTimeouts,
     ) -> Self {
         Self {
             ok: true,
             schema_id,
             schema_version: DIAGNOSTIC_SCHEMA_VERSION,
+            tool_version: TOOL_VERSION,
+            generated_at: chrono::Utc::now().to_rfc3339(),
+            severity,
+            partial: false,
+            warnings: Vec::new(),
+            errors: Vec::new(),
             timeouts: Some(TimeoutsJson {
                 connect_ms: timeouts.connect_timeout_ms,
                 statement_ms: timeouts.statement_timeout_ms,
@@ -286,6 +389,26 @@ impl<T: Serialize> DiagnosticOutput<T> {
         }
     }
 
+    /// Mark this output as partial (some checks were skipped).
+    pub fn with_partial(mut self, partial: bool) -> Self {
+        self.partial = partial;
+        self
+    }
+
+    /// Add warnings to this output.
+    pub fn with_warnings(mut self, warnings: Vec<crate::reason_codes::ReasonInfo>) -> Self {
+        self.warnings = warnings;
+        self
+    }
+
+    /// Add errors to this output.
+    #[allow(dead_code)]
+    pub fn with_errors(mut self, errors: Vec<crate::reason_codes::ReasonInfo>) -> Self {
+        self.ok = errors.is_empty();
+        self.errors = errors;
+        self
+    }
+
     /// Print this output as JSON to stdout.
     pub fn print(&self) -> Result<(), serde_json::Error> {
         let json = serde_json::to_string_pretty(self)?;
@@ -301,99 +424,8 @@ pub mod schema {
     pub const XID: &str = "pgcrate.diagnostics.xid";
     pub const SEQUENCES: &str = "pgcrate.diagnostics.sequences";
     pub const INDEXES: &str = "pgcrate.diagnostics.indexes";
-}
-
-/// Diagnostic-specific error response.
-/// Distinguishes "tool failed" from "critical finding" by including schema metadata.
-#[derive(Debug, Serialize)]
-pub struct DiagnosticError {
-    pub ok: bool,
-    /// Which diagnostic command failed
-    pub schema_id: &'static str,
-    pub schema_version: &'static str,
-    /// Stable error code for automation
-    pub error_code: DiagnosticErrorCode,
-    /// Human-readable error message
-    pub message: String,
-    /// Optional additional details (error chain, hints)
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub details: Option<String>,
-}
-
-/// Error codes for diagnostic failures (stable enum for automation).
-#[derive(Debug, Clone, Copy, Serialize)]
-#[serde(rename_all = "snake_case")]
-pub enum DiagnosticErrorCode {
-    /// Could not connect to database
-    ConnectionFailed,
-    /// Query timed out (statement_timeout)
-    StatementTimeout,
-    /// Could not acquire lock (lock_timeout)
-    LockTimeout,
-    /// Insufficient privileges to run diagnostic
-    PermissionDenied,
-    /// Internal error in pgcrate
-    InternalError,
-}
-
-impl DiagnosticError {
-    pub fn new(
-        schema_id: &'static str,
-        error_code: DiagnosticErrorCode,
-        message: impl Into<String>,
-    ) -> Self {
-        Self {
-            ok: false,
-            schema_id,
-            schema_version: DIAGNOSTIC_SCHEMA_VERSION,
-            error_code,
-            message: message.into(),
-            details: None,
-        }
-    }
-
-    pub fn with_details(
-        schema_id: &'static str,
-        error_code: DiagnosticErrorCode,
-        message: impl Into<String>,
-        details: impl Into<String>,
-    ) -> Self {
-        Self {
-            ok: false,
-            schema_id,
-            schema_version: DIAGNOSTIC_SCHEMA_VERSION,
-            error_code,
-            message: message.into(),
-            details: Some(details.into()),
-        }
-    }
-
-    /// Classify an anyhow error into a diagnostic error code.
- pub fn classify_error(err: &anyhow::Error) -> DiagnosticErrorCode { - let msg = err.to_string().to_lowercase(); - - if msg.contains("connection refused") - || msg.contains("could not connect") - || msg.contains("connection timed out") - { - DiagnosticErrorCode::ConnectionFailed - } else if msg.contains("statement timeout") || msg.contains("canceling statement") { - DiagnosticErrorCode::StatementTimeout - } else if msg.contains("lock timeout") || msg.contains("could not obtain lock") { - DiagnosticErrorCode::LockTimeout - } else if msg.contains("permission denied") || msg.contains("must be superuser") { - DiagnosticErrorCode::PermissionDenied - } else { - DiagnosticErrorCode::InternalError - } - } - - /// Print this error as JSON to stdout. - pub fn print(&self) { - let json = serde_json::to_string_pretty(self) - .expect("DiagnosticError serialization should never fail"); - println!("{}", json); - } + pub const CONTEXT: &str = "pgcrate.diagnostics.context"; + pub const CAPABILITIES: &str = "pgcrate.diagnostics.capabilities"; } // ============================================================================= @@ -468,16 +500,20 @@ mod tests { fn test_json_error_basic() { let err = JsonError::new("Something went wrong"); assert!(!err.ok); - assert_eq!(err.error.message, "Something went wrong"); - assert!(err.error.details.is_none()); + assert_eq!(err.schema_id, "pgcrate.error"); + assert_eq!(err.severity, "error"); + assert_eq!(err.errors.len(), 1); + assert_eq!(err.errors[0].message, "Something went wrong"); + assert!(err.errors[0].details.is_none()); } #[test] fn test_json_error_with_details() { let err = JsonError::with_details("Connection failed", "Host not found"); assert!(!err.ok); - assert_eq!(err.error.message, "Connection failed"); - assert_eq!(err.error.details, Some("Host not found".to_string())); + assert_eq!(err.errors.len(), 1); + assert_eq!(err.errors[0].message, "Connection failed"); + assert_eq!(err.errors[0].details, Some("Host not found".to_string())); } #[test] diff --git a/src/reason_codes.rs b/src/reason_codes.rs new file mode 100644 index 0000000..372a5b1 --- /dev/null +++ b/src/reason_codes.rs @@ -0,0 +1,420 @@ +//! Reason codes for diagnostic outputs. +//! +//! Provides a taxonomy of reasons why operations failed, degraded, or were skipped. +//! These are stable identifiers for automation - the enum variants are the contract. + +use serde::Serialize; + +/// Reason code taxonomy for diagnostic outputs. 
+/// +/// Categorized into three groups: +/// - **Operational**: Runtime conditions (timeouts, connection issues) +/// - **Policy**: Intentional restrictions (safety rails, confirmation required) +/// - **Capability**: Missing prerequisites (extensions, privileges, features) +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +#[serde(rename_all = "snake_case")] +#[allow(dead_code)] // Full taxonomy defined; variants used as needed +pub enum ReasonCode { + // ========================================================================= + // Operational: Runtime conditions that prevented operation + // ========================================================================= + /// Connection to database timed out + ConnectionTimeout, + /// Statement execution timed out (statement_timeout) + StatementTimeout, + /// Could not acquire lock within timeout (lock_timeout) + LockTimeout, + /// Connection to database failed + ConnectionFailed, + /// Query was cancelled (e.g., by Ctrl+C) + QueryCancelled, + /// Database server is shutting down + ServerShutdown, + /// Too many connections to server + TooManyConnections, + /// Out of memory on server + OutOfMemory, + /// Disk full on server + DiskFull, + /// Unexpected internal error + InternalError, + + // ========================================================================= + // Policy: Intentional restrictions enforced by pgcrate + // ========================================================================= + /// Operation requires --primary flag to confirm primary database access + PrimaryRequiresAck, + /// Operation requires --read-write flag (default is read-only) + RequiresReadWrite, + /// Operation is too dangerous without explicit confirmation + DangerousOperation, + /// Operation not allowed on replica/standby + ReplicaNotAllowed, + /// Operation not allowed on primary + PrimaryNotAllowed, + /// Feature is disabled in configuration + FeatureDisabled, + + // ========================================================================= + // Capability: Missing prerequisites to run the operation + // ========================================================================= + /// Required PostgreSQL extension not installed + MissingExtension, + /// Insufficient database privileges + MissingPrivilege, + /// Required database role does not exist + MissingRole, + /// Required table or relation does not exist + MissingTable, + /// Required schema does not exist + MissingSchema, + /// Required function does not exist + MissingFunction, + /// PostgreSQL version is too old for this feature + UnsupportedVersion, + /// Feature not applicable to current database configuration + NotApplicable, + /// Required configuration parameter not set + MissingConfig, + /// Feature requires superuser privileges + RequiresSuperuser, + /// Feature requires replication privileges + RequiresReplication, +} + +impl ReasonCode { + /// Human-readable description of the reason code. 
+ pub fn description(&self) -> &'static str { + match self { + // Operational + ReasonCode::ConnectionTimeout => "connection timed out", + ReasonCode::StatementTimeout => "statement timeout exceeded", + ReasonCode::LockTimeout => "lock timeout exceeded", + ReasonCode::ConnectionFailed => "connection failed", + ReasonCode::QueryCancelled => "query was cancelled", + ReasonCode::ServerShutdown => "server is shutting down", + ReasonCode::TooManyConnections => "too many connections", + ReasonCode::OutOfMemory => "out of memory", + ReasonCode::DiskFull => "disk full", + ReasonCode::InternalError => "internal error", + + // Policy + ReasonCode::PrimaryRequiresAck => "requires --primary flag to confirm", + ReasonCode::RequiresReadWrite => "requires --read-write flag", + ReasonCode::DangerousOperation => "dangerous operation requires confirmation", + ReasonCode::ReplicaNotAllowed => "operation not allowed on replica", + ReasonCode::PrimaryNotAllowed => "operation not allowed on primary", + ReasonCode::FeatureDisabled => "feature is disabled", + + // Capability + ReasonCode::MissingExtension => "required extension not installed", + ReasonCode::MissingPrivilege => "insufficient privileges", + ReasonCode::MissingRole => "required role does not exist", + ReasonCode::MissingTable => "required table does not exist", + ReasonCode::MissingSchema => "required schema does not exist", + ReasonCode::MissingFunction => "required function does not exist", + ReasonCode::UnsupportedVersion => "PostgreSQL version not supported", + ReasonCode::NotApplicable => "not applicable to this configuration", + ReasonCode::MissingConfig => "required configuration not set", + ReasonCode::RequiresSuperuser => "requires superuser privileges", + ReasonCode::RequiresReplication => "requires replication privileges", + } + } + + /// Category of the reason code. + #[allow(dead_code)] + pub fn category(&self) -> ReasonCategory { + match self { + ReasonCode::ConnectionTimeout + | ReasonCode::StatementTimeout + | ReasonCode::LockTimeout + | ReasonCode::ConnectionFailed + | ReasonCode::QueryCancelled + | ReasonCode::ServerShutdown + | ReasonCode::TooManyConnections + | ReasonCode::OutOfMemory + | ReasonCode::DiskFull + | ReasonCode::InternalError => ReasonCategory::Operational, + + ReasonCode::PrimaryRequiresAck + | ReasonCode::RequiresReadWrite + | ReasonCode::DangerousOperation + | ReasonCode::ReplicaNotAllowed + | ReasonCode::PrimaryNotAllowed + | ReasonCode::FeatureDisabled => ReasonCategory::Policy, + + ReasonCode::MissingExtension + | ReasonCode::MissingPrivilege + | ReasonCode::MissingRole + | ReasonCode::MissingTable + | ReasonCode::MissingSchema + | ReasonCode::MissingFunction + | ReasonCode::UnsupportedVersion + | ReasonCode::NotApplicable + | ReasonCode::MissingConfig + | ReasonCode::RequiresSuperuser + | ReasonCode::RequiresReplication => ReasonCategory::Capability, + } + } + + /// Classify a tokio_postgres error into a reason code. 
+ pub fn from_postgres_error(err: &tokio_postgres::Error) -> Self { + let msg = err.to_string().to_lowercase(); + + // Check for SQLSTATE codes first (most reliable) + if let Some(db_err) = err.as_db_error() { + let code = db_err.code().code(); + return match code { + // Class 08 - Connection Exception + "08000" | "08003" | "08006" => ReasonCode::ConnectionFailed, + "08001" => ReasonCode::ConnectionFailed, // sqlclient_unable_to_establish_sqlconnection + "08004" => ReasonCode::ConnectionFailed, // sqlserver_rejected_establishment_of_sqlconnection + + // Class 42 - Syntax/Access + "42501" => ReasonCode::MissingPrivilege, // insufficient_privilege + "42883" => ReasonCode::MissingFunction, // undefined_function + "42P01" => ReasonCode::MissingTable, // undefined_table + "3F000" => ReasonCode::MissingSchema, // invalid_schema_name + + // Class 53 - Insufficient Resources + "53000" => ReasonCode::OutOfMemory, // insufficient_resources + "53100" => ReasonCode::DiskFull, // disk_full + "53200" => ReasonCode::OutOfMemory, // out_of_memory + "53300" => ReasonCode::TooManyConnections, // too_many_connections + + // Class 55 - Object Not In Prerequisite State + "55P03" => ReasonCode::LockTimeout, // lock_not_available + + // Class 57 - Operator Intervention + // 57014 is query_canceled but also used for statement_timeout - check message + "57014" => { + if msg.contains("statement timeout") { + ReasonCode::StatementTimeout + } else { + ReasonCode::QueryCancelled + } + } + "57P01" => ReasonCode::ServerShutdown, // admin_shutdown + "57P02" => ReasonCode::ServerShutdown, // crash_shutdown + "57P03" => ReasonCode::ServerShutdown, // cannot_connect_now + + _ => Self::classify_message(&msg), + }; + } + + // Fallback to message heuristics + Self::classify_message(&msg) + } + + /// Classify an error message into a reason code using heuristics. + fn classify_message(msg: &str) -> Self { + if msg.contains("permission denied") || msg.contains("must be superuser") { + ReasonCode::MissingPrivilege + } else if msg.contains("statement timeout") || msg.contains("canceling statement") { + ReasonCode::StatementTimeout + } else if msg.contains("lock timeout") || msg.contains("could not obtain lock") { + ReasonCode::LockTimeout + } else if msg.contains("connection refused") + || msg.contains("could not connect") + || msg.contains("connection timed out") + { + ReasonCode::ConnectionTimeout + } else if msg.contains("does not exist") && msg.contains("extension") { + ReasonCode::MissingExtension + } else if msg.contains("does not exist") && msg.contains("relation") { + ReasonCode::MissingTable + } else if msg.contains("does not exist") && msg.contains("schema") { + ReasonCode::MissingSchema + } else if msg.contains("does not exist") && msg.contains("role") { + ReasonCode::MissingRole + } else if msg.contains("too many connections") { + ReasonCode::TooManyConnections + } else if msg.contains("out of memory") { + ReasonCode::OutOfMemory + } else if msg.contains("disk full") || msg.contains("no space left") { + ReasonCode::DiskFull + } else { + ReasonCode::InternalError + } + } +} + +/// Category of reason codes. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +#[serde(rename_all = "lowercase")] +#[allow(dead_code)] +pub enum ReasonCategory { + /// Runtime conditions (timeouts, connection issues) + Operational, + /// Intentional restrictions (safety rails) + Policy, + /// Missing prerequisites (extensions, privileges) + Capability, +} + +/// Structured reason information for JSON output. 
+#[derive(Debug, Clone, Serialize)]
+pub struct ReasonInfo {
+    /// Stable reason code for automation
+    pub code: ReasonCode,
+    /// Human-readable message
+    pub message: String,
+    /// Optional additional details (structured data)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub details: Option<serde_json::Value>,
+}
+
+impl ReasonInfo {
+    /// Create a new reason info with just a code and message.
+    pub fn new(code: ReasonCode, message: impl Into<String>) -> Self {
+        Self {
+            code,
+            message: message.into(),
+            details: None,
+        }
+    }
+
+    /// Create a reason info with additional structured details.
+    #[allow(dead_code)]
+    pub fn with_details(
+        code: ReasonCode,
+        message: impl Into<String>,
+        details: serde_json::Value,
+    ) -> Self {
+        Self {
+            code,
+            message: message.into(),
+            details: Some(details),
+        }
+    }
+
+    /// Create from a tokio_postgres error.
+    #[allow(dead_code)]
+    pub fn from_postgres_error(err: &tokio_postgres::Error) -> Self {
+        let code = ReasonCode::from_postgres_error(err);
+        Self {
+            code,
+            message: err.to_string(),
+            details: None,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_reason_code_descriptions() {
+        assert_eq!(
+            ReasonCode::ConnectionTimeout.description(),
+            "connection timed out"
+        );
+        assert_eq!(
+            ReasonCode::MissingPrivilege.description(),
+            "insufficient privileges"
+        );
+        assert_eq!(
+            ReasonCode::RequiresReadWrite.description(),
+            "requires --read-write flag"
+        );
+    }
+
+    #[test]
+    fn test_reason_code_categories() {
+        assert_eq!(
+            ReasonCode::ConnectionTimeout.category(),
+            ReasonCategory::Operational
+        );
+        assert_eq!(
+            ReasonCode::RequiresReadWrite.category(),
+            ReasonCategory::Policy
+        );
+        assert_eq!(
+            ReasonCode::MissingExtension.category(),
+            ReasonCategory::Capability
+        );
+    }
+
+    #[test]
+    fn test_classify_message_permission() {
+        assert_eq!(
+            ReasonCode::classify_message("permission denied for table foo"),
+            ReasonCode::MissingPrivilege
+        );
+        assert_eq!(
+            ReasonCode::classify_message("must be superuser to do this"),
+            ReasonCode::MissingPrivilege
+        );
+    }
+
+    #[test]
+    fn test_classify_message_timeout() {
+        assert_eq!(
+            ReasonCode::classify_message("canceling statement due to statement timeout"),
+            ReasonCode::StatementTimeout
+        );
+        assert_eq!(
+            ReasonCode::classify_message("could not obtain lock on relation"),
+            ReasonCode::LockTimeout
+        );
+    }
+
+    #[test]
+    fn test_classify_message_connection() {
+        assert_eq!(
+            ReasonCode::classify_message("connection refused"),
+            ReasonCode::ConnectionTimeout
+        );
+        assert_eq!(
+            ReasonCode::classify_message("could not connect to server"),
+            ReasonCode::ConnectionTimeout
+        );
+    }
+
+    #[test]
+    fn test_classify_message_missing() {
+        assert_eq!(
+            ReasonCode::classify_message("extension \"foo\" does not exist"),
+            ReasonCode::MissingExtension
+        );
+        assert_eq!(
+            ReasonCode::classify_message("relation \"foo\" does not exist"),
+            ReasonCode::MissingTable
+        );
+    }
+
+    #[test]
+    fn test_reason_info_new() {
+        let info = ReasonInfo::new(
+            ReasonCode::MissingPrivilege,
+            "permission denied for pg_stat_activity",
+        );
+        assert_eq!(info.code, ReasonCode::MissingPrivilege);
+        assert_eq!(info.message, "permission denied for pg_stat_activity");
+        assert!(info.details.is_none());
+    }
+
+    #[test]
+    fn test_reason_info_with_details() {
+        let details = serde_json::json!({
+            "required_privilege": "SELECT",
+            "object": "pg_stat_activity"
+        });
+        let info = ReasonInfo::with_details(
+            ReasonCode::MissingPrivilege,
+            "permission denied",
+            details.clone(),
+        );
+        assert_eq!(info.code,
ReasonCode::MissingPrivilege); + assert_eq!(info.details, Some(details)); + } + + #[test] + fn test_reason_info_serialization() { + let info = ReasonInfo::new(ReasonCode::StatementTimeout, "query timed out"); + let json = serde_json::to_string(&info).unwrap(); + assert!(json.contains("\"code\":\"statement_timeout\"")); + assert!(json.contains("\"message\":\"query timed out\"")); + } +} diff --git a/src/redact.rs b/src/redact.rs index 186bdf4..9124075 100644 --- a/src/redact.rs +++ b/src/redact.rs @@ -29,6 +29,7 @@ const MAX_QUERY_LENGTH: usize = 200; /// /// Keeps: scheme, host, port, database name, user /// Removes: password, query parameters that may contain secrets +#[allow(dead_code)] // Useful utility, may be used in future for context command pub fn redact_dsn(dsn: &str) -> String { match Url::parse(dsn) { Ok(mut url) => { diff --git a/tests/diagnostics/basic.rs b/tests/diagnostics/basic.rs index bf05d97..d90f9da 100644 --- a/tests/diagnostics/basic.rs +++ b/tests/diagnostics/basic.rs @@ -61,15 +61,16 @@ fn test_triage_json_structure() { json ); - // Data fields (flattened from result) + // Data fields (nested in data object) + let data = json.get("data").expect("JSON should have data field"); assert!( - json.get("overall_status").is_some(), - "JSON should have overall_status: {}", + data.get("overall_status").is_some(), + "JSON should have data.overall_status: {}", json ); assert!( - json.get("checks").is_some(), - "JSON should have checks: {}", + data.get("checks").is_some(), + "JSON should have data.checks: {}", json ); } @@ -175,15 +176,16 @@ fn test_sequences_json_structure() { let json = parse_json(&output); assert!(json.is_object(), "Should return JSON object"); - // Should have expected fields + // Should have expected fields (nested in data object) + let data = json.get("data").expect("JSON should have data field"); assert!( - json.get("sequences").is_some(), - "JSON should have sequences: {}", + data.get("sequences").is_some(), + "JSON should have data.sequences: {}", json ); assert!( - json.get("overall_status").is_some(), - "JSON should have overall_status: {}", + data.get("overall_status").is_some(), + "JSON should have data.overall_status: {}", json ); } diff --git a/tests/diagnostics/indexes.rs b/tests/diagnostics/indexes.rs index 574a66e..92cb645 100644 --- a/tests/diagnostics/indexes.rs +++ b/tests/diagnostics/indexes.rs @@ -186,12 +186,13 @@ fn test_indexes_json_structure() { let json = parse_json(&output); - // Should have expected top-level keys + // Should have expected keys (nested in data object) + let data = json.get("data").expect("JSON should have data field"); assert!( - json.get("missing").is_some() - || json.get("unused").is_some() - || json.get("duplicates").is_some(), - "JSON should have missing, unused, or duplicates: {}", + data.get("missing").is_some() + || data.get("unused").is_some() + || data.get("duplicates").is_some(), + "JSON should have data.missing, data.unused, or data.duplicates: {}", json ); } @@ -267,10 +268,11 @@ fn test_indexes_excludes_primary_keys_from_duplicates() { // The pk_test_id_idx duplicates the PK index - implementation may or may not flag this. // Primary goal: verify the command handles this edge case without error. - // The JSON should be valid and contain a duplicates key. + // The JSON should be valid and contain a duplicates key (nested in data object). 
+ let data = json.get("data").expect("JSON should have data field"); assert!( - json.get("duplicates").is_some(), - "JSON should contain duplicates key: {}", + data.get("duplicates").is_some(), + "JSON should contain data.duplicates key: {}", json ); } diff --git a/tests/diagnostics/sequences_scenarios.rs b/tests/diagnostics/sequences_scenarios.rs index e51d4bd..49be823 100644 --- a/tests/diagnostics/sequences_scenarios.rs +++ b/tests/diagnostics/sequences_scenarios.rs @@ -241,22 +241,23 @@ fn test_sequences_warning_json_structure() { let json: serde_json::Value = serde_json::from_str(&out) .unwrap_or_else(|e| panic!("Invalid JSON: {}\nOutput: {}", e, out)); - // Should have sequences array + // Should have data field with sequences array + let data = json.get("data").expect("JSON should have data field"); assert!( - json.get("sequences").is_some(), - "JSON should have sequences: {}", + data.get("sequences").is_some(), + "JSON should have data.sequences: {}", json ); - // Should have overall_status + // Should have overall_status in data assert!( - json.get("overall_status").is_some(), - "JSON should have overall_status: {}", + data.get("overall_status").is_some(), + "JSON should have data.overall_status: {}", json ); // Overall status should be warning - let status = json.get("overall_status").and_then(|s| s.as_str()); + let status = data.get("overall_status").and_then(|s| s.as_str()); assert!( status == Some("warning") || status == Some("Warning"), "overall_status should be warning: {}", diff --git a/tests/json_integration.rs b/tests/json_integration.rs index 798350d..85f9486 100644 --- a/tests/json_integration.rs +++ b/tests/json_integration.rs @@ -68,7 +68,7 @@ fn test_json_error_missing_database_url() { // stdout should contain JSON error let json = parse_json(&output); assert_eq!(json["ok"], false); - assert!(json["error"]["message"] + assert!(json["errors"][0]["message"] .as_str() .unwrap() .contains("DATABASE_URL")); @@ -83,17 +83,17 @@ fn test_json_error_schema() { // Required fields assert!(json.get("ok").is_some(), "Missing 'ok' field"); - assert!(json.get("error").is_some(), "Missing 'error' field"); + assert!(json.get("errors").is_some(), "Missing 'errors' field"); assert!( - json["error"].get("message").is_some(), - "Missing 'error.message' field" + json["errors"][0].get("message").is_some(), + "Missing 'errors[0].message' field" ); // ok must be false for errors assert_eq!(json["ok"], false); // message must be a non-empty string - let message = json["error"]["message"].as_str().unwrap(); + let message = json["errors"][0]["message"].as_str().unwrap(); assert!(!message.is_empty(), "Error message should not be empty"); } @@ -216,7 +216,7 @@ fn test_json_usage_error_missing_required_arg() { // stdout should contain JSON error let json = parse_json(&output); assert_eq!(json["ok"], false); - assert!(json["error"]["message"] + assert!(json["errors"][0]["message"] .as_str() .unwrap() .contains("--steps")); @@ -291,7 +291,7 @@ fn test_json_unsupported_command_returns_json_error() { let json = parse_json(&output); assert_eq!(json["ok"], false); - let message = json["error"]["message"].as_str().unwrap(); + let message = json["errors"][0]["message"].as_str().unwrap(); assert!( message.contains("--json not supported"), "Error should mention --json not supported: {}",