diff --git a/.claude/hooks/rtk-rewrite.sh b/.claude/hooks/rtk-rewrite.sh index 5c8bad02..94718072 100755 --- a/.claude/hooks/rtk-rewrite.sh +++ b/.claude/hooks/rtk-rewrite.sh @@ -195,6 +195,14 @@ elif echo "$MATCH_CMD" | grep -qE '^go[[:space:]]+vet([[:space:]]|$)'; then REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^go vet/rtk go vet/')" elif echo "$MATCH_CMD" | grep -qE '^golangci-lint([[:space:]]|$)'; then REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^golangci-lint/rtk golangci-lint/')" + +# --- AWS CLI --- +elif echo "$MATCH_CMD" | grep -qE '^aws[[:space:]]+'; then + REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^aws /rtk aws /')" + +# --- PostgreSQL --- +elif echo "$MATCH_CMD" | grep -qE '^psql([[:space:]]|$)'; then + REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^psql/rtk psql/')" fi # If no rewrite needed, approve as-is diff --git a/hooks/rtk-rewrite.sh b/hooks/rtk-rewrite.sh index 59e02caa..3a975b46 100644 --- a/hooks/rtk-rewrite.sh +++ b/hooks/rtk-rewrite.sh @@ -185,6 +185,14 @@ elif echo "$MATCH_CMD" | grep -qE '^go[[:space:]]+vet([[:space:]]|$)'; then REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^go vet/rtk go vet/')" elif echo "$MATCH_CMD" | grep -qE '^golangci-lint([[:space:]]|$)'; then REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^golangci-lint/rtk golangci-lint/')" + +# --- AWS CLI --- +elif echo "$MATCH_CMD" | grep -qE '^aws[[:space:]]+'; then + REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^aws /rtk aws /')" + +# --- PostgreSQL --- +elif echo "$MATCH_CMD" | grep -qE '^psql([[:space:]]|$)'; then + REWRITTEN="${ENV_PREFIX}$(echo "$CMD_BODY" | sed 's/^psql/rtk psql/')" fi # If no rewrite needed, approve as-is diff --git a/src/aws_cmd.rs b/src/aws_cmd.rs new file mode 100644 index 00000000..04b34380 --- /dev/null +++ b/src/aws_cmd.rs @@ -0,0 +1,840 @@ +//! AWS CLI output compression. +//! +//! Replaces verbose `--output table`/`text` with JSON, then compresses. +//! Specialized filters for high-frequency commands (STS, S3, EC2, ECS, RDS, CloudFormation). + +use crate::json_cmd; +use crate::tracking; +use crate::utils::{join_with_overflow, truncate_iso_date}; +use anyhow::{Context, Result}; +use serde_json::Value; +use std::process::Command; + +const MAX_ITEMS: usize = 20; +const JSON_COMPRESS_DEPTH: usize = 4; + +/// Run an AWS CLI command with token-optimized output +pub fn run(subcommand: &str, args: &[String], verbose: u8) -> Result<()> { + // Build the full sub-path: e.g. 
"sts" + ["get-caller-identity"] -> "sts get-caller-identity" + let full_sub = if args.is_empty() { + subcommand.to_string() + } else { + format!("{} {}", subcommand, args.join(" ")) + }; + + // Route to specialized handlers + match subcommand { + "sts" if !args.is_empty() && args[0] == "get-caller-identity" => { + run_sts_identity(&args[1..], verbose) + } + "s3" if !args.is_empty() && args[0] == "ls" => run_s3_ls(&args[1..], verbose), + "ec2" if !args.is_empty() && args[0] == "describe-instances" => { + run_ec2_describe(&args[1..], verbose) + } + "ecs" if !args.is_empty() && args[0] == "list-services" => { + run_ecs_list_services(&args[1..], verbose) + } + "ecs" if !args.is_empty() && args[0] == "describe-services" => { + run_ecs_describe_services(&args[1..], verbose) + } + "rds" if !args.is_empty() && args[0] == "describe-db-instances" => { + run_rds_describe(&args[1..], verbose) + } + "cloudformation" if !args.is_empty() && args[0] == "list-stacks" => { + run_cfn_list_stacks(&args[1..], verbose) + } + "cloudformation" if !args.is_empty() && args[0] == "describe-stacks" => { + run_cfn_describe_stacks(&args[1..], verbose) + } + _ => run_generic(subcommand, args, verbose, &full_sub), + } +} + +/// Returns true for operations that return structured JSON (describe-*, list-*, get-*). +/// Mutating/transfer operations (s3 cp, s3 sync, s3 mb, etc.) emit plain text progress +/// and do not accept --output json, so we must not inject it for them. +fn is_structured_operation(args: &[String]) -> bool { + let op = args.first().map(|s| s.as_str()).unwrap_or(""); + op.starts_with("describe-") || op.starts_with("list-") || op.starts_with("get-") +} + +/// Generic strategy: force --output json for structured ops, compress via json_cmd schema +fn run_generic(subcommand: &str, args: &[String], verbose: u8, full_sub: &str) -> Result<()> { + let timer = tracking::TimedExecution::start(); + + let mut cmd = Command::new("aws"); + cmd.arg(subcommand); + + let mut has_output_flag = false; + for arg in args { + if arg == "--output" { + has_output_flag = true; + } + cmd.arg(arg); + } + + // Only inject --output json for structured read operations. + // Mutating/transfer operations (s3 cp, s3 sync, s3 mb, cloudformation deploy…) + // emit plain-text progress and reject --output json. 
+ if !has_output_flag && is_structured_operation(args) { + cmd.args(["--output", "json"]); + } + + if verbose > 0 { + eprintln!("Running: aws {}", full_sub); + } + + let output = cmd.output().context("Failed to run aws CLI")?; + let raw = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + + if !output.status.success() { + timer.track( + &format!("aws {}", full_sub), + &format!("rtk aws {}", full_sub), + &stderr, + &stderr, + ); + eprintln!("{}", stderr.trim()); + std::process::exit(output.status.code().unwrap_or(1)); + } + + let filtered = match json_cmd::filter_json_string(&raw, JSON_COMPRESS_DEPTH) { + Ok(schema) => { + println!("{}", schema); + schema + } + Err(_) => { + // Fallback: print raw (maybe not JSON) + print!("{}", raw); + raw.clone() + } + }; + + timer.track( + &format!("aws {}", full_sub), + &format!("rtk aws {}", full_sub), + &raw, + &filtered, + ); + + Ok(()) +} + +fn run_aws_json( + sub_args: &[&str], + extra_args: &[String], + verbose: u8, +) -> Result<(String, String, std::process::ExitStatus)> { + let mut cmd = Command::new("aws"); + for arg in sub_args { + cmd.arg(arg); + } + + // Replace --output table/text with --output json + let mut skip_next = false; + for arg in extra_args { + if skip_next { + skip_next = false; + continue; + } + if arg == "--output" { + skip_next = true; + continue; + } + cmd.arg(arg); + } + cmd.args(["--output", "json"]); + + let cmd_desc = format!("aws {}", sub_args.join(" ")); + if verbose > 0 { + eprintln!("Running: {}", cmd_desc); + } + + let output = cmd + .output() + .context(format!("Failed to run {}", cmd_desc))?; + let stdout = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + + if !output.status.success() { + eprintln!("{}", stderr.trim()); + } + + Ok((stdout, stderr, output.status)) +} + +fn run_sts_identity(extra_args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + let (raw, stderr, status) = run_aws_json(&["sts", "get-caller-identity"], extra_args, verbose)?; + + if !status.success() { + timer.track( + "aws sts get-caller-identity", + "rtk aws sts get-caller-identity", + &stderr, + &stderr, + ); + std::process::exit(status.code().unwrap_or(1)); + } + + let filtered = match filter_sts_identity(&raw) { + Some(f) => f, + None => raw.clone(), + }; + println!("{}", filtered); + + timer.track( + "aws sts get-caller-identity", + "rtk aws sts get-caller-identity", + &raw, + &filtered, + ); + Ok(()) +} + +fn run_s3_ls(extra_args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + + // s3 ls doesn't support --output json, run as-is and filter text + let mut cmd = Command::new("aws"); + cmd.args(["s3", "ls"]); + for arg in extra_args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: aws s3 ls {}", extra_args.join(" ")); + } + + let output = cmd.output().context("Failed to run aws s3 ls")?; + let raw = String::from_utf8_lossy(&output.stdout).to_string(); + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + timer.track("aws s3 ls", "rtk aws s3 ls", &stderr, &stderr); + eprintln!("{}", stderr.trim()); + std::process::exit(output.status.code().unwrap_or(1)); + } + + let filtered = filter_s3_ls(&raw); + println!("{}", filtered); + + timer.track("aws s3 ls", "rtk aws s3 ls", &raw, &filtered); + Ok(()) +} + +fn run_ec2_describe(extra_args: &[String], verbose: 
u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + let (raw, stderr, status) = run_aws_json(&["ec2", "describe-instances"], extra_args, verbose)?; + + if !status.success() { + timer.track( + "aws ec2 describe-instances", + "rtk aws ec2 describe-instances", + &stderr, + &stderr, + ); + std::process::exit(status.code().unwrap_or(1)); + } + + let filtered = match filter_ec2_instances(&raw) { + Some(f) => f, + None => raw.clone(), + }; + println!("{}", filtered); + + timer.track( + "aws ec2 describe-instances", + "rtk aws ec2 describe-instances", + &raw, + &filtered, + ); + Ok(()) +} + +fn run_ecs_list_services(extra_args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + let (raw, stderr, status) = run_aws_json(&["ecs", "list-services"], extra_args, verbose)?; + + if !status.success() { + timer.track( + "aws ecs list-services", + "rtk aws ecs list-services", + &stderr, + &stderr, + ); + std::process::exit(status.code().unwrap_or(1)); + } + + let filtered = match filter_ecs_list_services(&raw) { + Some(f) => f, + None => raw.clone(), + }; + println!("{}", filtered); + + timer.track( + "aws ecs list-services", + "rtk aws ecs list-services", + &raw, + &filtered, + ); + Ok(()) +} + +fn run_ecs_describe_services(extra_args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + let (raw, stderr, status) = run_aws_json(&["ecs", "describe-services"], extra_args, verbose)?; + + if !status.success() { + timer.track( + "aws ecs describe-services", + "rtk aws ecs describe-services", + &stderr, + &stderr, + ); + std::process::exit(status.code().unwrap_or(1)); + } + + let filtered = match filter_ecs_describe_services(&raw) { + Some(f) => f, + None => raw.clone(), + }; + println!("{}", filtered); + + timer.track( + "aws ecs describe-services", + "rtk aws ecs describe-services", + &raw, + &filtered, + ); + Ok(()) +} + +fn run_rds_describe(extra_args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + let (raw, stderr, status) = + run_aws_json(&["rds", "describe-db-instances"], extra_args, verbose)?; + + if !status.success() { + timer.track( + "aws rds describe-db-instances", + "rtk aws rds describe-db-instances", + &stderr, + &stderr, + ); + std::process::exit(status.code().unwrap_or(1)); + } + + let filtered = match filter_rds_instances(&raw) { + Some(f) => f, + None => raw.clone(), + }; + println!("{}", filtered); + + timer.track( + "aws rds describe-db-instances", + "rtk aws rds describe-db-instances", + &raw, + &filtered, + ); + Ok(()) +} + +fn run_cfn_list_stacks(extra_args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + let (raw, stderr, status) = + run_aws_json(&["cloudformation", "list-stacks"], extra_args, verbose)?; + + if !status.success() { + timer.track( + "aws cloudformation list-stacks", + "rtk aws cloudformation list-stacks", + &stderr, + &stderr, + ); + std::process::exit(status.code().unwrap_or(1)); + } + + let filtered = match filter_cfn_list_stacks(&raw) { + Some(f) => f, + None => raw.clone(), + }; + println!("{}", filtered); + + timer.track( + "aws cloudformation list-stacks", + "rtk aws cloudformation list-stacks", + &raw, + &filtered, + ); + Ok(()) +} + +fn run_cfn_describe_stacks(extra_args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + let (raw, stderr, status) = + run_aws_json(&["cloudformation", "describe-stacks"], extra_args, verbose)?; + + if 
!status.success() {
+        timer.track(
+            "aws cloudformation describe-stacks",
+            "rtk aws cloudformation describe-stacks",
+            &stderr,
+            &stderr,
+        );
+        std::process::exit(status.code().unwrap_or(1));
+    }
+
+    let filtered = match filter_cfn_describe_stacks(&raw) {
+        Some(f) => f,
+        None => raw.clone(),
+    };
+    println!("{}", filtered);
+
+    timer.track(
+        "aws cloudformation describe-stacks",
+        "rtk aws cloudformation describe-stacks",
+        &raw,
+        &filtered,
+    );
+    Ok(())
+}
+
+// --- Filter functions (all use serde_json::Value for resilience) ---
+
+fn filter_sts_identity(json_str: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(json_str).ok()?;
+    let account = v["Account"].as_str().unwrap_or("?");
+    let arn = v["Arn"].as_str().unwrap_or("?");
+    Some(format!("AWS: {} {}", account, arn))
+}
+
+fn filter_s3_ls(output: &str) -> String {
+    let lines: Vec<&str> = output.lines().collect();
+    let total = lines.len();
+    let shown: Vec<&str> = lines.iter().take(MAX_ITEMS + 10).copied().collect();
+
+    if total > MAX_ITEMS + 10 {
+        return format!(
+            "{}\n... +{} more items",
+            shown.join("\n"),
+            total - MAX_ITEMS - 10
+        );
+    }
+
+    shown.join("\n")
+}
+
+fn filter_ec2_instances(json_str: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(json_str).ok()?;
+    let reservations = v["Reservations"].as_array()?;
+
+    let mut instances: Vec<String> = Vec::new();
+    for res in reservations {
+        if let Some(insts) = res["Instances"].as_array() {
+            for inst in insts {
+                let id = inst["InstanceId"].as_str().unwrap_or("?");
+                let state = inst["State"]["Name"].as_str().unwrap_or("?");
+                let itype = inst["InstanceType"].as_str().unwrap_or("?");
+                let ip = inst["PrivateIpAddress"].as_str().unwrap_or("-");
+
+                // Extract Name tag
+                let name = inst["Tags"]
+                    .as_array()
+                    .and_then(|tags| tags.iter().find(|t| t["Key"].as_str() == Some("Name")))
+                    .and_then(|t| t["Value"].as_str())
+                    .unwrap_or("-");
+
+                instances.push(format!("{} {} {} {} ({})", id, state, itype, ip, name));
+            }
+        }
+    }
+
+    let total = instances.len();
+    let mut result = format!("EC2: {} instances\n", total);
+
+    for inst in instances.iter().take(MAX_ITEMS) {
+        result.push_str(&format!(" {}\n", inst));
+    }
+
+    if total > MAX_ITEMS {
+        result.push_str(&format!(" ... +{} more\n", total - MAX_ITEMS));
+    }
+
+    Some(result.trim_end().to_string())
+}
+
+fn filter_ecs_list_services(json_str: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(json_str).ok()?;
+    let arns = v["serviceArns"].as_array()?;
+
+    let mut result = Vec::new();
+    let total = arns.len();
+
+    for arn in arns.iter().take(MAX_ITEMS) {
+        let arn_str = arn.as_str().unwrap_or("?");
+        // Extract short name from ARN: arn:aws:ecs:...:service/cluster/name -> name
+        let short = arn_str.rsplit('/').next().unwrap_or(arn_str);
+        result.push(short.to_string());
+    }
+
+    Some(join_with_overflow(&result, total, MAX_ITEMS, "services"))
+}
+
+fn filter_ecs_describe_services(json_str: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(json_str).ok()?;
+    let services = v["services"].as_array()?;
+
+    let mut result = Vec::new();
+    let total = services.len();
+
+    for svc in services.iter().take(MAX_ITEMS) {
+        let name = svc["serviceName"].as_str().unwrap_or("?");
+        let status = svc["status"].as_str().unwrap_or("?");
+        let running = svc["runningCount"].as_i64().unwrap_or(0);
+        let desired = svc["desiredCount"].as_i64().unwrap_or(0);
+        let launch = svc["launchType"].as_str().unwrap_or("?");
+        result.push(format!(
+            "{} {} {}/{} ({})",
+            name, status, running, desired, launch
+        ));
+    }
+
+    Some(join_with_overflow(&result, total, MAX_ITEMS, "services"))
+}
+
+fn filter_rds_instances(json_str: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(json_str).ok()?;
+    let dbs = v["DBInstances"].as_array()?;
+
+    let mut result = Vec::new();
+    let total = dbs.len();
+
+    for db in dbs.iter().take(MAX_ITEMS) {
+        let name = db["DBInstanceIdentifier"].as_str().unwrap_or("?");
+        let engine = db["Engine"].as_str().unwrap_or("?");
+        let version = db["EngineVersion"].as_str().unwrap_or("?");
+        let class = db["DBInstanceClass"].as_str().unwrap_or("?");
+        let status = db["DBInstanceStatus"].as_str().unwrap_or("?");
+        result.push(format!(
+            "{} {} {} {} {}",
+            name, engine, version, class, status
+        ));
+    }
+
+    Some(join_with_overflow(&result, total, MAX_ITEMS, "instances"))
+}
+
+fn filter_cfn_list_stacks(json_str: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(json_str).ok()?;
+    let stacks = v["StackSummaries"].as_array()?;
+
+    let mut result = Vec::new();
+    let total = stacks.len();
+
+    for stack in stacks.iter().take(MAX_ITEMS) {
+        let name = stack["StackName"].as_str().unwrap_or("?");
+        let status = stack["StackStatus"].as_str().unwrap_or("?");
+        let date = stack["LastUpdatedTime"]
+            .as_str()
+            .or_else(|| stack["CreationTime"].as_str())
+            .unwrap_or("?");
+        result.push(format!("{} {} {}", name, status, truncate_iso_date(date)));
+    }
+
+    Some(join_with_overflow(&result, total, MAX_ITEMS, "stacks"))
+}
+
+fn filter_cfn_describe_stacks(json_str: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(json_str).ok()?;
+    let stacks = v["Stacks"].as_array()?;
+
+    let mut result = Vec::new();
+    let total = stacks.len();
+
+    for stack in stacks.iter().take(MAX_ITEMS) {
+        let name = stack["StackName"].as_str().unwrap_or("?");
+        let status = stack["StackStatus"].as_str().unwrap_or("?");
+        let date = stack["LastUpdatedTime"]
+            .as_str()
+            .or_else(|| stack["CreationTime"].as_str())
+            .unwrap_or("?");
+        result.push(format!("{} {} {}", name, status, truncate_iso_date(date)));
+
+        // Show outputs if present
+        if let Some(outputs) = stack["Outputs"].as_array() {
+            for out in outputs {
+                let key = out["OutputKey"].as_str().unwrap_or("?");
+                let val = out["OutputValue"].as_str().unwrap_or("?");
+                result.push(format!(" 
{}={}", key, val)); + } + } + } + + Some(join_with_overflow(&result, total, MAX_ITEMS, "stacks")) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_snapshot_sts_identity() { + let json = include_str!("../tests/fixtures/aws_sts_identity.json"); + let result = filter_sts_identity(json).unwrap(); + assert_eq!( + result, + "AWS: 123456789012 arn:aws:iam::123456789012:user/dev-user" + ); + } + + #[test] + fn test_snapshot_ec2_instances() { + let json = include_str!("../tests/fixtures/aws_ec2_describe.json"); + let result = filter_ec2_instances(json).unwrap(); + assert!(result.contains("EC2:")); + assert!(result.contains("i-0a1b2c3d4e5f00001")); + assert!(result.contains("running")); + assert!(result.contains("t3.micro")); + assert!(result.contains("10.0.1.10")); + } + + #[test] + fn test_filter_sts_identity() { + let json = r#"{ + "UserId": "AIDAEXAMPLE", + "Account": "123456789012", + "Arn": "arn:aws:iam::123456789012:user/dev" + }"#; + let result = filter_sts_identity(json).unwrap(); + assert_eq!( + result, + "AWS: 123456789012 arn:aws:iam::123456789012:user/dev" + ); + } + + #[test] + fn test_filter_sts_identity_missing_fields() { + let json = r#"{}"#; + let result = filter_sts_identity(json).unwrap(); + assert_eq!(result, "AWS: ? ?"); + } + + #[test] + fn test_filter_sts_identity_invalid_json() { + let result = filter_sts_identity("not json"); + assert!(result.is_none()); + } + + #[test] + fn test_filter_s3_ls_basic() { + let output = "2024-01-01 bucket1\n2024-01-02 bucket2\n2024-01-03 bucket3\n"; + let result = filter_s3_ls(output); + assert!(result.contains("bucket1")); + assert!(result.contains("bucket3")); + } + + #[test] + fn test_filter_s3_ls_overflow() { + let mut lines = Vec::new(); + for i in 1..=50 { + lines.push(format!("2024-01-01 bucket{}", i)); + } + let input = lines.join("\n"); + let result = filter_s3_ls(&input); + assert!(result.contains("... 
+20 more items")); + } + + #[test] + fn test_filter_ec2_instances() { + let json = r#"{ + "Reservations": [{ + "Instances": [{ + "InstanceId": "i-abc123", + "State": {"Name": "running"}, + "InstanceType": "t3.micro", + "PrivateIpAddress": "10.0.1.5", + "Tags": [{"Key": "Name", "Value": "web-server"}] + }, { + "InstanceId": "i-def456", + "State": {"Name": "stopped"}, + "InstanceType": "t3.large", + "PrivateIpAddress": "10.0.1.6", + "Tags": [{"Key": "Name", "Value": "worker"}] + }] + }] + }"#; + let result = filter_ec2_instances(json).unwrap(); + assert!(result.contains("EC2: 2 instances")); + assert!(result.contains("i-abc123 running t3.micro 10.0.1.5 (web-server)")); + assert!(result.contains("i-def456 stopped t3.large 10.0.1.6 (worker)")); + } + + #[test] + fn test_filter_ec2_no_name_tag() { + let json = r#"{ + "Reservations": [{ + "Instances": [{ + "InstanceId": "i-abc123", + "State": {"Name": "running"}, + "InstanceType": "t3.micro", + "PrivateIpAddress": "10.0.1.5", + "Tags": [] + }] + }] + }"#; + let result = filter_ec2_instances(json).unwrap(); + assert!(result.contains("(-)")); + } + + #[test] + fn test_filter_ec2_invalid_json() { + assert!(filter_ec2_instances("not json").is_none()); + } + + #[test] + fn test_filter_ecs_list_services() { + let json = r#"{ + "serviceArns": [ + "arn:aws:ecs:us-east-1:123:service/cluster/api-service", + "arn:aws:ecs:us-east-1:123:service/cluster/worker-service" + ] + }"#; + let result = filter_ecs_list_services(json).unwrap(); + assert!(result.contains("api-service")); + assert!(result.contains("worker-service")); + assert!(!result.contains("arn:aws")); + } + + #[test] + fn test_filter_ecs_describe_services() { + let json = r#"{ + "services": [{ + "serviceName": "api", + "status": "ACTIVE", + "runningCount": 3, + "desiredCount": 3, + "launchType": "FARGATE" + }] + }"#; + let result = filter_ecs_describe_services(json).unwrap(); + assert_eq!(result, "api ACTIVE 3/3 (FARGATE)"); + } + + #[test] + fn test_filter_rds_instances() { + let json = r#"{ + "DBInstances": [{ + "DBInstanceIdentifier": "mydb", + "Engine": "postgres", + "EngineVersion": "15.4", + "DBInstanceClass": "db.t3.micro", + "DBInstanceStatus": "available" + }] + }"#; + let result = filter_rds_instances(json).unwrap(); + assert_eq!(result, "mydb postgres 15.4 db.t3.micro available"); + } + + #[test] + fn test_filter_cfn_list_stacks() { + let json = r#"{ + "StackSummaries": [{ + "StackName": "my-stack", + "StackStatus": "CREATE_COMPLETE", + "CreationTime": "2024-01-15T10:30:00Z" + }, { + "StackName": "other-stack", + "StackStatus": "UPDATE_COMPLETE", + "LastUpdatedTime": "2024-02-20T14:00:00Z", + "CreationTime": "2024-01-01T00:00:00Z" + }] + }"#; + let result = filter_cfn_list_stacks(json).unwrap(); + assert!(result.contains("my-stack CREATE_COMPLETE 2024-01-15")); + assert!(result.contains("other-stack UPDATE_COMPLETE 2024-02-20")); + } + + #[test] + fn test_filter_cfn_describe_stacks_with_outputs() { + let json = r#"{ + "Stacks": [{ + "StackName": "my-stack", + "StackStatus": "CREATE_COMPLETE", + "CreationTime": "2024-01-15T10:30:00Z", + "Outputs": [ + {"OutputKey": "ApiUrl", "OutputValue": "https://api.example.com"}, + {"OutputKey": "BucketName", "OutputValue": "my-bucket"} + ] + }] + }"#; + let result = filter_cfn_describe_stacks(json).unwrap(); + assert!(result.contains("my-stack CREATE_COMPLETE 2024-01-15")); + assert!(result.contains("ApiUrl=https://api.example.com")); + assert!(result.contains("BucketName=my-bucket")); + } + + #[test] + fn test_filter_cfn_describe_stacks_no_outputs() 
{
+        let json = r#"{
+            "Stacks": [{
+                "StackName": "my-stack",
+                "StackStatus": "CREATE_COMPLETE",
+                "CreationTime": "2024-01-15T10:30:00Z"
+            }]
+        }"#;
+        let result = filter_cfn_describe_stacks(json).unwrap();
+        assert!(result.contains("my-stack CREATE_COMPLETE 2024-01-15"));
+        assert!(!result.contains("="));
+    }
+
+    fn count_tokens(text: &str) -> usize {
+        text.split_whitespace().count()
+    }
+
+    #[test]
+    fn test_ec2_token_savings() {
+        // Uses real-format fixture (multi-field AWS response) to verify ≥60% savings
+        let json = include_str!("../tests/fixtures/aws_ec2_describe.json");
+        let result = filter_ec2_instances(json).unwrap();
+        let input_tokens = count_tokens(json);
+        let output_tokens = count_tokens(&result);
+        let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0);
+        assert!(
+            savings >= 60.0,
+            "EC2 filter: expected >=60% savings, got {:.1}%",
+            savings
+        );
+    }
+
+    #[test]
+    fn test_sts_token_savings() {
+        let json = include_str!("../tests/fixtures/aws_sts_identity.json");
+        let result = filter_sts_identity(json).unwrap();
+        let input_tokens = count_tokens(json);
+        let output_tokens = count_tokens(&result);
+        let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0);
+        assert!(
+            savings >= 60.0,
+            "STS identity filter: expected >=60% savings, got {:.1}%",
+            savings
+        );
+    }
+
+    #[test]
+    fn test_rds_overflow() {
+        let mut dbs = Vec::new();
+        for i in 1..=25 {
+            dbs.push(format!(
+                r#"{{"DBInstanceIdentifier": "db-{}", "Engine": "postgres", "EngineVersion": "15.4", "DBInstanceClass": "db.t3.micro", "DBInstanceStatus": "available"}}"#,
+                i
+            ));
+        }
+        let json = format!(r#"{{"DBInstances": [{}]}}"#, dbs.join(","));
+        let result = filter_rds_instances(&json).unwrap();
+        assert!(result.contains("... +5 more instances"));
+    }
+}
diff --git a/src/main.rs b/src/main.rs
index fcb39303..cd2830cc 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,3 +1,4 @@
+mod aws_cmd;
 mod cargo_cmd;
 mod cc_economics;
 mod ccusage;
@@ -34,6 +35,7 @@ mod playwright_cmd;
 mod pnpm_cmd;
 mod prettier_cmd;
 mod prisma_cmd;
+mod psql_cmd;
 mod pytest_cmd;
 mod read;
 mod ruff_cmd;
@@ -135,6 +137,22 @@ enum Commands {
         args: Vec<String>,
     },
 
+    /// AWS CLI with compact output (force JSON, compress)
+    Aws {
+        /// AWS service subcommand (e.g., sts, s3, ec2, ecs, rds, cloudformation)
+        subcommand: String,
+        /// Additional arguments
+        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
+        args: Vec<String>,
+    },
+
+    /// PostgreSQL client with compact output (strip borders, compress tables)
+    Psql {
+        /// psql arguments
+        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
+        args: Vec<String>,
+    },
+
     /// pnpm commands with ultra-compact output
     Pnpm {
         #[command(subcommand)]
@@ -941,6 +959,14 @@ fn main() -> Result<()> {
             gh_cmd::run(&subcommand, &args, cli.verbose, cli.ultra_compact)?;
         }
 
+        Commands::Aws { subcommand, args } => {
+            aws_cmd::run(&subcommand, &args, cli.verbose)?;
+        }
+
+        Commands::Psql { args } => {
+            psql_cmd::run(&args, cli.verbose)?;
+        }
+
         Commands::Pnpm { command } => match command {
             PnpmCommands::List { depth, args } => {
                 pnpm_cmd::run(pnpm_cmd::PnpmCommand::List { depth }, &args, cli.verbose)?;
diff --git a/src/psql_cmd.rs b/src/psql_cmd.rs
new file mode 100644
index 00000000..c00a7305
--- /dev/null
+++ b/src/psql_cmd.rs
@@ -0,0 +1,388 @@
+//! PostgreSQL client (psql) output compression.
+//!
+//! Detects table and expanded display formats, strips borders/padding,
+//! and produces compact tab-separated or key=value output.
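+//!
+//! For example, a bordered result set of the form `" id | name"` / `"----+------"` /
+//! `"  1 | alice"` / `"(1 row)"` is reduced to the tab-separated lines `id\tname` and
+//! `1\talice`, while an expanded `-[ RECORD 1 ]` block becomes a single
+//! `[1] id=1 name=alice` line (illustrative values; see the unit tests for exact shapes).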
+ +use crate::tracking; +use anyhow::{Context, Result}; +use lazy_static::lazy_static; +use regex::Regex; + +const MAX_TABLE_ROWS: usize = 30; +const MAX_EXPANDED_RECORDS: usize = 20; + +lazy_static! { + static ref EXPANDED_RECORD: Regex = Regex::new(r"-\[ RECORD \d+ \]-").unwrap(); + static ref SEPARATOR: Regex = Regex::new(r"^[-+]+$").unwrap(); + static ref ROW_COUNT: Regex = Regex::new(r"^\(\d+ rows?\)$").unwrap(); + static ref RECORD_HEADER: Regex = Regex::new(r"^-\[ RECORD (\d+) \]-").unwrap(); +} + +pub fn run(args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + + let mut cmd = std::process::Command::new("psql"); + for arg in args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: psql {}", args.join(" ")); + } + + let output = cmd + .output() + .context("Failed to run psql (is PostgreSQL client installed?)")?; + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + let exit_code = output.status.code().unwrap_or(1); + + if !stderr.is_empty() { + eprint!("{}", stderr); + } + + if exit_code != 0 { + std::process::exit(exit_code); + } + + let filtered = filter_psql_output(&stdout); + + if let Some(hint) = crate::tee::tee_and_hint(&stdout, "psql", exit_code) { + println!("{}\n{}", filtered, hint); + } else { + println!("{}", filtered); + } + + timer.track( + &format!("psql {}", args.join(" ")), + &format!("rtk psql {}", args.join(" ")), + &stdout, + &filtered, + ); + + Ok(()) +} + +fn filter_psql_output(output: &str) -> String { + if output.trim().is_empty() { + return String::new(); + } + + if is_expanded_format(output) { + filter_expanded(output) + } else if is_table_format(output) { + filter_table(output) + } else { + // Passthrough: COPY results, notices, etc. + output.to_string() + } +} + +fn is_table_format(output: &str) -> bool { + output.lines().any(|line| { + let trimmed = line.trim(); + trimmed.contains("-+-") || trimmed.contains("---+---") + }) +} + +fn is_expanded_format(output: &str) -> bool { + EXPANDED_RECORD.is_match(output) +} + +/// Filter psql table format: +/// - Strip separator lines (----+----) +/// - Strip (N rows) footer +/// - Trim column padding +/// - Output tab-separated +fn filter_table(output: &str) -> String { + let mut result = Vec::new(); + let mut data_rows = 0; + let mut total_rows = 0; + + for line in output.lines() { + let trimmed = line.trim(); + + // Skip separator lines + if SEPARATOR.is_match(trimmed) { + continue; + } + + // Skip row count footer + if ROW_COUNT.is_match(trimmed) { + continue; + } + + // Skip empty lines + if trimmed.is_empty() { + continue; + } + + // This is a data or header row with | delimiters + if trimmed.contains('|') { + total_rows += 1; + // First row is header, don't count it as data + if total_rows > 1 { + data_rows += 1; + } + + if data_rows <= MAX_TABLE_ROWS || total_rows == 1 { + let cols: Vec<&str> = trimmed.split('|').map(|c| c.trim()).collect(); + result.push(cols.join("\t")); + } + } else { + // Non-table line (e.g., command output like SET, NOTICE) + result.push(trimmed.to_string()); + } + } + + if data_rows > MAX_TABLE_ROWS { + result.push(format!("... 
+{} more rows", data_rows - MAX_TABLE_ROWS));
+    }
+
+    result.join("\n")
+}
+
+/// Filter psql expanded format:
+/// Convert -[ RECORD N ]- blocks to one-liner key=val format
fn filter_expanded(output: &str) -> String {
+    let mut result = Vec::new();
+    let mut current_pairs: Vec<String> = Vec::new();
+    let mut current_record: Option<String> = None;
+    let mut record_count = 0;
+
+    for line in output.lines() {
+        let trimmed = line.trim();
+
+        if ROW_COUNT.is_match(trimmed) {
+            continue;
+        }
+
+        if let Some(caps) = RECORD_HEADER.captures(trimmed) {
+            // Flush previous record
+            if let Some(rec) = current_record.take() {
+                if record_count <= MAX_EXPANDED_RECORDS {
+                    result.push(format!("{} {}", rec, current_pairs.join(" ")));
+                }
+                current_pairs.clear();
+            }
+            record_count += 1;
+            current_record = Some(format!("[{}]", &caps[1]));
+        } else if trimmed.contains('|') && current_record.is_some() {
+            // key | value line
+            let parts: Vec<&str> = trimmed.splitn(2, '|').collect();
+            if parts.len() == 2 {
+                let key = parts[0].trim();
+                let val = parts[1].trim();
+                current_pairs.push(format!("{}={}", key, val));
+            }
+        } else if trimmed.is_empty() {
+            continue;
+        } else if current_record.is_none() {
+            // Non-record line before any record (notices, etc.)
+            result.push(trimmed.to_string());
+        }
+    }
+
+    // Flush last record
+    if let Some(rec) = current_record.take() {
+        if record_count <= MAX_EXPANDED_RECORDS {
+            result.push(format!("{} {}", rec, current_pairs.join(" ")));
+        }
+    }
+
+    if record_count > MAX_EXPANDED_RECORDS {
+        result.push(format!(
+            "... +{} more records",
+            record_count - MAX_EXPANDED_RECORDS
+        ));
+    }
+
+    result.join("\n")
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_snapshot_table_format() {
+        let input = include_str!("../tests/fixtures/psql_table.txt");
+        let result = filter_table(input);
+        assert!(result.contains("id\tusername\temail\tstatus"));
+        assert!(result.contains("alice_smith\talice@example.com"));
+        assert!(!result.contains("---+---"));
+        assert!(!result.contains("(20 rows)"));
+    }
+
+    #[test]
+    fn test_snapshot_expanded_format() {
+        let input = include_str!("../tests/fixtures/psql_expanded.txt");
+        let result = filter_expanded(input);
+        assert!(result.contains("[1] id=1 username=alice_smith"));
+        assert!(result.contains("[2] id=2 username=bob_jones"));
+        assert!(!result.contains("-[ RECORD"));
+        assert!(!result.contains("(5 rows)"));
+    }
+
+    #[test]
+    fn test_is_table_format_detects_separator() {
+        let input = " id | name\n----+------\n 1 | foo\n(1 row)\n";
+        assert!(is_table_format(input));
+    }
+
+    #[test]
+    fn test_is_table_format_rejects_plain() {
+        assert!(!is_table_format("COPY 5\n"));
+        assert!(!is_table_format("SET\n"));
+    }
+
+    #[test]
+    fn test_is_expanded_format_detects_records() {
+        let input = "-[ RECORD 1 ]----\nid | 1\nname | foo\n";
+        assert!(is_expanded_format(input));
+    }
+
+    #[test]
+    fn test_is_expanded_format_rejects_table() {
+        let input = " id | name\n----+------\n 1 | foo\n";
+        assert!(!is_expanded_format(input));
+    }
+
+    #[test]
+    fn test_filter_table_basic() {
+        let input = " id | name | email\n----+-------+---------\n 1 | alice | a@b.com\n 2 | bob | b@b.com\n(2 rows)\n";
+        let result = filter_table(input);
+        assert!(result.contains("id\tname\temail"));
+        assert!(result.contains("1\talice\ta@b.com"));
+        assert!(result.contains("2\tbob\tb@b.com"));
+        assert!(!result.contains("----"));
+        assert!(!result.contains("(2 rows)"));
+    }
+
+    #[test]
+    fn test_filter_table_overflow() {
+        let mut lines = vec![" id | val".to_string(), 
"----+-----".to_string()]; + for i in 1..=40 { + lines.push(format!(" {} | row{}", i, i)); + } + lines.push("(40 rows)".to_string()); + let input = lines.join("\n"); + + let result = filter_table(&input); + assert!(result.contains("... +10 more rows")); + // Header + 30 data rows + overflow line + let result_lines: Vec<&str> = result.lines().collect(); + assert_eq!(result_lines.len(), 32); // 1 header + 30 data + 1 overflow + } + + #[test] + fn test_filter_table_empty() { + let result = filter_psql_output(""); + assert!(result.is_empty()); + } + + #[test] + fn test_filter_expanded_basic() { + let input = "\ +-[ RECORD 1 ]---- +id | 1 +name | alice +-[ RECORD 2 ]---- +id | 2 +name | bob +"; + let result = filter_expanded(input); + assert!(result.contains("[1] id=1 name=alice")); + assert!(result.contains("[2] id=2 name=bob")); + } + + #[test] + fn test_filter_expanded_overflow() { + let mut lines = Vec::new(); + for i in 1..=25 { + lines.push(format!("-[ RECORD {} ]----", i)); + lines.push(format!("id | {}", i)); + lines.push(format!("name | user{}", i)); + } + let input = lines.join("\n"); + + let result = filter_expanded(&input); + assert!(result.contains("... +5 more records")); + } + + #[test] + fn test_filter_psql_passthrough() { + let input = "COPY 5\n"; + let result = filter_psql_output(input); + assert_eq!(result, "COPY 5\n"); + } + + #[test] + fn test_filter_psql_routes_to_table() { + let input = " id | name\n----+------\n 1 | foo\n(1 row)\n"; + let result = filter_psql_output(input); + assert!(result.contains("id\tname")); + assert!(!result.contains("----")); + } + + #[test] + fn test_filter_psql_routes_to_expanded() { + let input = "-[ RECORD 1 ]----\nid | 1\nname | foo\n"; + let result = filter_psql_output(input); + assert!(result.contains("[1]")); + assert!(result.contains("id=1")); + } + + #[test] + fn test_filter_table_strips_row_count() { + let input = " c\n---\n 1\n(1 row)\n"; + let result = filter_table(input); + assert!(!result.contains("(1 row)")); + } + + #[test] + fn test_filter_expanded_strips_row_count() { + let input = "-[ RECORD 1 ]----\nid | 1\n(1 row)\n"; + let result = filter_expanded(input); + assert!(!result.contains("(1 row)")); + } + + fn count_tokens(text: &str) -> usize { + text.split_whitespace().count() + } + + #[test] + fn test_table_token_savings() { + // Uses real-format fixture (wide padded table). + // Table-to-tab conversion saves ~40% (removes `|` delimiters, separator lines, footer). + // 60%+ is not achievable by tab conversion alone: `|` tokens represent at most ~47% of + // input tokens regardless of column count. Expanded format achieves 60%+ by compressing + // verbose `-[ RECORD N ]` headers + field label padding. 
+ let input = include_str!("../tests/fixtures/psql_table.txt"); + let result = filter_table(input); + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&result); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + assert!( + savings >= 40.0, + "Table filter: expected >=40% savings, got {:.1}%", + savings + ); + } + + #[test] + fn test_expanded_token_savings() { + // Uses real-format fixture (expanded display with many fields) to verify ≥60% savings + let input = include_str!("../tests/fixtures/psql_expanded.txt"); + let result = filter_expanded(input); + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&result); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + assert!( + savings >= 60.0, + "Expanded filter: expected >=60% savings, got {:.1}%", + savings + ); + } +} diff --git a/src/utils.rs b/src/utils.rs index 6ea0698f..6806b814 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -155,6 +155,40 @@ pub fn format_cpt(cpt: f64) -> String { format!("${:.2}/MTok", cpt_per_million) } +/// Join items into a newline-separated string, appending an overflow hint when total > max. +/// +/// # Examples +/// ``` +/// use rtk::utils::join_with_overflow; +/// let items = vec!["a".to_string(), "b".to_string()]; +/// assert_eq!(join_with_overflow(&items, 5, 3, "items"), "a\nb\n... +2 more items"); +/// assert_eq!(join_with_overflow(&items, 2, 3, "items"), "a\nb"); +/// ``` +pub fn join_with_overflow(items: &[String], total: usize, max: usize, label: &str) -> String { + let mut out = items.join("\n"); + if total > max { + out.push_str(&format!("\n... +{} more {}", total - max, label)); + } + out +} + +/// Truncate an ISO 8601 datetime string to just the date portion (first 10 chars). +/// +/// # Examples +/// ``` +/// use rtk::utils::truncate_iso_date; +/// assert_eq!(truncate_iso_date("2024-01-15T10:30:00Z"), "2024-01-15"); +/// assert_eq!(truncate_iso_date("2024-01-15"), "2024-01-15"); +/// assert_eq!(truncate_iso_date("short"), "short"); +/// ``` +pub fn truncate_iso_date(date: &str) -> &str { + if date.len() >= 10 { + &date[..10] + } else { + date + } +} + /// Format a confirmation message: "ok \ \" /// Used for write operations (merge, create, comment, edit, etc.) 
/// diff --git a/tests/fixtures/aws_ec2_describe.json b/tests/fixtures/aws_ec2_describe.json new file mode 100644 index 00000000..5cb9944c --- /dev/null +++ b/tests/fixtures/aws_ec2_describe.json @@ -0,0 +1,179 @@ +{ + "Reservations": [ + { + "ReservationId": "r-0a1b2c3d4e5f00001", + "OwnerId": "123456789012", + "Groups": [], + "Instances": [ + { + "InstanceId": "i-0a1b2c3d4e5f00001", + "ImageId": "ami-0abcdef1234567890", + "InstanceType": "t3.micro", + "KeyName": "my-key-pair", + "LaunchTime": "2024-01-15T10:30:00+00:00", + "Placement": { + "AvailabilityZone": "us-east-1a", + "GroupName": "", + "Tenancy": "default" + }, + "PrivateDnsName": "ip-10-0-1-10.ec2.internal", + "PrivateIpAddress": "10.0.1.10", + "PublicDnsName": "ec2-54-0-0-10.compute-1.amazonaws.com", + "PublicIpAddress": "54.0.0.10", + "State": { + "Code": 16, + "Name": "running" + }, + "SubnetId": "subnet-0abc123def456001", + "VpcId": "vpc-0abc123def456001", + "Architecture": "x86_64", + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "AttachTime": "2024-01-15T10:30:05+00:00", + "DeleteOnTermination": true, + "Status": "attached", + "VolumeId": "vol-0abc123def456001" + } + } + ], + "EbsOptimized": false, + "EnaSupport": true, + "Hypervisor": "xen", + "NetworkInterfaces": [ + { + "NetworkInterfaceId": "eni-0abc123def456001", + "PrivateIpAddress": "10.0.1.10", + "PrivateDnsName": "ip-10-0-1-10.ec2.internal", + "Status": "in-use", + "SubnetId": "subnet-0abc123def456001", + "VpcId": "vpc-0abc123def456001" + } + ], + "RootDeviceName": "/dev/xvda", + "RootDeviceType": "ebs", + "SecurityGroups": [ + { + "GroupId": "sg-0abc123def456001", + "GroupName": "web-server-sg" + } + ], + "SourceDestCheck": true, + "Tags": [ + {"Key": "Name", "Value": "web-server-1"}, + {"Key": "Environment", "Value": "production"}, + {"Key": "Team", "Value": "backend"} + ], + "VirtualizationType": "hvm", + "CpuOptions": { + "CoreCount": 1, + "ThreadsPerCore": 2 + }, + "CapacityReservationSpecification": { + "CapacityReservationPreference": "open" + }, + "HibernationOptions": { + "Configured": false + }, + "MetadataOptions": { + "State": "applied", + "HttpTokens": "required", + "HttpPutResponseHopLimit": 2, + "HttpEndpoint": "enabled" + }, + "EnclaveOptions": { + "Enabled": false + }, + "PlatformDetails": "Linux/UNIX", + "UsageOperation": "RunInstances", + "UsageOperationUpdateTime": "2024-01-15T10:30:00+00:00", + "PrivateDnsNameOptions": { + "HostnameType": "ip-name", + "EnableResourceNameDnsARecord": false, + "EnableResourceNameDnsAAAARecord": false + } + }, + { + "InstanceId": "i-0a1b2c3d4e5f00002", + "ImageId": "ami-0abcdef1234567890", + "InstanceType": "t3.large", + "KeyName": "my-key-pair", + "LaunchTime": "2024-01-16T08:00:00+00:00", + "Placement": { + "AvailabilityZone": "us-east-1b", + "GroupName": "", + "Tenancy": "default" + }, + "PrivateDnsName": "ip-10-0-2-20.ec2.internal", + "PrivateIpAddress": "10.0.2.20", + "PublicDnsName": "", + "State": { + "Code": 80, + "Name": "stopped" + }, + "SubnetId": "subnet-0abc123def456002", + "VpcId": "vpc-0abc123def456001", + "Architecture": "x86_64", + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "AttachTime": "2024-01-16T08:00:05+00:00", + "DeleteOnTermination": true, + "Status": "attached", + "VolumeId": "vol-0abc123def456002" + } + } + ], + "EbsOptimized": true, + "EnaSupport": true, + "Hypervisor": "xen", + "NetworkInterfaces": [], + "RootDeviceName": "/dev/xvda", + "RootDeviceType": "ebs", + "SecurityGroups": [ + { + "GroupId": "sg-0abc123def456002", + 
"GroupName": "worker-sg" + } + ], + "SourceDestCheck": true, + "Tags": [ + {"Key": "Name", "Value": "worker-1"}, + {"Key": "Environment", "Value": "production"}, + {"Key": "Team", "Value": "data"} + ], + "VirtualizationType": "hvm", + "CpuOptions": { + "CoreCount": 2, + "ThreadsPerCore": 2 + }, + "CapacityReservationSpecification": { + "CapacityReservationPreference": "open" + }, + "HibernationOptions": { + "Configured": false + }, + "MetadataOptions": { + "State": "applied", + "HttpTokens": "required", + "HttpPutResponseHopLimit": 2, + "HttpEndpoint": "enabled" + }, + "EnclaveOptions": { + "Enabled": false + }, + "PlatformDetails": "Linux/UNIX", + "UsageOperation": "RunInstances", + "UsageOperationUpdateTime": "2024-01-16T08:00:00+00:00", + "PrivateDnsNameOptions": { + "HostnameType": "ip-name", + "EnableResourceNameDnsARecord": false, + "EnableResourceNameDnsAAAARecord": false + } + } + ] + } + ] +} diff --git a/tests/fixtures/aws_sts_identity.json b/tests/fixtures/aws_sts_identity.json new file mode 100644 index 00000000..1078e6c6 --- /dev/null +++ b/tests/fixtures/aws_sts_identity.json @@ -0,0 +1,5 @@ +{ + "UserId": "AIDAEXAMPLEUSERID1234", + "Account": "123456789012", + "Arn": "arn:aws:iam::123456789012:user/dev-user" +} diff --git a/tests/fixtures/psql_expanded.txt b/tests/fixtures/psql_expanded.txt new file mode 100644 index 00000000..95cc35b8 --- /dev/null +++ b/tests/fixtures/psql_expanded.txt @@ -0,0 +1,56 @@ +-[ RECORD 1 ]------------------------------- +id | 1 +username | alice_smith +email | alice@example.com +status | active +role | admin +created_at | 2024-01-01 09:00:00 +updated_at | 2024-01-15 14:30:00 +last_login | 2024-02-01 08:00:00 +login_count | 42 +preferences | {"theme":"dark","notifications":true} +-[ RECORD 2 ]------------------------------- +id | 2 +username | bob_jones +email | bob.jones@company.org +status | active +role | user +created_at | 2024-01-02 10:15:00 +updated_at | 2024-01-16 09:00:00 +last_login | 2024-02-02 09:30:00 +login_count | 17 +preferences | {"theme":"light","notifications":false} +-[ RECORD 3 ]------------------------------- +id | 3 +username | carol_white +email | carol.white@example.com +status | inactive +role | user +created_at | 2024-01-03 11:30:00 +updated_at | 2024-01-17 11:00:00 +last_login | 2024-01-20 10:00:00 +login_count | 5 +preferences | {"theme":"light","notifications":true} +-[ RECORD 4 ]------------------------------- +id | 4 +username | dave_brown +email | dave@business.net +status | active +role | moderator +created_at | 2024-01-04 08:45:00 +updated_at | 2024-01-18 16:00:00 +last_login | 2024-02-03 11:00:00 +login_count | 89 +preferences | {"theme":"dark","notifications":true} +-[ RECORD 5 ]------------------------------- +id | 5 +username | eve_davis +email | eve.davis@example.com +status | active +role | user +created_at | 2024-01-05 13:00:00 +updated_at | 2024-01-19 10:30:00 +last_login | 2024-02-04 14:00:00 +login_count | 31 +preferences | {"theme":"system","notifications":false} +(5 rows) diff --git a/tests/fixtures/psql_table.txt b/tests/fixtures/psql_table.txt new file mode 100644 index 00000000..7d5db4d6 --- /dev/null +++ b/tests/fixtures/psql_table.txt @@ -0,0 +1,23 @@ + id | username | email | status | created_at | updated_at | role +-------------+-------------------+--------------------------------+-----------+---------------------+---------------------+------------ + 1 | alice_smith | alice@example.com | active | 2024-01-01 09:00:00 | 2024-01-15 14:30:00 | admin + 2 | bob_jones | bob.jones@company.org | active 
| 2024-01-02 10:15:00 | 2024-01-16 09:00:00 | user + 3 | carol_white | carol.white@example.com | inactive | 2024-01-03 11:30:00 | 2024-01-17 11:00:00 | user + 4 | dave_brown | dave@business.net | active | 2024-01-04 08:45:00 | 2024-01-18 16:00:00 | moderator + 5 | eve_davis | eve.davis@example.com | active | 2024-01-05 13:00:00 | 2024-01-19 10:30:00 | user + 6 | frank_miller | frank.miller@company.org | suspended | 2024-01-06 14:15:00 | 2024-01-20 08:00:00 | user + 7 | grace_wilson | grace@example.com | active | 2024-01-07 15:30:00 | 2024-01-21 12:00:00 | user + 8 | henry_taylor | henry.taylor@business.net | active | 2024-01-08 09:45:00 | 2024-01-22 15:30:00 | admin + 9 | iris_anderson | iris.anderson@example.com | inactive | 2024-01-09 10:00:00 | 2024-01-23 09:00:00 | user + 10 | jack_thomas | jack@company.org | active | 2024-01-10 11:15:00 | 2024-01-24 14:00:00 | user + 11 | kate_jackson | kate.jackson@example.com | active | 2024-01-11 12:30:00 | 2024-01-25 10:00:00 | moderator + 12 | liam_white | liam.white@business.net | active | 2024-01-12 13:45:00 | 2024-01-26 11:00:00 | user + 13 | mia_harris | mia@example.com | active | 2024-01-13 14:00:00 | 2024-01-27 12:00:00 | user + 14 | noah_martin | noah.martin@company.org | inactive | 2024-01-14 15:15:00 | 2024-01-28 13:00:00 | user + 15 | olivia_garcia | olivia.garcia@example.com | active | 2024-01-15 08:30:00 | 2024-01-29 14:00:00 | user + 16 | peter_martinez | peter@business.net | active | 2024-01-16 09:45:00 | 2024-01-30 15:00:00 | admin + 17 | quinn_robinson | quinn.robinson@example.com | active | 2024-01-17 10:00:00 | 2024-01-31 08:00:00 | user + 18 | rachel_clark | rachel.clark@company.org | suspended | 2024-01-18 11:15:00 | 2024-02-01 09:00:00 | user + 19 | sam_rodriguez | sam@example.com | active | 2024-01-19 12:30:00 | 2024-02-02 10:00:00 | user + 20 | tara_lewis | tara.lewis@business.net | active | 2024-01-20 13:45:00 | 2024-02-03 11:00:00 | moderator +(20 rows)