README.md: 4 changes (2 additions, 2 deletions)
@@ -14,12 +14,12 @@ We've all been there – a deployment script fails because of a temporary networ

- **Strategy-based interface** – Choose the right backoff strategy for your use case
- **HTTP-aware patience** – Respects `Retry-After` headers and server timing hints
- **11 backoff strategies** – From simple fixed delays to mathematical proactive rate limiting
- **10 backoff strategies** – From simple fixed delays to mathematical proactive rate limiting
- **Diophantine strategy** – Mathematical proactive rate limiting with multi-instance coordination
- **Intelligent pattern matching** – Define success/failure based on output patterns, not just exit codes
- **Timeout protection** – Prevent commands from hanging indefinitely
- **Preserves behavior** – Your command's output and exit codes work exactly as expected
- **Zero dependencies** – Single binary that works anywhere
- **Minimal dependencies** – Single binary with vendored dependencies
- **Unix Socket Daemon** – Real daemon server with JSON protocol for multi-instance coordination
- **Metrics Daemon (Optional)** – Collect and visualize patience metrics with the [`patienced` daemon](DAEMON.md)

pkg/backoff/http_aware.go: 118 changes (90 additions, 28 deletions)
@@ -129,52 +129,114 @@ func (h *HTTPAware) parseRateLimitHeaders(output string) time.Duration {

// parseJSONResponse extracts retry timing from JSON response bodies
func (h *HTTPAware) parseJSONResponse(output string) time.Duration {
// Limit search to first 10KB to avoid processing huge outputs
const maxSearchSize = 10 * 1024
if len(output) > maxSearchSize {
output = output[:maxSearchSize]
}

// Look for JSON-like content
if !strings.Contains(output, "{") {
return 0
}

// Try to find JSON in the output
start := strings.Index(output, "{")
if start == -1 {
return 0
}
// Try to find balanced JSON objects and parse each one
// This is more robust than first-{ to last-} which can span unrelated content
retryFields := []string{"retry_after", "retry_after_seconds", "retryAfter", "retryAfterSeconds", "retry_in"}

// Find the end of the JSON (simple heuristic)
end := strings.LastIndex(output, "}")
if end == -1 || end <= start {
return 0
}
start := 0
for {
// Find next potential JSON start
jsonStart := strings.Index(output[start:], "{")
if jsonStart == -1 {
break
}
jsonStart += start

jsonStr := output[start : end+1]
// Find matching closing brace using brace counting
jsonEnd := findMatchingBrace(output, jsonStart)
if jsonEnd == -1 {
start = jsonStart + 1
continue
}

// Parse JSON and look for retry timing fields
var data map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &data); err != nil {
return 0
}
jsonStr := output[jsonStart : jsonEnd+1]

// Check common retry timing field names
retryFields := []string{"retry_after", "retry_after_seconds", "retryAfter", "retryAfterSeconds"}
// Try to parse this JSON object
var data map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &data); err != nil {
start = jsonStart + 1
continue
}

for _, field := range retryFields {
if value, exists := data[field]; exists {
switch v := value.(type) {
case float64:
return time.Duration(v) * time.Second
case int:
return time.Duration(v) * time.Second
case string:
if seconds, err := strconv.Atoi(v); err == nil {
return time.Duration(seconds) * time.Second
// Check for retry timing fields in this JSON object
for _, field := range retryFields {
if value, exists := data[field]; exists {
switch v := value.(type) {
case float64:
return time.Duration(v) * time.Second
case int:
return time.Duration(v) * time.Second
case string:
if seconds, err := strconv.Atoi(v); err == nil {
return time.Duration(seconds) * time.Second
}
}
}
}

// This JSON didn't have retry fields, try next one
start = jsonEnd + 1
}

return 0
}

// findMatchingBrace finds the index of the closing brace that matches the opening brace at start
func findMatchingBrace(s string, start int) int {
if start >= len(s) || s[start] != '{' {
return -1
}

depth := 0
inString := false
escaped := false

for i := start; i < len(s); i++ {
c := s[i]

if escaped {
escaped = false
continue
}

if c == '\\' && inString {
escaped = true
continue
}

if c == '"' {
inString = !inString
continue
}

if inString {
continue
}

if c == '{' {
depth++
} else if c == '}' {
depth--
if depth == 0 {
return i
}
}
}

return -1 // No matching brace found
}

// capDelay applies the maximum delay cap
func (h *HTTPAware) capDelay(delay time.Duration) time.Duration {
if h.maxRetryAfter > 0 && delay > h.maxRetryAfter {
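To make the new parsing flow concrete, here is a minimal, standalone sketch of the same balanced-brace scan under simplified assumptions: the helper names `matchBrace` and `retryHint` are illustrative rather than the package's API, only the `retry_after` field is checked, and the 10KB cap is omitted.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// matchBrace returns the index of the '}' that closes the '{' at start, or -1.
// Like findMatchingBrace above, it tracks string literals and escape sequences
// so braces inside JSON string values are ignored.
func matchBrace(s string, start int) int {
	depth, inString, escaped := 0, false, false
	for i := start; i < len(s); i++ {
		c := s[i]
		switch {
		case escaped:
			escaped = false
		case c == '\\' && inString:
			escaped = true
		case c == '"':
			inString = !inString
		case inString:
			// ignore everything else inside a string literal
		case c == '{':
			depth++
		case c == '}':
			depth--
			if depth == 0 {
				return i
			}
		}
	}
	return -1
}

// retryHint scans output for balanced JSON objects and returns the first
// retry_after hint it finds, or 0 if none is present.
func retryHint(output string) time.Duration {
	start := 0
	for {
		i := strings.Index(output[start:], "{")
		if i == -1 {
			return 0
		}
		i += start

		end := matchBrace(output, i)
		if end == -1 {
			start = i + 1
			continue
		}

		var data map[string]interface{}
		if err := json.Unmarshal([]byte(output[i:end+1]), &data); err == nil {
			if v, ok := data["retry_after"].(float64); ok {
				return time.Duration(v) * time.Second
			}
		}
		start = end + 1
	}
}

func main() {
	// Two separate JSON objects in one command output: slicing from the first '{'
	// to the last '}' would produce unparseable JSON, but per-object scanning
	// still recovers the 7-second hint.
	out := `status={"code":429} body={"error":"rate limited","retry_after":7}`
	fmt.Println(retryHint(out)) // 7s
}
```

Scanning object by object also means a parse failure in one fragment no longer aborts the whole search, which is why the new code continues instead of returning 0 on an unmarshal error.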
pkg/daemon/daemon.go: 8 changes (6 additions, 2 deletions)
@@ -113,14 +113,18 @@ func (d *Daemon) Start() error {
}

// Create Unix domain socket listener
// NOTE: This duplicates functionality from UnixServer. Consider refactoring
// to use UnixServer for socket handling to reduce code duplication.
listener, err := net.Listen("unix", d.config.SocketPath)
if err != nil {
return fmt.Errorf("failed to create socket listener: %w", err)
}
d.listener = listener

// Set socket permissions
if err := os.Chmod(d.config.SocketPath, 0666); err != nil {
// Set socket permissions - restrict to owner only for security
// Using 0600 ensures only the daemon owner can read/write to the socket,
// preventing unauthorized users from sending metrics or interfering with rate limiting
if err := os.Chmod(d.config.SocketPath, 0600); err != nil {
d.logger.Warn("failed to set socket permissions", "error", err)
}

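A minimal sketch of the tightened socket setup, assuming an illustrative socket path rather than the daemon's configured one:

```go
package main

import (
	"log"
	"net"
	"os"
)

func main() {
	// Illustrative path; the real daemon takes the socket path from its config.
	socketPath := "/tmp/patience-example.sock"
	_ = os.Remove(socketPath) // clear a stale socket from a previous run

	listener, err := net.Listen("unix", socketPath)
	if err != nil {
		log.Fatalf("failed to create socket listener: %v", err)
	}
	defer listener.Close()

	// 0600: only the socket's owner can connect. The previous 0666 mode let any
	// local user send metrics or interfere with rate limiting.
	if err := os.Chmod(socketPath, 0600); err != nil {
		log.Printf("failed to set socket permissions: %v", err)
	}

	// Accept a single connection to show the listener is live, then exit.
	conn, err := listener.Accept()
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}
```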
pkg/executor/executor.go: 10 changes (5 additions, 5 deletions)
@@ -82,11 +82,11 @@ func (r *SystemCommandRunner) RunWithOutputAndContext(ctx context.Context, comma
// Process cleanup improvement: Set process group for better signal handling
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

// Network timeout reliability: Set environment variables for faster DNS resolution
cmd.Env = append(os.Environ(),
"CURL_CA_BUNDLE=", // Disable CA bundle lookup for faster curl operations
"CURL_TIMEOUT=10", // Set curl-specific timeout
)
// Inherit parent environment without modifications
// Note: Previously set CURL_CA_BUNDLE="" which disabled TLS certificate validation,
// creating a security vulnerability. Users should configure curl timeouts explicitly
// via command arguments if needed (e.g., curl --connect-timeout 10)
cmd.Env = os.Environ()

// Capture stdout and stderr while also forwarding to terminal
// Use limited buffers for large outputs
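A brief sketch of the corrected pattern, assuming a hypothetical health-check URL: the environment is inherited untouched, and any curl timeouts are passed as explicit arguments (`--connect-timeout` and `--max-time` are standard curl flags).

```go
package main

import (
	"context"
	"os"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	// Overall timeout for the whole attempt, matching the executor's context-based approach.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Timeouts live in the command's own arguments rather than in environment variables.
	cmd := exec.CommandContext(ctx, "curl",
		"--connect-timeout", "10",
		"--max-time", "25",
		"https://example.com/health")

	// Same process-group setup as the executor, so signals reach child processes.
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

	// Inherit the parent environment unmodified; clearing CURL_CA_BUNDLE, as the old
	// code did, silently disabled TLS certificate validation.
	cmd.Env = os.Environ()

	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	_ = cmd.Run() // exit-code handling elided in this sketch
}
```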