diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index 5ef04a66b..fac74ea8e 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -69,6 +69,7 @@ func provideCleanup( opsScheduledReport *service.OpsScheduledReportService, schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, + oauthProbe *service.OAuthProbeService, accountExpiry *service.AccountExpiryService, usageCleanup *service.UsageCleanupService, pricing *service.PricingService, @@ -134,6 +135,12 @@ func provideCleanup( tokenRefresh.Stop() return nil }}, + {"OAuthProbeService", func() error { + if oauthProbe != nil { + oauthProbe.Stop() + } + return nil + }}, {"AccountExpiryService", func() error { accountExpiry.Stop() return nil diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 7b22a31ef..f94ebc227 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -113,10 +113,11 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { identityCache := repository.NewIdentityCache(redisClient) accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache, identityCache) geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService) + openAITokenProvider := service.NewOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService) gatewayCache := repository.NewGatewayCache(redisClient) antigravityTokenProvider := service.NewAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService) antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, antigravityTokenProvider, rateLimitService, httpUpstream, settingService) - accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig) + accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, openAITokenProvider, antigravityGatewayService, accountUsageService, httpUpstream, configConfig) concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig) concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig) crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig) @@ -141,8 +142,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { identityService := service.NewIdentityService(identityCache) deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) claudeTokenProvider := service.NewClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService) - gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache) - openAITokenProvider := service.NewOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService) + gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, accountUsageService, userRepository, 
userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache) openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider) geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig) opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService) @@ -177,8 +177,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig) opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, configConfig) + oAuthProbeService := service.ProvideOAuthProbeService(accountRepository, httpUpstream, openAITokenProvider, geminiTokenProvider, claudeTokenProvider, accountUsageService, configConfig) accountExpiryService := service.ProvideAccountExpiryService(accountRepository) - v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, usageCleanupService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) + v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, oAuthProbeService, accountExpiryService, usageCleanupService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) application := &Application{ Server: httpServer, Cleanup: v, @@ -210,6 +211,7 @@ func provideCleanup( opsScheduledReport *service.OpsScheduledReportService, schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, + oauthProbe *service.OAuthProbeService, accountExpiry *service.AccountExpiryService, usageCleanup *service.UsageCleanupService, pricing *service.PricingService, @@ -274,6 +276,12 @@ func provideCleanup( tokenRefresh.Stop() return nil }}, + {"OAuthProbeService", func() error { + if oauthProbe != nil { + oauthProbe.Stop() + } + return nil + }}, {"AccountExpiryService", func() error { accountExpiry.Stop() return nil diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 00a784802..5cdee08e7 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -58,6 +58,7 @@ type Config struct { UsageCleanup UsageCleanupConfig 
`mapstructure:"usage_cleanup"` Concurrency ConcurrencyConfig `mapstructure:"concurrency"` TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"` + OAuthProbe OAuthProbeConfig `mapstructure:"oauth_probe"` RunMode string `mapstructure:"run_mode" yaml:"run_mode"` Timezone string `mapstructure:"timezone"` // e.g. "Asia/Shanghai", "UTC" Gemini GeminiConfig `mapstructure:"gemini"` @@ -127,6 +128,24 @@ type TokenRefreshConfig struct { RetryBackoffSeconds int `mapstructure:"retry_backoff_seconds"` } +// OAuthProbeConfig configures OAuth2 account liveness probing / quota sync. +// It periodically refreshes OAuth account availability and quota info (OpenAI/Gemini/Claude) via minimal "test connection"-style requests. +type OAuthProbeConfig struct { + // Whether the probe task is enabled (disabled by default to avoid extra quota consumption) + Enabled bool `mapstructure:"enabled"` + // Check interval (minutes) + CheckIntervalMinutes int `mapstructure:"check_interval_minutes"` + // Idle threshold (minutes): an account is probed only if it has not been used within this duration (fallback mechanism). + // <=0 disables idle filtering (every eligible account is probed each cycle). + IdleThresholdMinutes int `mapstructure:"idle_threshold_minutes"` + // Per-request timeout (seconds) + RequestTimeoutSeconds int `mapstructure:"request_timeout_seconds"` + // Probe concurrency (>0) + MaxConcurrency int `mapstructure:"max_concurrency"` + // Max accounts probed per cycle (<=0 means unlimited) + MaxAccountsPerCycle int `mapstructure:"max_accounts_per_cycle"` +} + type PricingConfig struct { // 价格数据远程URL(默认使用LiteLLM镜像) RemoteURL string `mapstructure:"remote_url"` @@ -862,6 +881,14 @@ func setDefaults() { viper.SetDefault("token_refresh.max_retries", 3) // 最多重试3次 viper.SetDefault("token_refresh.retry_backoff_seconds", 2) // 重试退避基础2秒 + // OAuthProbe (disabled by default to avoid extra quota consumption) + viper.SetDefault("oauth_probe.enabled", false) + viper.SetDefault("oauth_probe.check_interval_minutes", 15) + viper.SetDefault("oauth_probe.idle_threshold_minutes", 15) + viper.SetDefault("oauth_probe.request_timeout_seconds", 20) + viper.SetDefault("oauth_probe.max_concurrency", 2) + viper.SetDefault("oauth_probe.max_accounts_per_cycle", 0) + // Gemini OAuth - configure via environment variables or config file // GEMINI_OAUTH_CLIENT_ID and GEMINI_OAUTH_CLIENT_SECRET // Default: uses Gemini CLI public credentials (set via environment) diff --git a/backend/internal/service/account_test_service.go b/backend/internal/service/account_test_service.go index 46376c698..ccf4409ef 100644 --- a/backend/internal/service/account_test_service.go +++ b/backend/internal/service/account_test_service.go @@ -14,6 +14,7 @@ import ( "net/http" "regexp" "strings" + "time" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" @@ -46,7 +47,9 @@ type TestEvent struct { type AccountTestService struct { accountRepo AccountRepository geminiTokenProvider *GeminiTokenProvider + openAITokenProvider *OpenAITokenProvider antigravityGatewayService *AntigravityGatewayService + accountUsageService *AccountUsageService httpUpstream HTTPUpstream cfg *config.Config } @@ -55,14 +58,18 @@ type AccountTestService struct { func NewAccountTestService( accountRepo AccountRepository, geminiTokenProvider *GeminiTokenProvider, + openAITokenProvider *OpenAITokenProvider, antigravityGatewayService *AntigravityGatewayService, + accountUsageService *AccountUsageService, httpUpstream HTTPUpstream, cfg *config.Config, ) *AccountTestService { return &AccountTestService{ accountRepo: accountRepo, geminiTokenProvider: geminiTokenProvider, + openAITokenProvider: openAITokenProvider, antigravityGatewayService: antigravityGatewayService, + accountUsageService: accountUsageService, httpUpstream: httpUpstream, cfg: cfg, } } @@ -97,13 +104,19 @@ func generateSessionString() (string, error) { return
fmt.Sprintf("user_%s_account__session_%s", hex64, sessionUUID), nil } -// createTestPayload creates a Claude Code style test request payload -func createTestPayload(modelID string) (map[string]any, error) { +// createTestPayload creates a Claude Code style test request payload. +// nonce is optional and can be used to avoid upstream caching when probing periodically. +func createTestPayload(modelID string, nonce string) (map[string]any, error) { sessionID, err := generateSessionString() if err != nil { return nil, err } + text := "hi" + if strings.TrimSpace(nonce) != "" { + text = fmt.Sprintf("hi-%s", strings.TrimSpace(nonce)) + } + return map[string]any{ "model": modelID, "messages": []map[string]any{ @@ -112,7 +125,7 @@ func createTestPayload(modelID string) (map[string]any, error) { "content": []map[string]any{ { "type": "text", - "text": "hi", + "text": text, "cache_control": map[string]string{ "type": "ephemeral", }, @@ -175,6 +188,7 @@ func (s *AccountTestService) testClaudeAccountConnection(c *gin.Context, account if testModelID == "" { testModelID = claude.DefaultTestModel } + nonce, _ := randomHexString(8) // For API Key accounts with model mapping, map the model if account.Type == "apikey" { @@ -228,7 +242,7 @@ func (s *AccountTestService) testClaudeAccountConnection(c *gin.Context, account c.Writer.Flush() // Create Claude Code style payload (same for all account types) - payload, err := createTestPayload(testModelID) + payload, err := createTestPayload(testModelID, nonce) if err != nil { return s.sendErrorAndEnd(c, "Failed to create test payload") } @@ -271,6 +285,16 @@ func (s *AccountTestService) testClaudeAccountConnection(c *gin.Context, account } defer func() { _ = resp.Body.Close() }() + // For Claude OAuth / Setup-Token accounts, sync usage snapshot (5h/7d windows) to DB extra. + // Do this regardless of status code so users can observe quota + reset times even when rate-limited. 
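+	// The resulting snapshot is stored in account extra under claude_usage_snapshot / claude_usage_updated_at / claude_usage_source (see account_usage_snapshot_sync.go).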
+ if account.IsOAuth() && s.accountUsageService != nil { + go func(a *Account) { + updateCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _ = s.accountUsageService.SyncUsageSnapshotToExtra(updateCtx, a, usageSnapshotSourceTest) + }(account) + } + if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) return s.sendErrorAndEnd(c, fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(body))) @@ -290,6 +314,8 @@ func (s *AccountTestService) testOpenAIAccountConnection(c *gin.Context, account testModelID = openai.DefaultTestModel } + nonce, _ := randomHexString(8) + // For API Key accounts with model mapping, map the model if account.Type == "apikey" { mapping := account.GetModelMapping() @@ -309,8 +335,16 @@ func (s *AccountTestService) testOpenAIAccountConnection(c *gin.Context, account if account.IsOAuth() { isOAuth = true // OAuth - use Bearer token with ChatGPT internal API - authToken = account.GetOpenAIAccessToken() - if authToken == "" { + if s.openAITokenProvider != nil && account.Platform == PlatformOpenAI && account.Type == AccountTypeOAuth { + token, err := s.openAITokenProvider.GetAccessToken(ctx, account) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Failed to get access token: %s", err.Error())) + } + authToken = token + } else { + authToken = account.GetOpenAIAccessToken() + } + if strings.TrimSpace(authToken) == "" { return s.sendErrorAndEnd(c, "No access token available") } @@ -345,7 +379,7 @@ func (s *AccountTestService) testOpenAIAccountConnection(c *gin.Context, account c.Writer.Flush() // Create OpenAI Responses API payload - payload := createOpenAITestPayload(testModelID, isOAuth) + payload := createOpenAITestPayload(testModelID, isOAuth, nonce) payloadBytes, _ := json.Marshal(payload) // Send test_start event @@ -381,6 +415,23 @@ func (s *AccountTestService) testOpenAIAccountConnection(c *gin.Context, account } defer func() { _ = resp.Body.Close() }() + // For OpenAI OAuth (ChatGPT Codex), extract and persist quota windows from response headers. + // Do this even when status != 200 so that users can see 5h/7d usage + reset times. + if isOAuth { + if snapshot := extractCodexUsageHeaders(resp.Header); snapshot != nil { + if derived := deriveCodexUsageSnapshot(snapshot); derived != nil && len(derived.updates) > 0 { + go func(d *codexDerivedUsageSnapshot) { + updateCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = s.accountRepo.UpdateExtra(updateCtx, account.ID, d.updates) + if resetAt := codexRateLimitResetAt(d); resetAt != nil && resetAt.After(time.Now()) { + _ = s.accountRepo.SetRateLimited(updateCtx, account.ID, *resetAt) + } + }(derived) + } + } + } + if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) return s.sendErrorAndEnd(c, fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(body))) @@ -418,7 +469,8 @@ func (s *AccountTestService) testGeminiAccountConnection(c *gin.Context, account c.Writer.Flush() // Create test payload (Gemini format) - payload := createGeminiTestPayload() + nonce, _ := randomHexString(8) + payload := createGeminiTestPayload(nonce) // Build request based on account type var req *http.Request @@ -452,6 +504,16 @@ func (s *AccountTestService) testGeminiAccountConnection(c *gin.Context, account } defer func() { _ = resp.Body.Close() }() + // Persist (simulated) Gemini quota snapshot to DB extra so account list can show it without per-row API calls. 
+ // This is local computation (usage_logs + quota policy), not an upstream quota API. + if s.accountUsageService != nil { + go func(a *Account) { + updateCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _ = s.accountUsageService.SyncUsageSnapshotToExtra(updateCtx, a, usageSnapshotSourceTest) + }(account) + } + if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) return s.sendErrorAndEnd(c, fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(body))) @@ -602,14 +664,19 @@ func (s *AccountTestService) buildCodeAssistRequest(ctx context.Context, accessT return req, nil } -// createGeminiTestPayload creates a minimal test payload for Gemini API -func createGeminiTestPayload() []byte { +// createGeminiTestPayload creates a minimal test payload for Gemini API. +// nonce is optional and can be used to avoid upstream caching when probing periodically. +func createGeminiTestPayload(nonce string) []byte { + text := "hi" + if strings.TrimSpace(nonce) != "" { + text = fmt.Sprintf("hi-%s", strings.TrimSpace(nonce)) + } payload := map[string]any{ "contents": []map[string]any{ { "role": "user", "parts": []map[string]any{ - {"text": "hi"}, + {"text": text}, }, }, }, @@ -694,7 +761,12 @@ func (s *AccountTestService) processGeminiStream(c *gin.Context, body io.Reader) } // createOpenAITestPayload creates a test payload for OpenAI Responses API -func createOpenAITestPayload(modelID string, isOAuth bool) map[string]any { +func createOpenAITestPayload(modelID string, isOAuth bool, nonce string) map[string]any { + text := "hi" + if strings.TrimSpace(nonce) != "" { + text = fmt.Sprintf("hi-%s", nonce) + } + payload := map[string]any{ "model": modelID, "input": []map[string]any{ @@ -703,7 +775,7 @@ func createOpenAITestPayload(modelID string, isOAuth bool) map[string]any { "content": []map[string]any{ { "type": "input_text", - "text": "hi", + "text": text, }, }, }, @@ -714,6 +786,10 @@ func createOpenAITestPayload(modelID string, isOAuth bool) map[string]any { // OAuth accounts using ChatGPT internal API require store: false if isOAuth { payload["store"] = false + // Randomize prompt_cache_key to avoid upstream caching between probes/tests. 
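+		// For example, a hypothetical nonce "a1b2c3d4" produces prompt_cache_key "sub2api_test_a1b2c3d4"; an empty nonce leaves the field unset.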
+ if strings.TrimSpace(nonce) != "" { + payload["prompt_cache_key"] = "sub2api_test_" + strings.TrimSpace(nonce) + } } // All accounts require instructions for Responses API diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go index f3b3e20d6..b7a73b665 100644 --- a/backend/internal/service/account_usage_service.go +++ b/backend/internal/service/account_usage_service.go @@ -182,6 +182,7 @@ type AccountUsageService struct { antigravityQuotaFetcher *AntigravityQuotaFetcher cache *UsageCache identityCache IdentityCache + usageSnapshotSyncCache sync.Map // accountID -> time.Time (best-effort throttle) } // NewAccountUsageService 创建AccountUsageService实例 diff --git a/backend/internal/service/account_usage_snapshot_sync.go b/backend/internal/service/account_usage_snapshot_sync.go new file mode 100644 index 000000000..bf2a33513 --- /dev/null +++ b/backend/internal/service/account_usage_snapshot_sync.go @@ -0,0 +1,162 @@ +package service + +import ( + "context" + "fmt" + "strings" + "time" +) + +const ( + claudeUsageSnapshotKey = "claude_usage_snapshot" + claudeUsageUpdatedAtKey = "claude_usage_updated_at" + claudeUsageSnapshotSource = "claude_usage_source" + geminiUsageSnapshotKey = "gemini_usage_snapshot" + geminiUsageUpdatedAtKey = "gemini_usage_updated_at" + geminiUsageSnapshotSource = "gemini_usage_source" + usageSnapshotSourceGateway = "gateway" + usageSnapshotSourceTest = "test" + usageSnapshotSourceProbe = "probe" +) + +func (s *AccountUsageService) SyncUsageSnapshotToExtra(ctx context.Context, account *Account, source string) error { + if s == nil || s.accountRepo == nil || account == nil || account.ID <= 0 { + return nil + } + + now := time.Now().UTC() + if strings.TrimSpace(source) == "" { + source = usageSnapshotSourceGateway + } + + switch account.Platform { + case PlatformAnthropic: + usage, err := s.computeClaudeUsageSnapshot(ctx, account, now) + if err != nil { + return err + } + if usage == nil { + return nil + } + if usage.UpdatedAt == nil { + usage.UpdatedAt = &now + } + + updates := map[string]any{ + claudeUsageSnapshotKey: usage, + claudeUsageUpdatedAtKey: now.Format(time.RFC3339), + claudeUsageSnapshotSource: source, + } + if err := s.accountRepo.UpdateExtra(ctx, account.ID, updates); err != nil { + return err + } + + // If the quota is already full, persist a real reset window to account.rate_limit_reset_at + // so the scheduler can skip the account until the window ends. + if resetAt := claudeRateLimitResetAt(usage); resetAt != nil && resetAt.After(time.Now()) { + _ = s.accountRepo.SetRateLimited(ctx, account.ID, *resetAt) + } + + return nil + + case PlatformGemini: + usage, err := s.computeGeminiUsageSnapshot(ctx, account, now) + if err != nil { + return err + } + if usage == nil { + return nil + } + if usage.UpdatedAt == nil { + usage.UpdatedAt = &now + } + + updates := map[string]any{ + geminiUsageSnapshotKey: usage, + geminiUsageUpdatedAtKey: now.Format(time.RFC3339), + geminiUsageSnapshotSource: source, + } + return s.accountRepo.UpdateExtra(ctx, account.ID, updates) + default: + return nil + } +} + +// MaybeSyncUsageSnapshotToExtra performs a best-effort sync with an in-memory throttle. +// NOTE: This is per-process only; multi-instance deployments still sync independently. 
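+// For example, minInterval=5*time.Minute allows at most one sync per account every five minutes within this process, while minInterval<=0 bypasses the throttle and syncs immediately.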
+func (s *AccountUsageService) MaybeSyncUsageSnapshotToExtra(ctx context.Context, account *Account, source string, minInterval time.Duration) error { + if s == nil || account == nil || account.ID <= 0 { + return nil + } + if minInterval <= 0 { + return s.SyncUsageSnapshotToExtra(ctx, account, source) + } + + now := time.Now() + if v, ok := s.usageSnapshotSyncCache.Load(account.ID); ok { + if last, ok := v.(time.Time); ok && now.Sub(last) < minInterval { + return nil + } + } + s.usageSnapshotSyncCache.Store(account.ID, now) + return s.SyncUsageSnapshotToExtra(ctx, account, source) +} + +func (s *AccountUsageService) computeClaudeUsageSnapshot(ctx context.Context, account *Account, now time.Time) (*UsageInfo, error) { + if account == nil || account.Platform != PlatformAnthropic { + return nil, nil + } + // API Key accounts do not support Claude OAuth usage API. + if account.Type == AccountTypeAPIKey { + return nil, nil + } + + // OAuth accounts: call Anthropic OAuth usage API. + if account.CanGetUsage() { + apiResp, err := s.fetchOAuthUsageRaw(ctx, account) + if err != nil { + return nil, err + } + usage := s.buildUsageInfo(apiResp, &now) + s.addWindowStats(ctx, account, usage) + return usage, nil + } + + // Setup Token accounts: estimate 5h window from session_window fields. + if account.Type == AccountTypeSetupToken { + usage := s.estimateSetupTokenUsage(account) + usage.UpdatedAt = &now + s.addWindowStats(ctx, account, usage) + return usage, nil + } + + return nil, fmt.Errorf("unsupported anthropic account type: %s", account.Type) +} + +func (s *AccountUsageService) computeGeminiUsageSnapshot(ctx context.Context, account *Account, now time.Time) (*UsageInfo, error) { + if account == nil || account.Platform != PlatformGemini { + return nil, nil + } + + usage, err := s.getGeminiUsage(ctx, account) + if err != nil { + return nil, err + } + if usage != nil && usage.UpdatedAt == nil { + usage.UpdatedAt = &now + } + return usage, nil +} + +func claudeRateLimitResetAt(usage *UsageInfo) *time.Time { + if usage == nil { + return nil + } + if usage.SevenDay != nil && usage.SevenDay.ResetsAt != nil && usage.SevenDay.Utilization >= 100 { + return usage.SevenDay.ResetsAt + } + if usage.FiveHour != nil && usage.FiveHour.ResetsAt != nil && usage.FiveHour.Utilization >= 100 { + return usage.FiveHour.ResetsAt + } + return nil +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 9565da290..71fa5ed7e 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -200,6 +200,7 @@ type GatewayService struct { accountRepo AccountRepository groupRepo GroupRepository usageLogRepo UsageLogRepository + accountUsageService *AccountUsageService userRepo UserRepository userSubRepo UserSubscriptionRepository cache GatewayCache @@ -221,6 +222,7 @@ func NewGatewayService( accountRepo AccountRepository, groupRepo GroupRepository, usageLogRepo UsageLogRepository, + accountUsageService *AccountUsageService, userRepo UserRepository, userSubRepo UserSubscriptionRepository, cache GatewayCache, @@ -240,6 +242,7 @@ func NewGatewayService( accountRepo: accountRepo, groupRepo: groupRepo, usageLogRepo: usageLogRepo, + accountUsageService: accountUsageService, userRepo: userRepo, userSubRepo: userSubRepo, cache: cache, @@ -2961,6 +2964,16 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } + // For 429s, also try to refresh quota 
snapshots to DB extra so the admin UI can show + // accurate window usage + reset times (especially useful for Claude OAuth 5h/7d windows). + if resp.StatusCode == http.StatusTooManyRequests && s.accountUsageService != nil { + go func(a *Account) { + updateCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _ = s.accountUsageService.SyncUsageSnapshotToExtra(updateCtx, a, usageSnapshotSourceGateway) + }(account) + } + // 记录上游错误响应体摘要便于排障(可选:由配置控制;不回显到客户端) if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { log.Printf( @@ -3555,6 +3568,7 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { log.Printf("[SIMPLE MODE] Usage recorded (not billed): user=%d, tokens=%d", usageLog.UserID, usageLog.TotalTokens()) s.deferredService.ScheduleLastUsedUpdate(account.ID) + s.maybeSyncAccountUsageSnapshot(account) return nil } @@ -3583,10 +3597,43 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu // Schedule batch update for account last_used_at s.deferredService.ScheduleLastUsedUpdate(account.ID) + s.maybeSyncAccountUsageSnapshot(account) return nil } +func (s *GatewayService) maybeSyncAccountUsageSnapshot(account *Account) { + if s == nil || s.accountUsageService == nil || account == nil { + return + } + var minInterval time.Duration + switch account.Platform { + case PlatformGemini: + minInterval = time.Minute + case PlatformAnthropic: + // Claude OAuth usage API is remote; keep it low-frequency to avoid adding latency/cost. + minInterval = 5 * time.Minute + default: + return + } + + // Fast-path throttle: avoid spawning goroutines when we know we just synced recently. + if minInterval > 0 { + now := time.Now() + if v, ok := s.accountUsageService.usageSnapshotSyncCache.Load(account.ID); ok { + if last, ok := v.(time.Time); ok && now.Sub(last) < minInterval { + return + } + } + } + + go func(a *Account, interval time.Duration) { + updateCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _ = s.accountUsageService.MaybeSyncUsageSnapshotToExtra(updateCtx, a, usageSnapshotSourceGateway, interval) + }(account, minInterval) +} + // ForwardCountTokens 转发 count_tokens 请求到上游 API // 特点:不记录使用量、仅支持非流式响应 func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, account *Account, parsed *ParsedRequest) error { diff --git a/backend/internal/service/oauth_probe_service.go b/backend/internal/service/oauth_probe_service.go new file mode 100644 index 000000000..0528cb7b0 --- /dev/null +++ b/backend/internal/service/oauth_probe_service.go @@ -0,0 +1,547 @@ +package service + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/claude" + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" +) + +// OAuthProbeService periodically probes OAuth/Setup-Token accounts to: +// - keep OAuth account availability observable even when not scheduled, +// - refresh upstream quota snapshots (e.g. OpenAI Codex 5h/7d windows), +// - reduce stale/expired quota display issues on the account page. +// +// NOTE: This service is disabled by default because each probe may consume real quota. 
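+// It is turned on via oauth_probe.enabled=true; check interval, idle threshold, request timeout, concurrency and the per-cycle cap are read from OAuthProbeConfig (see internal/config).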
+type OAuthProbeService struct { + accountRepo AccountRepository + httpUpstream HTTPUpstream + openAITokenProvider *OpenAITokenProvider + geminiTokenProvider *GeminiTokenProvider + claudeTokenProvider *ClaudeTokenProvider + accountUsageService *AccountUsageService + cfg *config.OAuthProbeConfig + securityCfg *config.SecurityConfig + + stopCh chan struct{} + wg sync.WaitGroup +} + +func NewOAuthProbeService( + accountRepo AccountRepository, + httpUpstream HTTPUpstream, + openAITokenProvider *OpenAITokenProvider, + geminiTokenProvider *GeminiTokenProvider, + claudeTokenProvider *ClaudeTokenProvider, + accountUsageService *AccountUsageService, + cfg *config.Config, +) *OAuthProbeService { + if cfg == nil { + cfg = &config.Config{} + } + return &OAuthProbeService{ + accountRepo: accountRepo, + httpUpstream: httpUpstream, + openAITokenProvider: openAITokenProvider, + geminiTokenProvider: geminiTokenProvider, + claudeTokenProvider: claudeTokenProvider, + accountUsageService: accountUsageService, + cfg: &cfg.OAuthProbe, + securityCfg: &cfg.Security, + stopCh: make(chan struct{}), + } +} + +func (s *OAuthProbeService) Start() { + if s.cfg == nil || !s.cfg.Enabled { + log.Println("[OAuthProbe] Service disabled by configuration") + return + } + + s.wg.Add(1) + go s.probeLoop() + + log.Printf("[OAuthProbe] Service started (check every %d minutes, idle_threshold=%d minutes, timeout=%ds, concurrency=%d, max_accounts_per_cycle=%d)", + s.cfg.CheckIntervalMinutes, + s.cfg.IdleThresholdMinutes, + s.cfg.RequestTimeoutSeconds, + s.cfg.MaxConcurrency, + s.cfg.MaxAccountsPerCycle, + ) +} + +func (s *OAuthProbeService) Stop() { + if s == nil { + return + } + close(s.stopCh) + s.wg.Wait() + log.Println("[OAuthProbe] Service stopped") +} + +func (s *OAuthProbeService) probeLoop() { + defer s.wg.Done() + + interval := time.Duration(s.cfg.CheckIntervalMinutes) * time.Minute + if interval < time.Minute { + interval = 5 * time.Minute + } + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + // Run once on startup to populate usage windows quickly. + s.processProbeCycle() + + for { + select { + case <-ticker.C: + s.processProbeCycle() + case <-s.stopCh: + return + } + } +} + +func (s *OAuthProbeService) processProbeCycle() { + if s == nil || s.accountRepo == nil { + return + } + ctx := context.Background() + + accounts, err := s.accountRepo.ListActive(ctx) + if err != nil { + log.Printf("[OAuthProbe] Failed to list accounts: %v", err) + return + } + + // Filter to OAuth/Setup-Token accounts on OpenAI/Gemini/Claude only. + targets := make([]Account, 0, len(accounts)) + idleThreshold := time.Duration(s.cfg.IdleThresholdMinutes) * time.Minute + if idleThreshold < 0 { + idleThreshold = 0 + } + for _, a := range accounts { + if !a.IsOAuth() { + continue + } + switch a.Platform { + case PlatformOpenAI, PlatformGemini, PlatformAnthropic: + // Only probe idle accounts as a "fallback" mechanism. + // If the account was used recently, skip to avoid consuming extra quota. 
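+			// Example: with idle_threshold_minutes=15, an account last used 5 minutes ago is skipped this cycle, while one idle for 30 minutes (or never used) is probed.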
+ if idleThreshold > 0 && a.LastUsedAt != nil && time.Since(*a.LastUsedAt) < idleThreshold { + continue + } + targets = append(targets, a) + } + } + + if len(targets) == 0 { + log.Printf("[OAuthProbe] Cycle complete: total=%d, targets=0", len(accounts)) + return + } + + maxPerCycle := s.cfg.MaxAccountsPerCycle + if maxPerCycle > 0 && len(targets) > maxPerCycle { + targets = targets[:maxPerCycle] + } + + concurrency := s.cfg.MaxConcurrency + if concurrency <= 0 { + concurrency = 1 + } + + var ( + mu sync.Mutex + okCount int + failCount int + openaiCount int + geminiCount int + claudeCount int + openaiOK int + geminiOK int + claudeOK int + ) + + sem := make(chan struct{}, concurrency) + var wg sync.WaitGroup + + for i := range targets { + account := targets[i] + wg.Add(1) + sem <- struct{}{} + + go func(a Account) { + defer wg.Done() + defer func() { <-sem }() + + probeOK := false + switch a.Platform { + case PlatformOpenAI: + mu.Lock() + openaiCount++ + mu.Unlock() + if err := s.probeOpenAIOAuth(ctx, &a); err == nil { + probeOK = true + mu.Lock() + openaiOK++ + mu.Unlock() + } + case PlatformGemini: + mu.Lock() + geminiCount++ + mu.Unlock() + if err := s.probeGeminiOAuth(ctx, &a); err == nil { + probeOK = true + mu.Lock() + geminiOK++ + mu.Unlock() + } + case PlatformAnthropic: + mu.Lock() + claudeCount++ + mu.Unlock() + if err := s.probeClaudeOAuth(ctx, &a); err == nil { + probeOK = true + mu.Lock() + claudeOK++ + mu.Unlock() + } + } + + mu.Lock() + if probeOK { + okCount++ + } else { + failCount++ + } + mu.Unlock() + }(account) + } + + wg.Wait() + log.Printf("[OAuthProbe] Cycle complete: total=%d, targets=%d, ok=%d, failed=%d (openai=%d ok=%d, gemini=%d ok=%d, claude=%d ok=%d)", + len(accounts), len(targets), okCount, failCount, + openaiCount, openaiOK, geminiCount, geminiOK, claudeCount, claudeOK, + ) +} + +func (s *OAuthProbeService) probeOpenAIOAuth(parent context.Context, account *Account) error { + if account == nil || account.Platform != PlatformOpenAI || account.Type != AccountTypeOAuth { + return nil + } + if s.httpUpstream == nil { + return fmt.Errorf("http upstream not configured") + } + + timeout := time.Duration(s.cfg.RequestTimeoutSeconds) * time.Second + if timeout <= 0 { + timeout = 20 * time.Second + } + ctx, cancel := context.WithTimeout(parent, timeout) + defer cancel() + + token := strings.TrimSpace(account.GetOpenAIAccessToken()) + if s.openAITokenProvider != nil { + if t, err := s.openAITokenProvider.GetAccessToken(ctx, account); err == nil && strings.TrimSpace(t) != "" { + token = t + } + } + if token == "" { + return fmt.Errorf("missing access token") + } + + nonce, _ := randomHexString(8) + payload := createOpenAITestPayload(openai.DefaultTestModel, true, nonce) + body, _ := json.Marshal(payload) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, chatgptCodexAPIURL, bytes.NewReader(body)) + if err != nil { + return err + } + req.Host = "chatgpt.com" + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("accept", "text/event-stream") + req.Header.Set("OpenAI-Beta", "responses=experimental") + if chatgptAccountID := account.GetChatGPTAccountID(); chatgptAccountID != "" { + req.Header.Set("chatgpt-account-id", chatgptAccountID) + } + + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + resp, err := s.httpUpstream.DoWithTLS(req, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) + if err != nil { + 
s.saveProbeResult(context.Background(), account.ID, false, 0, err) + return err + } + defer func() { _ = resp.Body.Close() }() + + // Persist Codex quota snapshot (headers only). + s.persistOpenAICodexSnapshot(ctx, account.ID, resp.Header) + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096)) + err := fmt.Errorf("openai oauth probe status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(body))) + s.saveProbeResult(context.Background(), account.ID, false, resp.StatusCode, err) + return err + } + + s.saveProbeResult(context.Background(), account.ID, true, resp.StatusCode, nil) + return nil +} + +func (s *OAuthProbeService) persistOpenAICodexSnapshot(ctx context.Context, accountID int64, headers http.Header) { + if s == nil || s.accountRepo == nil { + return + } + if snapshot := extractCodexUsageHeaders(headers); snapshot != nil { + derived := deriveCodexUsageSnapshot(snapshot) + if derived == nil || len(derived.updates) == 0 { + return + } + _ = s.accountRepo.UpdateExtra(ctx, accountID, derived.updates) + if resetAt := codexRateLimitResetAt(derived); resetAt != nil && resetAt.After(time.Now()) { + _ = s.accountRepo.SetRateLimited(ctx, accountID, *resetAt) + } + } +} + +func (s *OAuthProbeService) probeGeminiOAuth(parent context.Context, account *Account) error { + if account == nil || account.Platform != PlatformGemini || account.Type != AccountTypeOAuth { + return nil + } + if s.httpUpstream == nil { + return fmt.Errorf("http upstream not configured") + } + if s.geminiTokenProvider == nil { + return fmt.Errorf("gemini token provider not configured") + } + + timeout := time.Duration(s.cfg.RequestTimeoutSeconds) * time.Second + if timeout <= 0 { + timeout = 20 * time.Second + } + ctx, cancel := context.WithTimeout(parent, timeout) + defer cancel() + + accessToken, err := s.geminiTokenProvider.GetAccessToken(ctx, account) + if err != nil { + s.saveProbeResult(context.Background(), account.ID, false, 0, err) + return err + } + + nonce, _ := randomHexString(8) + payload := createGeminiTestPayload(nonce) + + projectID := strings.TrimSpace(account.GetCredential("project_id")) + modelID := geminicli.DefaultTestModel + + var req *http.Request + if projectID == "" { + baseURL := account.GetCredential("base_url") + if strings.TrimSpace(baseURL) == "" { + baseURL = geminicli.AIStudioBaseURL + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return err + } + fullURL := fmt.Sprintf("%s/v1beta/models/%s:streamGenerateContent?alt=sse", strings.TrimRight(normalizedBaseURL, "/"), modelID) + req, err = http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(payload)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+accessToken) + } else { + var inner map[string]any + if err := json.Unmarshal(payload, &inner); err != nil { + return err + } + wrapped := map[string]any{ + "model": modelID, + "project": projectID, + "request": inner, + } + wrappedBytes, _ := json.Marshal(wrapped) + + normalizedBaseURL, err := s.validateUpstreamBaseURL(geminicli.GeminiCliBaseURL) + if err != nil { + return err + } + fullURL := fmt.Sprintf("%s/v1internal:streamGenerateContent?alt=sse", strings.TrimRight(normalizedBaseURL, "/")) + req, err = http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(wrappedBytes)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", 
"Bearer "+accessToken) + req.Header.Set("User-Agent", geminicli.GeminiCLIUserAgent) + } + + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + resp, err := s.httpUpstream.DoWithTLS(req, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) + if err != nil { + s.saveProbeResult(context.Background(), account.ID, false, 0, err) + return err + } + defer func() { _ = resp.Body.Close() }() + + // Persist Gemini (simulated) quota snapshot to DB extra. + if s.accountUsageService != nil { + updateCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + _ = s.accountUsageService.SyncUsageSnapshotToExtra(updateCtx, account, usageSnapshotSourceProbe) + cancel() + } + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096)) + err := fmt.Errorf("gemini oauth probe status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(body))) + s.saveProbeResult(context.Background(), account.ID, false, resp.StatusCode, err) + return err + } + + s.saveProbeResult(context.Background(), account.ID, true, resp.StatusCode, nil) + return nil +} + +func (s *OAuthProbeService) probeClaudeOAuth(parent context.Context, account *Account) error { + if account == nil || account.Platform != PlatformAnthropic || !account.IsOAuth() { + return nil + } + if s.httpUpstream == nil { + return fmt.Errorf("http upstream not configured") + } + + timeout := time.Duration(s.cfg.RequestTimeoutSeconds) * time.Second + if timeout <= 0 { + timeout = 20 * time.Second + } + ctx, cancel := context.WithTimeout(parent, timeout) + defer cancel() + + authToken := strings.TrimSpace(account.GetCredential("access_token")) + if account.Type == AccountTypeOAuth && s.claudeTokenProvider != nil { + if t, err := s.claudeTokenProvider.GetAccessToken(ctx, account); err == nil && strings.TrimSpace(t) != "" { + authToken = t + } + } + if authToken == "" { + return fmt.Errorf("missing access token") + } + + nonce, _ := randomHexString(8) + payload, err := createTestPayload(claude.DefaultTestModel, nonce) + if err != nil { + return err + } + payloadBytes, _ := json.Marshal(payload) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, testClaudeAPIURL, bytes.NewReader(payloadBytes)) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("anthropic-version", "2023-06-01") + req.Header.Set("anthropic-beta", claude.DefaultBetaHeader) + for key, value := range claude.DefaultHeaders { + req.Header.Set(key, value) + } + req.Header.Set("Authorization", "Bearer "+authToken) + + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + resp, err := s.httpUpstream.DoWithTLS(req, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) + if err != nil { + s.saveProbeResult(context.Background(), account.ID, false, 0, err) + return err + } + defer func() { _ = resp.Body.Close() }() + + // Persist Claude quota snapshot (5h/7d windows) to DB extra. + // This calls Anthropic's OAuth usage API, which should not consume message quota. 
+ if s.accountUsageService != nil { + updateCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + _ = s.accountUsageService.SyncUsageSnapshotToExtra(updateCtx, account, usageSnapshotSourceProbe) + cancel() + } + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096)) + err := fmt.Errorf("claude oauth probe status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(body))) + s.saveProbeResult(context.Background(), account.ID, false, resp.StatusCode, err) + return err + } + + s.saveProbeResult(context.Background(), account.ID, true, resp.StatusCode, nil) + return nil +} + +func (s *OAuthProbeService) saveProbeResult(ctx context.Context, accountID int64, ok bool, statusCode int, err error) { + if s == nil || s.accountRepo == nil || accountID <= 0 { + return + } + + msg := "" + if err != nil { + msg = strings.TrimSpace(err.Error()) + if len(msg) > 256 { + msg = msg[:256] + } + } + + updates := map[string]any{ + "oauth_probe": map[string]any{ + "updated_at": time.Now().UTC().Format(time.RFC3339), + "ok": ok, + "status_code": statusCode, + "error": msg, + }, + } + + updateCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + _ = s.accountRepo.UpdateExtra(updateCtx, accountID, updates) +} + +func (s *OAuthProbeService) validateUpstreamBaseURL(raw string) (string, error) { + if s.securityCfg == nil { + return urlvalidator.ValidateURLFormat(raw, true) + } + if !s.securityCfg.URLAllowlist.Enabled { + return urlvalidator.ValidateURLFormat(raw, s.securityCfg.URLAllowlist.AllowInsecureHTTP) + } + normalized, err := urlvalidator.ValidateHTTPSURL(raw, urlvalidator.ValidationOptions{ + AllowedHosts: s.securityCfg.URLAllowlist.UpstreamHosts, + RequireAllowlist: true, + AllowPrivate: s.securityCfg.URLAllowlist.AllowPrivateHosts, + }) + if err != nil { + return "", err + } + return normalized, nil +} diff --git a/backend/internal/service/openai_codex_usage_snapshot.go b/backend/internal/service/openai_codex_usage_snapshot.go new file mode 100644 index 000000000..59a564979 --- /dev/null +++ b/backend/internal/service/openai_codex_usage_snapshot.go @@ -0,0 +1,205 @@ +package service + +import ( + "time" +) + +type codexDerivedUsageSnapshot struct { + updates map[string]any + + updatedAt time.Time + + fiveHourUsedPercent *float64 + fiveHourResetAfterSeconds *int + fiveHourWindowMinutes *int + fiveHourResetAt *time.Time + + sevenDayUsedPercent *float64 + sevenDayResetAfterSeconds *int + sevenDayWindowMinutes *int + sevenDayResetAt *time.Time +} + +func deriveCodexUsageSnapshot(snapshot *OpenAICodexUsageSnapshot) *codexDerivedUsageSnapshot { + if snapshot == nil { + return nil + } + + updates := make(map[string]any) + + if snapshot.PrimaryUsedPercent != nil { + updates["codex_primary_used_percent"] = *snapshot.PrimaryUsedPercent + } + if snapshot.PrimaryResetAfterSeconds != nil { + updates["codex_primary_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds + } + if snapshot.PrimaryWindowMinutes != nil { + updates["codex_primary_window_minutes"] = *snapshot.PrimaryWindowMinutes + } + if snapshot.SecondaryUsedPercent != nil { + updates["codex_secondary_used_percent"] = *snapshot.SecondaryUsedPercent + } + if snapshot.SecondaryResetAfterSeconds != nil { + updates["codex_secondary_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds + } + if snapshot.SecondaryWindowMinutes != nil { + updates["codex_secondary_window_minutes"] = *snapshot.SecondaryWindowMinutes + } + if snapshot.PrimaryOverSecondaryPercent != nil { + 
updates["codex_primary_over_secondary_percent"] = *snapshot.PrimaryOverSecondaryPercent + } + if snapshot.UpdatedAt != "" { + updates["codex_usage_updated_at"] = snapshot.UpdatedAt + } + + updatedAt := time.Now() + if snapshot.UpdatedAt != "" { + if t, err := time.Parse(time.RFC3339, snapshot.UpdatedAt); err == nil { + updatedAt = t + } + } + + // Normalize to canonical 5h/7d fields based on window_minutes + // This fixes the issue where OpenAI's primary/secondary naming is ambiguous across accounts. + // + // IMPORTANT: We can only reliably determine window type from window_minutes field. + // reset_after_seconds is remaining time, not window size, so it cannot be used for comparison. + + var primaryWindowMins, secondaryWindowMins int + var hasPrimaryWindow, hasSecondaryWindow bool + + if snapshot.PrimaryWindowMinutes != nil { + primaryWindowMins = *snapshot.PrimaryWindowMinutes + hasPrimaryWindow = true + } + if snapshot.SecondaryWindowMinutes != nil { + secondaryWindowMins = *snapshot.SecondaryWindowMinutes + hasSecondaryWindow = true + } + + var use5hFromPrimary, use7dFromPrimary bool + var use5hFromSecondary, use7dFromSecondary bool + + if hasPrimaryWindow && hasSecondaryWindow { + // Both window sizes known: compare and assign smaller to 5h, larger to 7d. + if primaryWindowMins < secondaryWindowMins { + use5hFromPrimary = true + use7dFromSecondary = true + } else { + use5hFromSecondary = true + use7dFromPrimary = true + } + } else if hasPrimaryWindow { + // Only primary window size known: classify by absolute threshold. + if primaryWindowMins <= 360 { + use5hFromPrimary = true + } else { + use7dFromPrimary = true + } + } else if hasSecondaryWindow { + // Only secondary window size known: classify by absolute threshold. + if secondaryWindowMins <= 360 { + use5hFromSecondary = true + } else { + use7dFromSecondary = true + } + } else { + // No window_minutes available: cannot reliably determine window types. + // Fall back to legacy assumption (may be incorrect): + // assume primary=7d, secondary=5h based on historical observation. 
+ if snapshot.SecondaryUsedPercent != nil || snapshot.SecondaryResetAfterSeconds != nil || snapshot.SecondaryWindowMinutes != nil { + use5hFromSecondary = true + } + if snapshot.PrimaryUsedPercent != nil || snapshot.PrimaryResetAfterSeconds != nil || snapshot.PrimaryWindowMinutes != nil { + use7dFromPrimary = true + } + } + + d := &codexDerivedUsageSnapshot{ + updates: updates, + updatedAt: updatedAt, + } + + if use5hFromPrimary { + d.fiveHourUsedPercent = snapshot.PrimaryUsedPercent + d.fiveHourResetAfterSeconds = snapshot.PrimaryResetAfterSeconds + d.fiveHourWindowMinutes = snapshot.PrimaryWindowMinutes + if snapshot.PrimaryUsedPercent != nil { + updates["codex_5h_used_percent"] = *snapshot.PrimaryUsedPercent + } + if snapshot.PrimaryResetAfterSeconds != nil { + updates["codex_5h_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds + } + if snapshot.PrimaryWindowMinutes != nil { + updates["codex_5h_window_minutes"] = *snapshot.PrimaryWindowMinutes + } + } else if use5hFromSecondary { + d.fiveHourUsedPercent = snapshot.SecondaryUsedPercent + d.fiveHourResetAfterSeconds = snapshot.SecondaryResetAfterSeconds + d.fiveHourWindowMinutes = snapshot.SecondaryWindowMinutes + if snapshot.SecondaryUsedPercent != nil { + updates["codex_5h_used_percent"] = *snapshot.SecondaryUsedPercent + } + if snapshot.SecondaryResetAfterSeconds != nil { + updates["codex_5h_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds + } + if snapshot.SecondaryWindowMinutes != nil { + updates["codex_5h_window_minutes"] = *snapshot.SecondaryWindowMinutes + } + } + + if use7dFromPrimary { + d.sevenDayUsedPercent = snapshot.PrimaryUsedPercent + d.sevenDayResetAfterSeconds = snapshot.PrimaryResetAfterSeconds + d.sevenDayWindowMinutes = snapshot.PrimaryWindowMinutes + if snapshot.PrimaryUsedPercent != nil { + updates["codex_7d_used_percent"] = *snapshot.PrimaryUsedPercent + } + if snapshot.PrimaryResetAfterSeconds != nil { + updates["codex_7d_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds + } + if snapshot.PrimaryWindowMinutes != nil { + updates["codex_7d_window_minutes"] = *snapshot.PrimaryWindowMinutes + } + } else if use7dFromSecondary { + d.sevenDayUsedPercent = snapshot.SecondaryUsedPercent + d.sevenDayResetAfterSeconds = snapshot.SecondaryResetAfterSeconds + d.sevenDayWindowMinutes = snapshot.SecondaryWindowMinutes + if snapshot.SecondaryUsedPercent != nil { + updates["codex_7d_used_percent"] = *snapshot.SecondaryUsedPercent + } + if snapshot.SecondaryResetAfterSeconds != nil { + updates["codex_7d_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds + } + if snapshot.SecondaryWindowMinutes != nil { + updates["codex_7d_window_minutes"] = *snapshot.SecondaryWindowMinutes + } + } + + // Compute absolute reset timestamps using snapshot updated_at + reset_after_seconds. 
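+	// e.g. updated_at=2026-01-01T00:00:00Z with reset_after_seconds=300 yields a reset_at of 2026-01-01T00:05:00Z.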
+    if d.fiveHourResetAfterSeconds != nil {
+        resetAt := updatedAt.Add(time.Duration(*d.fiveHourResetAfterSeconds) * time.Second)
+        d.fiveHourResetAt = &resetAt
+        updates["codex_5h_reset_at"] = resetAt.UTC().Format(time.RFC3339)
+    }
+    if d.sevenDayResetAfterSeconds != nil {
+        resetAt := updatedAt.Add(time.Duration(*d.sevenDayResetAfterSeconds) * time.Second)
+        d.sevenDayResetAt = &resetAt
+        updates["codex_7d_reset_at"] = resetAt.UTC().Format(time.RFC3339)
+    }
+
+    return d
+}
+
+func codexRateLimitResetAt(derived *codexDerivedUsageSnapshot) *time.Time {
+    if derived == nil {
+        return nil
+    }
+    if derived.sevenDayUsedPercent != nil && *derived.sevenDayUsedPercent >= 100 && derived.sevenDayResetAt != nil {
+        return derived.sevenDayResetAt
+    }
+    if derived.fiveHourUsedPercent != nil && *derived.fiveHourUsedPercent >= 100 && derived.fiveHourResetAt != nil {
+        return derived.fiveHourResetAt
+    }
+    return nil
+}
diff --git a/backend/internal/service/openai_codex_usage_snapshot_test.go b/backend/internal/service/openai_codex_usage_snapshot_test.go
new file mode 100644
index 000000000..b94739271
--- /dev/null
+++ b/backend/internal/service/openai_codex_usage_snapshot_test.go
@@ -0,0 +1,142 @@
+package service
+
+import (
+    "testing"
+    "time"
+)
+
+func TestDeriveCodexUsageSnapshot_Assigns5hAnd7dByWindowMinutes(t *testing.T) {
+    now := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
+    updatedAt := now.Format(time.RFC3339)
+
+    primaryUsed := 80.0
+    primaryReset := 100
+    primaryWindow := 10080
+
+    secondaryUsed := 50.0
+    secondaryReset := 200
+    secondaryWindow := 300
+
+    snapshot := &OpenAICodexUsageSnapshot{
+        PrimaryUsedPercent:         &primaryUsed,
+        PrimaryResetAfterSeconds:   &primaryReset,
+        PrimaryWindowMinutes:       &primaryWindow,
+        SecondaryUsedPercent:       &secondaryUsed,
+        SecondaryResetAfterSeconds: &secondaryReset,
+        SecondaryWindowMinutes:     &secondaryWindow,
+        UpdatedAt:                  updatedAt,
+    }
+
+    derived := deriveCodexUsageSnapshot(snapshot)
+    if derived == nil {
+        t.Fatalf("expected derived snapshot, got nil")
+    }
+
+    if derived.fiveHourUsedPercent == nil || *derived.fiveHourUsedPercent != secondaryUsed {
+        t.Fatalf("expected 5h used=%v, got=%v", secondaryUsed, derefFloat(derived.fiveHourUsedPercent))
+    }
+    if derived.sevenDayUsedPercent == nil || *derived.sevenDayUsedPercent != primaryUsed {
+        t.Fatalf("expected 7d used=%v, got=%v", primaryUsed, derefFloat(derived.sevenDayUsedPercent))
+    }
+
+    if v, ok := derived.updates["codex_5h_used_percent"].(float64); !ok || v != secondaryUsed {
+        t.Fatalf("expected updates.codex_5h_used_percent=%v, got=%v", secondaryUsed, derived.updates["codex_5h_used_percent"])
+    }
+    if v, ok := derived.updates["codex_7d_used_percent"].(float64); !ok || v != primaryUsed {
+        t.Fatalf("expected updates.codex_7d_used_percent=%v, got=%v", primaryUsed, derived.updates["codex_7d_used_percent"])
+    }
+
+    fiveHourResetAt := now.Add(time.Duration(secondaryReset) * time.Second).UTC().Format(time.RFC3339)
+    if v, ok := derived.updates["codex_5h_reset_at"].(string); !ok || v != fiveHourResetAt {
+        t.Fatalf("expected updates.codex_5h_reset_at=%q, got=%v", fiveHourResetAt, derived.updates["codex_5h_reset_at"])
+    }
+
+    sevenDayResetAt := now.Add(time.Duration(primaryReset) * time.Second).UTC().Format(time.RFC3339)
+    if v, ok := derived.updates["codex_7d_reset_at"].(string); !ok || v != sevenDayResetAt {
+        t.Fatalf("expected updates.codex_7d_reset_at=%q, got=%v", sevenDayResetAt, derived.updates["codex_7d_reset_at"])
+    }
+}
+
+func TestCodexRateLimitResetAt_Prefers7dOver5h(t *testing.T) {
+    now := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
+    updatedAt := now.Format(time.RFC3339)
+
+    fiveHourUsed := 100.0
+    fiveHourReset := 60
+    fiveHourWindow := 300
+
+    sevenDayUsed := 100.0
+    sevenDayReset := 3600
+    sevenDayWindow := 10080
+
+    // Put 7d in primary, 5h in secondary.
+    snapshot := &OpenAICodexUsageSnapshot{
+        PrimaryUsedPercent:         &sevenDayUsed,
+        PrimaryResetAfterSeconds:   &sevenDayReset,
+        PrimaryWindowMinutes:       &sevenDayWindow,
+        SecondaryUsedPercent:       &fiveHourUsed,
+        SecondaryResetAfterSeconds: &fiveHourReset,
+        SecondaryWindowMinutes:     &fiveHourWindow,
+        UpdatedAt:                  updatedAt,
+    }
+
+    derived := deriveCodexUsageSnapshot(snapshot)
+    if derived == nil {
+        t.Fatalf("expected derived snapshot, got nil")
+    }
+
+    resetAt := codexRateLimitResetAt(derived)
+    if resetAt == nil {
+        t.Fatalf("expected resetAt, got nil")
+    }
+
+    want := now.Add(time.Duration(sevenDayReset) * time.Second)
+    if !resetAt.Equal(want) {
+        t.Fatalf("expected resetAt=%v, got=%v", want, *resetAt)
+    }
+}
+
+func TestCodexRateLimitResetAt_Uses5hWhen7dNotFull(t *testing.T) {
+    now := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
+    updatedAt := now.Format(time.RFC3339)
+
+    fiveHourUsed := 100.0
+    fiveHourReset := 120
+    fiveHourWindow := 300
+
+    sevenDayUsed := 80.0
+    sevenDayReset := 3600
+    sevenDayWindow := 10080
+
+    snapshot := &OpenAICodexUsageSnapshot{
+        PrimaryUsedPercent:         &sevenDayUsed,
+        PrimaryResetAfterSeconds:   &sevenDayReset,
+        PrimaryWindowMinutes:       &sevenDayWindow,
+        SecondaryUsedPercent:       &fiveHourUsed,
+        SecondaryResetAfterSeconds: &fiveHourReset,
+        SecondaryWindowMinutes:     &fiveHourWindow,
+        UpdatedAt:                  updatedAt,
+    }
+
+    derived := deriveCodexUsageSnapshot(snapshot)
+    if derived == nil {
+        t.Fatalf("expected derived snapshot, got nil")
+    }
+
+    resetAt := codexRateLimitResetAt(derived)
+    if resetAt == nil {
+        t.Fatalf("expected resetAt, got nil")
+    }
+
+    want := now.Add(time.Duration(fiveHourReset) * time.Second)
+    if !resetAt.Equal(want) {
+        t.Fatalf("expected resetAt=%v, got=%v", want, *resetAt)
+    }
+}
+
+func derefFloat(v *float64) any {
+    if v == nil {
+        return nil
+    }
+    return *v
+}
diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go
index 65ba01b37..1302201b3 100644
--- a/backend/internal/service/openai_gateway_service.go
+++ b/backend/internal/service/openai_gateway_service.go
@@ -1734,145 +1734,22 @@ func extractCodexUsageHeaders(headers http.Header) *OpenAICodexUsageSnapshot {
 // updateCodexUsageSnapshot saves the Codex usage snapshot to account's Extra field
 func (s *OpenAIGatewayService) updateCodexUsageSnapshot(ctx context.Context, accountID int64, snapshot *OpenAICodexUsageSnapshot) {
-    if snapshot == nil {
+    derived := deriveCodexUsageSnapshot(snapshot)
+    if derived == nil || len(derived.updates) == 0 {
         return
     }
-    // Convert snapshot to map for merging into Extra
-    updates := make(map[string]any)
-    if snapshot.PrimaryUsedPercent != nil {
-        updates["codex_primary_used_percent"] = *snapshot.PrimaryUsedPercent
-    }
-    if snapshot.PrimaryResetAfterSeconds != nil {
-        updates["codex_primary_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds
-    }
-    if snapshot.PrimaryWindowMinutes != nil {
-        updates["codex_primary_window_minutes"] = *snapshot.PrimaryWindowMinutes
-    }
-    if snapshot.SecondaryUsedPercent != nil {
-        updates["codex_secondary_used_percent"] = *snapshot.SecondaryUsedPercent
-    }
-    if snapshot.SecondaryResetAfterSeconds != nil {
-        updates["codex_secondary_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds
-    }
-    if snapshot.SecondaryWindowMinutes != nil {
-        updates["codex_secondary_window_minutes"] = *snapshot.SecondaryWindowMinutes
-    }
-    if snapshot.PrimaryOverSecondaryPercent != nil {
-        updates["codex_primary_over_secondary_percent"] = *snapshot.PrimaryOverSecondaryPercent
-    }
-    updates["codex_usage_updated_at"] = snapshot.UpdatedAt
-
-    // Normalize to canonical 5h/7d fields based on window_minutes
-    // This fixes the issue where OpenAI's primary/secondary naming is reversed
-    // Strategy: Compare the two windows and assign the smaller one to 5h, larger one to 7d
-
-    // IMPORTANT: We can only reliably determine window type from window_minutes field
-    // The reset_after_seconds is remaining time, not window size, so it cannot be used for comparison
-
-    var primaryWindowMins, secondaryWindowMins int
-    var hasPrimaryWindow, hasSecondaryWindow bool
-
-    // Only use window_minutes for reliable window size comparison
-    if snapshot.PrimaryWindowMinutes != nil {
-        primaryWindowMins = *snapshot.PrimaryWindowMinutes
-        hasPrimaryWindow = true
-    }
-
-    if snapshot.SecondaryWindowMinutes != nil {
-        secondaryWindowMins = *snapshot.SecondaryWindowMinutes
-        hasSecondaryWindow = true
-    }
-
-    // Determine which is 5h and which is 7d
-    var use5hFromPrimary, use7dFromPrimary bool
-    var use5hFromSecondary, use7dFromSecondary bool
-
-    if hasPrimaryWindow && hasSecondaryWindow {
-        // Both window sizes known: compare and assign smaller to 5h, larger to 7d
-        if primaryWindowMins < secondaryWindowMins {
-            use5hFromPrimary = true
-            use7dFromSecondary = true
-        } else {
-            use5hFromSecondary = true
-            use7dFromPrimary = true
-        }
-    } else if hasPrimaryWindow {
-        // Only primary window size known: classify by absolute threshold
-        if primaryWindowMins <= 360 {
-            use5hFromPrimary = true
-        } else {
-            use7dFromPrimary = true
-        }
-    } else if hasSecondaryWindow {
-        // Only secondary window size known: classify by absolute threshold
-        if secondaryWindowMins <= 360 {
-            use5hFromSecondary = true
-        } else {
-            use7dFromSecondary = true
-        }
-    } else {
-        // No window_minutes available: cannot reliably determine window types
-        // Fall back to legacy assumption (may be incorrect)
-        // Assume primary=7d, secondary=5h based on historical observation
-        if snapshot.SecondaryUsedPercent != nil || snapshot.SecondaryResetAfterSeconds != nil || snapshot.SecondaryWindowMinutes != nil {
-            use5hFromSecondary = true
-        }
-        if snapshot.PrimaryUsedPercent != nil || snapshot.PrimaryResetAfterSeconds != nil || snapshot.PrimaryWindowMinutes != nil {
-            use7dFromPrimary = true
-        }
-    }
-
-    // Write canonical 5h fields
-    if use5hFromPrimary {
-        if snapshot.PrimaryUsedPercent != nil {
-            updates["codex_5h_used_percent"] = *snapshot.PrimaryUsedPercent
-        }
-        if snapshot.PrimaryResetAfterSeconds != nil {
-            updates["codex_5h_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds
-        }
-        if snapshot.PrimaryWindowMinutes != nil {
-            updates["codex_5h_window_minutes"] = *snapshot.PrimaryWindowMinutes
-        }
-    } else if use5hFromSecondary {
-        if snapshot.SecondaryUsedPercent != nil {
-            updates["codex_5h_used_percent"] = *snapshot.SecondaryUsedPercent
-        }
-        if snapshot.SecondaryResetAfterSeconds != nil {
-            updates["codex_5h_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds
-        }
-        if snapshot.SecondaryWindowMinutes != nil {
-            updates["codex_5h_window_minutes"] = *snapshot.SecondaryWindowMinutes
-        }
-    }
-
-    // Write canonical 7d fields
-    if use7dFromPrimary {
-        if snapshot.PrimaryUsedPercent != nil {
-            updates["codex_7d_used_percent"] = *snapshot.PrimaryUsedPercent
-        }
-        if snapshot.PrimaryResetAfterSeconds != nil {
-            updates["codex_7d_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds
-        }
-        if snapshot.PrimaryWindowMinutes != nil {
-            updates["codex_7d_window_minutes"] = *snapshot.PrimaryWindowMinutes
-        }
-    } else if use7dFromSecondary {
-        if snapshot.SecondaryUsedPercent != nil {
-            updates["codex_7d_used_percent"] = *snapshot.SecondaryUsedPercent
-        }
-        if snapshot.SecondaryResetAfterSeconds != nil {
-            updates["codex_7d_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds
-        }
-        if snapshot.SecondaryWindowMinutes != nil {
-            updates["codex_7d_window_minutes"] = *snapshot.SecondaryWindowMinutes
-        }
-    }
-
-    // Update account's Extra field asynchronously
     go func() {
         updateCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
         defer cancel()
-        _ = s.accountRepo.UpdateExtra(updateCtx, accountID, updates)
+
+        _ = s.accountRepo.UpdateExtra(updateCtx, accountID, derived.updates)
+
+        // Persist real window-based cooldown to account.rate_limit_reset_at:
+        // - 7d>=100% => cooldown until 7d reset
+        // - else if 5h>=100% => cooldown until 5h reset
+        if resetAt := codexRateLimitResetAt(derived); resetAt != nil && resetAt.After(time.Now()) {
+            _ = s.accountRepo.SetRateLimited(updateCtx, accountID, *resetAt)
+        }
     }()
 }
diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go
index 41bd253cf..79602cc10 100644
--- a/backend/internal/service/ratelimit_service.go
+++ b/backend/internal/service/ratelimit_service.go
@@ -343,6 +343,28 @@ func (s *RateLimitService) handleCustomErrorCode(ctx context.Context, account *A
 // handle429 处理429限流错误
 // 解析响应头获取重置时间,标记账号为限流状态
 func (s *RateLimitService) handle429(ctx context.Context, account *Account, headers http.Header, responseBody []byte) {
+    // OpenAI OAuth (ChatGPT Codex) uses x-codex-* headers to describe real quota windows (5h/7d).
+    // If we can derive an exact reset time from those headers, prefer it over the generic 5-minute fallback.
+    if account != nil && account.Platform == PlatformOpenAI && account.Type == AccountTypeOAuth {
+        if snapshot := extractCodexUsageHeaders(headers); snapshot != nil {
+            derived := deriveCodexUsageSnapshot(snapshot)
+            if derived != nil && len(derived.updates) > 0 {
+                if err := s.accountRepo.UpdateExtra(ctx, account.ID, derived.updates); err != nil {
+                    slog.Warn("openai_codex_usage_snapshot_update_failed", "account_id", account.ID, "error", err)
+                }
+            }
+
+            if resetAt := codexRateLimitResetAt(derived); resetAt != nil && resetAt.After(time.Now()) {
+                if err := s.accountRepo.SetRateLimited(ctx, account.ID, *resetAt); err != nil {
+                    slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err)
+                } else {
+                    slog.Info("openai_oauth_codex_rate_limited", "account_id", account.ID, "reset_at", *resetAt)
+                }
+                return
+            }
+        }
+    }
+
     // 解析重置时间戳
     resetTimestamp := headers.Get("anthropic-ratelimit-unified-reset")
     if resetTimestamp == "" {
diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go
index b210286d3..c7b1f9f5c 100644
--- a/backend/internal/service/wire.go
+++ b/backend/internal/service/wire.go
@@ -51,6 +51,21 @@ func ProvideTokenRefreshService(
     return svc
 }
+// ProvideOAuthProbeService creates and starts OAuthProbeService.
+func ProvideOAuthProbeService(
+    accountRepo AccountRepository,
+    httpUpstream HTTPUpstream,
+    openAITokenProvider *OpenAITokenProvider,
+    geminiTokenProvider *GeminiTokenProvider,
+    claudeTokenProvider *ClaudeTokenProvider,
+    accountUsageService *AccountUsageService,
+    cfg *config.Config,
+) *OAuthProbeService {
+    svc := NewOAuthProbeService(accountRepo, httpUpstream, openAITokenProvider, geminiTokenProvider, claudeTokenProvider, accountUsageService, cfg)
+    svc.Start()
+    return svc
+}
+
 // ProvideDashboardAggregationService 创建并启动仪表盘聚合服务
 func ProvideDashboardAggregationService(repo DashboardAggregationRepository, timingWheel *TimingWheelService, cfg *config.Config) *DashboardAggregationService {
     svc := NewDashboardAggregationService(repo, timingWheel, cfg)
@@ -255,6 +270,7 @@ var ProviderSet = wire.NewSet(
     NewCRSSyncService,
     ProvideUpdateService,
     ProvideTokenRefreshService,
+    ProvideOAuthProbeService,
     ProvideAccountExpiryService,
     ProvideTimingWheelService,
     ProvideDashboardAggregationService,
diff --git a/config.yaml b/config.yaml
index 5e7513fb5..a1ad946e4 100644
--- a/config.yaml
+++ b/config.yaml
@@ -525,3 +525,37 @@ gemini:
   # Cooldown time (minutes) after hitting quota
   # 达到配额后的冷却时间(分钟)
   cooldown_minutes: 5
+
+# =============================================================================
+# OAuth Probe (Optional)
+# OAuth2 探活/配额同步(可选)
+# =============================================================================
+# Periodically sends "test connection" style requests to OAuth/Setup-Token accounts to:
+# 定期对 OAuth/Setup-Token 账号执行“测试连接”式请求,用于:
+# - Sync quota/usage even when the account is not scheduled
+# - 在账号未被调度时同步用量/限额信息
+# - Refresh OpenAI OAuth Codex 5h/7d usage windows (from x-codex-* response headers)
+# - 刷新 OpenAI OAuth 的 Codex 5h/7d 窗口用量(来自响应头 x-codex-*)
+#
+# WARNING: Probing consumes real quota. Keep it disabled unless needed.
+# 注意:探活会消耗真实配额,非必要请保持关闭。
+oauth_probe:
+  # Enable periodic probing
+  # 是否启用
+  enabled: false
+  # Check interval (minutes)
+  # 检查间隔(分钟)
+  check_interval_minutes: 15
+  # Idle threshold (minutes): only probe accounts that have NOT been used within this window.
+  # 闲置阈值(分钟):仅当账号在该时长内未被使用,才会触发探活(兜底机制)。
+  # <=0 表示不按闲置过滤(每轮都会探活符合条件的账号)。
+  idle_threshold_minutes: 15
+  # Per-request timeout (seconds)
+  # 单次请求超时(秒)
+  request_timeout_seconds: 20
+  # Max concurrent probes
+  # 最大并发探活数
+  max_concurrency: 2
+  # Max accounts to probe per cycle (0 = unlimited)
+  # 每轮最多探活账号数(0 = 不限制)
+  max_accounts_per_cycle: 0
diff --git a/frontend/src/components/account/AccountUsageCell.vue b/frontend/src/components/account/AccountUsageCell.vue
index c0212c5a5..78a6e1f39 100644
--- a/frontend/src/components/account/AccountUsageCell.vue
+++ b/frontend/src/components/account/AccountUsageCell.vue
@@ -1,66 +1,39 @@