diff --git a/.env.example b/.env.example index 72351421..22c61142 100644 --- a/.env.example +++ b/.env.example @@ -85,6 +85,32 @@ # Path to your iFlow credential file (e.g., ~/.iflow/oauth_creds.json). #IFLOW_OAUTH_1="" +# --- OpenAI Codex (ChatGPT OAuth) --- +# One-time import from Codex CLI auth files (copied into oauth_creds/openai_codex_oauth_*.json) +#OPENAI_CODEX_OAUTH_1="~/.codex/auth.json" + +# Stateless env credentials (legacy single account) +#OPENAI_CODEX_ACCESS_TOKEN="" +#OPENAI_CODEX_REFRESH_TOKEN="" +#OPENAI_CODEX_EXPIRY_DATE="0" +#OPENAI_CODEX_ID_TOKEN="" +#OPENAI_CODEX_ACCOUNT_ID="" +#OPENAI_CODEX_EMAIL="" + +# Stateless env credentials (numbered multi-account) +#OPENAI_CODEX_1_ACCESS_TOKEN="" +#OPENAI_CODEX_1_REFRESH_TOKEN="" +#OPENAI_CODEX_1_EXPIRY_DATE="0" +#OPENAI_CODEX_1_ID_TOKEN="" +#OPENAI_CODEX_1_ACCOUNT_ID="" +#OPENAI_CODEX_1_EMAIL="" + +# OpenAI Codex routing/config +#OPENAI_CODEX_API_BASE="https://chatgpt.com/backend-api" +#OPENAI_CODEX_OAUTH_PORT=1455 +#OPENAI_CODEX_MODELS='["gpt-5.1-codex","gpt-5-codex"]' +#ROTATION_MODE_OPENAI_CODEX=sequential + # ------------------------------------------------------------------------------ # | [ADVANCED] Provider-Specific Settings | @@ -162,6 +188,7 @@ # # Provider Defaults: # - antigravity: sequential (free tier accounts with daily quotas) +# - openai_codex: sequential (account-level quota behavior) # - All others: balanced # # Example: @@ -401,8 +428,24 @@ # ------------------------------------------------------------------------------ # # OAuth callback port for Antigravity interactive re-authentication. -# Default: 8085 (same as Gemini CLI, shared) -# ANTIGRAVITY_OAUTH_PORT=8085 +# Default: 51121 +# ANTIGRAVITY_OAUTH_PORT=51121 + +# ------------------------------------------------------------------------------ +# | [ADVANCED] iFlow OAuth Configuration | +# ------------------------------------------------------------------------------ +# +# OAuth callback port for iFlow interactive re-authentication. +# Default: 11451 +# IFLOW_OAUTH_PORT=11451 + +# ------------------------------------------------------------------------------ +# | [ADVANCED] OpenAI Codex OAuth Configuration | +# ------------------------------------------------------------------------------ +# +# OAuth callback port for OpenAI Codex interactive authentication. +# Default: 1455 +# OPENAI_CODEX_OAUTH_PORT=1455 # ------------------------------------------------------------------------------ # | [ADVANCED] Debugging / Logging | diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index 905ab4b0..6ec91790 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -205,15 +205,17 @@ The `CredentialManager` class (`credential_manager.py`) centralizes the lifecycl On startup (unless `SKIP_OAUTH_INIT_CHECK=true`), the manager performs a comprehensive sweep: -1. **System-Wide Scan**: Searches for OAuth credential files in standard locations: +1. **System-Wide Scan / Import Sources**: - `~/.gemini/` → All `*.json` files (typically `credentials.json`) - `~/.qwen/` → All `*.json` files (typically `oauth_creds.json`) - - `~/.iflow/` → All `*. json` files + - `~/.iflow/` → All `*.json` files + - `~/.codex/auth.json` + `~/.codex-accounts.json` → OpenAI Codex first-run import sources 2. **Local Import**: Valid credentials are **copied** (not moved) to the project's `oauth_creds/` directory with standardized names: - - `gemini_cli_oauth_1.json`, `gemini_cli_oauth_2.json`, etc. + - `gemini_cli_oauth_1.json`, `gemini_cli_oauth_2.json`, etc. 
- `qwen_code_oauth_1.json`, `qwen_code_oauth_2.json`, etc. - `iflow_oauth_1.json`, `iflow_oauth_2.json`, etc. + - `openai_codex_oauth_1.json`, `openai_codex_oauth_2.json`, etc. 3. **Intelligent Deduplication**: - The manager inspects each credential file for a `_proxy_metadata` field containing the user's email or ID @@ -292,6 +294,24 @@ IFLOW_EMAIL IFLOW_API_KEY ``` +**OpenAI Codex Environment Variables:** +``` +OPENAI_CODEX_ACCESS_TOKEN +OPENAI_CODEX_REFRESH_TOKEN +OPENAI_CODEX_EXPIRY_DATE +OPENAI_CODEX_ID_TOKEN +OPENAI_CODEX_ACCOUNT_ID +OPENAI_CODEX_EMAIL + +# Numbered multi-account format +OPENAI_CODEX_1_ACCESS_TOKEN +OPENAI_CODEX_1_REFRESH_TOKEN +OPENAI_CODEX_1_EXPIRY_DATE +OPENAI_CODEX_1_ID_TOKEN +OPENAI_CODEX_1_ACCOUNT_ID +OPENAI_CODEX_1_EMAIL +``` + **How it works:** - If the manager finds (e.g.) `GEMINI_CLI_ACCESS_TOKEN` or `GEMINI_CLI_1_ACCESS_TOKEN`, it constructs an in-memory credential object that mimics the file structure - The credential is referenced internally as `env://gemini_cli/0` (legacy) or `env://gemini_cli/1` (numbered) @@ -304,9 +324,11 @@ IFLOW_API_KEY env://{provider}/{index} Examples: -- env://gemini_cli/1 → GEMINI_CLI_1_ACCESS_TOKEN, etc. -- env://gemini_cli/0 → GEMINI_CLI_ACCESS_TOKEN (legacy single credential) -- env://antigravity/1 → ANTIGRAVITY_1_ACCESS_TOKEN, etc. +- env://gemini_cli/1 → GEMINI_CLI_1_ACCESS_TOKEN, etc. +- env://gemini_cli/0 → GEMINI_CLI_ACCESS_TOKEN (legacy single credential) +- env://antigravity/1 → ANTIGRAVITY_1_ACCESS_TOKEN, etc. +- env://openai_codex/1 → OPENAI_CODEX_1_ACCESS_TOKEN, etc. +- env://openai_codex/0 → OPENAI_CODEX_ACCESS_TOKEN (legacy single credential) ``` #### 2.6.3. Credential Tool Integration @@ -314,7 +336,7 @@ Examples: The `credential_tool.py` provides a user-friendly CLI interface to the `CredentialManager`: **Key Functions:** -1. **OAuth Setup**: Wraps provider-specific `AuthBase` classes (`GeminiAuthBase`, `QwenAuthBase`, `IFlowAuthBase`) to handle interactive login flows +1. **OAuth Setup**: Wraps provider-specific `AuthBase` classes (`GeminiAuthBase`, `QwenAuthBase`, `IFlowAuthBase`, `OpenAICodexAuthBase`) to handle interactive login flows 2. **Credential Export**: Reads local `.json` files and generates `.env` format output for stateless deployment 3. **API Key Management**: Adds or updates `PROVIDER_API_KEY_N` entries in the `.env` file @@ -1426,12 +1448,13 @@ Each OAuth provider uses a local callback server during authentication. The call | Gemini CLI | 8085 | `GEMINI_CLI_OAUTH_PORT` | | Antigravity | 51121 | `ANTIGRAVITY_OAUTH_PORT` | | iFlow | 11451 | `IFLOW_OAUTH_PORT` | +| OpenAI Codex | 1455 | `OPENAI_CODEX_OAUTH_PORT` | **Configuration Methods:** 1. **Via TUI Settings Menu:** - Main Menu → `4. View Provider & Advanced Settings` → `1. Launch Settings Tool` - - Select the provider (Gemini CLI, Antigravity, or iFlow) + - Select the provider (Gemini CLI, Antigravity, iFlow, or OpenAI Codex) - Modify the `*_OAUTH_PORT` setting - Use "Reset to Default" to restore the original port @@ -1441,6 +1464,7 @@ Each OAuth provider uses a local callback server during authentication. 
The call GEMINI_CLI_OAUTH_PORT=8085 ANTIGRAVITY_OAUTH_PORT=51121 IFLOW_OAUTH_PORT=11451 + OPENAI_CODEX_OAUTH_PORT=1455 ``` **When to Change Ports:** @@ -1528,7 +1552,7 @@ The following providers use `TimeoutConfig`: | `iflow_provider.py` | `acompletion()` | `streaming()` | | `qwen_code_provider.py` | `acompletion()` | `streaming()` | -**Note:** iFlow, Qwen Code, and Gemini CLI providers always use streaming internally (even for non-streaming requests), aggregating chunks into a complete response. Only Antigravity has a true non-streaming path. +**Note:** iFlow, Qwen Code, Gemini CLI, and OpenAI Codex providers always use streaming internally (even for non-streaming requests), aggregating chunks into a complete response. Only Antigravity has a true non-streaming path. #### Tuning Recommendations @@ -1649,7 +1673,24 @@ QUOTA_GROUPS_GEMINI_CLI_3_FLASH="gemini-3-flash-preview" * **Schema Cleaning**: Similar to Qwen, it aggressively sanitizes tool schemas to prevent 400 errors. * **Dedicated Logging**: Implements `_IFlowFileLogger` to capture raw chunks for debugging proprietary API behaviors. -### 3.4. Google Gemini (`gemini_provider.py`) +### 3.4. OpenAI Codex (`openai_codex_provider.py`) + +* **Auth Base**: Uses `OpenAICodexAuthBase` with Authorization Code + PKCE, queue-based refresh/re-auth, and local-first credential persistence (`oauth_creds/openai_codex_oauth_*.json`). +* **OAuth Client ID**: Uses OpenAI's public Codex OAuth client ID. This value is intentionally non-secret (OAuth client IDs identify the app, unlike client secrets). +* **First-Run Import**: `CredentialManager` imports from `~/.codex/auth.json` and `~/.codex-accounts.json` when no local/OpenAI Codex env creds exist. +* **Endpoint Translation**: Implements OpenAI-compatible `/v1/chat/completions` by transforming chat payloads into Codex Responses payloads and calling `POST /codex/responses`. +* **SSE Translation**: Maps Codex SSE event families (e.g. `response.output_item.*`, `response.output_text.delta`, `response.function_call_arguments.*`, `response.completed`) into LiteLLM/OpenAI chunk objects. +* **Rotation Compatibility**: Emits typed `httpx.HTTPStatusError` for transport/status failures and includes provider-specific `parse_quota_error()` for retry/cooldown extraction (`Retry-After`, `error.resets_at`). +* **Default Rotation**: `sequential` (account-level quota behavior). + +**OpenAI Codex Troubleshooting Notes:** + +- **Malformed JWT payload**: If access/id tokens cannot be decoded, account/email metadata can be missing; re-authenticate to rebuild token metadata. +- **Missing account-id claim**: Requests require `chatgpt-account-id`; if absent, refresh/re-auth to repopulate `_proxy_metadata.account_id`. +- **Callback port conflicts**: Change `OPENAI_CODEX_OAUTH_PORT` when port `1455` is already in use. +- **Header mismatch / 403**: Ensure provider sends `Authorization`, `chatgpt-account-id`, and expected Codex headers (`OpenAI-Beta`, `originator`) when routing to `/codex/responses`. + +### 3.5. Google Gemini (`gemini_provider.py`) * **Thinking Parameter**: Automatically handles the `thinking` parameter transformation required for Gemini 2.5 models (`thinking` -> `gemini-2.5-pro` reasoning parameter). * **Safety Settings**: Ensures default safety settings (blocking nothing) are applied if not provided, preventing over-sensitive refusals. 
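Aside on the troubleshooting notes above: they lean on decoding JWT payloads without signature verification to recover `account_id`, email, and expiry metadata. A minimal sketch of such a helper, matching the `decode_jwt_unverified` name imported in `credential_manager.py` below; the body here is illustrative rather than the shipped implementation:

```python
import base64
import json
from typing import Any, Dict, Optional


def decode_jwt_unverified(token: Optional[str]) -> Optional[Dict[str, Any]]:
    """Best-effort decode of a JWT payload without signature verification.

    Suitable only for reading informational claims (account id, email,
    expiry) from tokens the proxy already holds, never for auth checks.
    """
    if not token or token.count(".") != 2:
        return None
    payload_b64 = token.split(".")[1]
    # base64url payloads often omit "=" padding; restore it before decoding.
    payload_b64 += "=" * (-len(payload_b64) % 4)
    try:
        payload = json.loads(base64.urlsafe_b64decode(payload_b64))
    except ValueError:
        return None
    return payload if isinstance(payload, dict) else None
```

Codex-specific claims such as `https://api.openai.com/auth.chatgpt_account_id` then become plain dictionary lookups on the returned payload.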
diff --git a/PLAN-openai-codex.md b/PLAN-openai-codex.md new file mode 100644 index 00000000..0a528c3f --- /dev/null +++ b/PLAN-openai-codex.md @@ -0,0 +1,459 @@ +# PLAN: OpenAI Codex OAuth + Multi-Account Support (Revised) + +## Goal +Add first-class `openai_codex` support to LLM-API-Key-Proxy with: +- OAuth login + token refresh +- file/env credential loading +- multi-account rotation via existing `UsageManager` +- OpenAI-compatible `/v1/chat/completions` served through Codex Responses backend +- first-run import from existing Codex CLI credentials (`~/.codex/auth.json`, `~/.codex-accounts.json`) + +--- + +## Review updates applied in this revision + +- Aligned with current local-first architecture: **local managed creds stay in `oauth_creds/`**, not `~/.openai_codex`. +- Reduced MVP risk: **no cross-provider OAuth base refactor in phase 1**. +- Added protocol validation gate (headers/endpoints/SSE event taxonomy) before implementation. +- Expanded wiring checklist to all known hardcoded OAuth provider lists (credential tool, launcher TUI, settings tool). +- Added explicit `env://openai_codex/N` parity requirements and test-harness bootstrap work. + +--- + +## 0) Scope decisions + preflight validation (must lock before coding) + +### 0.1 Provider identity and defaults + +- [x] Provider key: `openai_codex` +- [x] OAuth env prefix: `OPENAI_CODEX` +- [x] Default API base: `https://chatgpt.com/backend-api` +- [x] Responses endpoint path: `/codex/responses` +- [x] Default rotation mode for provider: `sequential` +- [x] Callback env var: `OPENAI_CODEX_OAUTH_PORT` +- [x] JWT parsing strategy: unverified base64url decode (no `PyJWT` dependency) + +### 0.2 Architecture alignment (critical) + +- [x] Keep **local managed credentials** in project data dir: `oauth_creds/openai_codex_oauth_N.json` + - [x] Match existing patterns in `src/rotator_library/utils/paths.py` and other auth bases + - [x] Do **not** introduce a new default managed dir under `~/.openai_codex` for MVP +- [x] Treat `~/.codex/*` only as **import source**, never as primary writable store + +### 0.3 Protocol truth capture (before implementation) + +- [x] Capture one successful non-stream + stream Codex call and confirm: + - [x] Auth endpoint(s) and token exchange params + - [x] Required request headers (`chatgpt-account-id`, `OpenAI-Beta`, `originator`, etc.) 
+ - [x] SSE event names/payload shapes + - [x] Error body format for 401/403/429/5xx +- [x] Save representative payloads/events as test fixtures under `tests/fixtures/openai_codex/` + +--- + +## 1) OAuth + credential plumbing + +## 1.1 Add OpenAI Codex auth base (MVP approach: provider-specific class) + +- [x] Create `src/rotator_library/providers/openai_codex_auth_base.py` +- [x] Base implementation strategy for MVP: + - [x] Adapt proven queue/refresh/reauth approach from `qwen_auth_base.py` / `iflow_auth_base.py` + - [x] **Do not** refactor `GoogleOAuthBase` or create shared `oauth_base.py` in phase 1 + +### 1.1.1 Core lifecycle and queue infrastructure + +- [x] Implement credential cache/locking/queue internals: + - [x] `_credentials_cache`, `_load_credentials()`, `_save_credentials()` + - [x] `_refresh_locks`, `_locks_lock`, `_get_lock()` + - [x] `_refresh_queue`, `_reauth_queue` + - [x] `_queue_refresh()`, `_process_refresh_queue()`, `_process_reauth_queue()` + - [x] `_refresh_failures`, `_next_refresh_after` (backoff tracking) + - [x] `_queued_credentials`, `_unavailable_credentials`, TTL cleanup +- [x] Implement `is_credential_available(path)` with: + - [x] re-auth queue exclusion + - [x] true-expiry check (not proactive buffer) +- [x] Implement `proactively_refresh(credential_identifier)` queue-based behavior + +### 1.1.2 OAuth flow and refresh behavior + +- [x] Interactive OAuth with PKCE + state + - [x] Local callback: `http://localhost:{OPENAI_CODEX_OAUTH_PORT}/auth/callback` + - [x] `ReauthCoordinator` integration (single interactive flow globally) +- [x] Token exchange endpoint: `https://auth.openai.com/oauth/token` +- [x] Authorization endpoint: `https://auth.openai.com/oauth/authorize` +- [x] Refresh flow (`grant_type=refresh_token`) with retry/backoff (3 attempts) +- [x] Refresh error handling: + - [x] `400 invalid_grant` => queue re-auth + raise `CredentialNeedsReauthError` + - [x] `401/403` => queue re-auth + raise `CredentialNeedsReauthError` + - [x] `429` => honor `Retry-After` + - [x] `5xx` => exponential backoff retry + +### 1.1.3 Safe persistence semantics (critical) + +- [x] `_save_credentials()` uses `safe_write_json(..., secure_permissions=True)` +- [x] For rotating refresh-token safety: + - [x] Write-to-disk success required before cache mutation for refreshed tokens + - [x] Avoid stale-cache overwrite scenarios +- [x] Env-backed credentials (`_proxy_metadata.loaded_from_env=true`) skip disk writes safely + +### 1.1.4 JWT and metadata extraction + +- [x] Add unverified JWT decode helper (base64url payload decode with padding) +- [x] Extract from access token (fallback to `id_token`): + - [x] `account_id` claim: `https://api.openai.com/auth.chatgpt_account_id` + - [x] email claim fallback chain: `email` -> `sub` + - [x] `exp` for token expiry +- [x] Maintain metadata under `_proxy_metadata`: + - [x] `email`, `account_id`, `last_check_timestamp` + - [x] `loaded_from_env`, `env_credential_index` + +### 1.1.5 Env credential support + +- [x] Support both formats in `_load_from_env()`: + - [x] legacy single: `OPENAI_CODEX_ACCESS_TOKEN`, `OPENAI_CODEX_REFRESH_TOKEN`, ... + - [x] numbered: `OPENAI_CODEX_1_ACCESS_TOKEN`, `OPENAI_CODEX_1_REFRESH_TOKEN`, ... 
+- [x] Implement `_parse_env_credential_path(path)` for `env://openai_codex/N` +- [x] Ensure `_load_credentials()` works for file paths **and** `env://` virtual paths + +### 1.1.6 Public methods expected by tooling/runtime + +- [x] `setup_credential()` +- [x] `initialize_token(path_or_creds, force_interactive=False)` +- [x] `get_user_info(creds_or_path)` +- [x] `get_auth_header(credential_identifier)` +- [x] `list_credentials(base_dir)` +- [x] `delete_credential(path)` +- [x] `build_env_lines(creds, cred_number)` +- [x] `export_credential_to_env(credential_path, base_dir)` (used by credential tool export flows) +- [x] `_get_provider_file_prefix() -> "openai_codex"` + +### 1.1.7 Credential schema (`openai_codex_oauth_N.json`) + +```json +{ + "access_token": "eyJhbGciOi...", + "refresh_token": "rt_...", + "id_token": "eyJhbGciOi...", + "expiry_date": 1739400000000, + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": "user@example.com", + "account_id": "acct_...", + "last_check_timestamp": 1739396400.0, + "loaded_from_env": false, + "env_credential_index": null + } +} +``` + +> Note: client metadata like `client_id` should be class constants unless Codex token refresh explicitly requires persisted values. + +--- + +## 1.2 First-run import from Codex CLI credentials (CredentialManager integration) + +- [x] Update `src/rotator_library/credential_manager.py` to add Codex import helper + - [x] Trigger only when: + - [x] provider is `openai_codex` + - [x] no local `oauth_creds/openai_codex_oauth_*.json` + - [x] no env-based OpenAI Codex credentials already selected +- [x] Import sources (read-only): + - [x] `~/.codex/auth.json` (single account) + - [x] `~/.codex-accounts.json` (multi-account) +- [x] Normalize imported records to proxy schema +- [x] Extract and store `account_id` + email from JWT claims during import +- [x] Skip malformed entries gracefully with warnings +- [x] Preserve original source files untouched +- [x] Log import summary (count + identifiers) + +--- + +## 1.3 Wire registries and discovery maps + +- [x] Update `src/rotator_library/provider_factory.py` + - [x] Import `OpenAICodexAuthBase` + - [x] Add `"openai_codex": OpenAICodexAuthBase` to `PROVIDER_MAP` +- [x] Update `src/rotator_library/credential_manager.py` + - [x] Add to `DEFAULT_OAUTH_DIRS`: `"openai_codex": Path.home() / ".codex"` (source import context) + - [x] Add to `ENV_OAUTH_PROVIDERS`: `"openai_codex": "OPENAI_CODEX"` + +--- + +## 1.4 Wire credential UI, launcher UI, and settings UI + +### 1.4.1 Credential tool updates (`src/rotator_library/credential_tool.py`) + +- [x] Add to `OAUTH_FRIENDLY_NAMES`: `"openai_codex": "OpenAI Codex"` +- [x] Add to OAuth provider lists: + - [x] `_get_oauth_credentials_summary()` hardcoded list + - [x] `combine_all_credentials()` hardcoded list +- [x] Add to OAuth-only exclusions in API-key flow: + - [x] `oauth_only_providers` in `setup_api_key()` +- [x] Add to setup display mapping in `setup_new_credential()` +- [x] Export support: + - [x] Add OpenAI Codex export option(s) or refactor export menu to provider-driven generic flow + - [x] Ensure combine/export features call new auth-base methods + +### 1.4.2 Launcher TUI updates (`src/proxy_app/launcher_tui.py`) + +- [x] Add `"openai_codex": "OPENAI_CODEX"` to `env_oauth_providers` in `SettingsDetector.detect_credentials()` + +### 1.4.3 Settings tool updates (`src/proxy_app/settings_tool.py`) + +- [x] Import Codex default callback port from auth class with fallback constant +- [x] Add provider 
settings block for `openai_codex`: + - [x] `OPENAI_CODEX_OAUTH_PORT` +- [x] Register `openai_codex` in `PROVIDER_SETTINGS_MAP` + +--- + +## 1.5 Provider plugin auto-registration verification + +- [x] Create `src/rotator_library/providers/openai_codex_provider.py` + - [x] Confirm `providers/__init__.py` auto-registers as `openai_codex` +- [x] Verify name consistency across all maps/lists: + - [x] `PROVIDER_MAP` (`provider_factory.py`) + - [x] `DEFAULT_OAUTH_DIRS` / `ENV_OAUTH_PROVIDERS` (`credential_manager.py`) + - [x] `OAUTH_FRIENDLY_NAMES` + hardcoded OAuth lists (`credential_tool.py`) + - [x] `env_oauth_providers` (`launcher_tui.py`) + - [x] `PROVIDER_SETTINGS_MAP` (`settings_tool.py`) + +--- + +## 2) Codex inference provider (`openai_codex_provider.py`) + +## 2.1 Provider class skeleton + +- [x] Implement `OpenAICodexProvider(OpenAICodexAuthBase, ProviderInterface)` +- [x] Set class behavior: + - [x] `has_custom_logic() -> True` + - [x] `skip_cost_calculation = True` + - [x] `default_rotation_mode = "sequential"` + - [x] `provider_env_name = "openai_codex"` +- [x] `get_models()` model source order: + - [x] `OPENAI_CODEX_MODELS` via `ModelDefinitions` (priority) + - [x] hardcoded sane fallback models + - [x] optional dynamic discovery if Codex endpoint supports model listing + +## 2.2 Credential initialization + metadata cache + +- [x] Implement `initialize_credentials(credential_paths)` startup hook: + - [x] preload credentials (file + `env://`) + - [x] validate expiry and queue refresh where needed + - [x] parse/cache `account_id` and email + - [x] log summary of ready/refreshing/reauth-required credentials + +## 2.3 Non-streaming completion path + +- [x] Implement `acompletion()` for `stream=false` +- [x] Credential handling: + - [x] use `credential_identifier` from client + - [x] support file + `env://` paths consistently (no `os.path.isfile` shortcut assumptions) + - [x] ensure `initialize_token()` called before request when needed +- [x] Transform incoming OpenAI chat payload to Codex Responses payload: + - [x] `messages` -> Codex `input` + - [x] `model`, `temperature`, `top_p`, `max_tokens` + - [x] tools/tool_choice mapping where supported +- [x] Request target: + - [x] `POST ${OPENAI_CODEX_API_BASE or default}/codex/responses` +- [x] Required headers: + - [x] `Authorization: Bearer ` + - [x] `chatgpt-account-id: ` + - [x] protocol-validated beta/originator headers from preflight +- [x] Parse response into `litellm.ModelResponse` + +## 2.4 Streaming path + SSE translation + +- [x] Implement dedicated SSE parser/translator +- [x] Handle expected Codex event families (validated from fixtures): + - [x] `response.created` + - [x] `response.output_item.added` + - [x] `response.content_part.added` + - [x] `response.content_part.delta` + - [x] `response.content_part.done` + - [x] `response.output_item.done` + - [x] `response.completed` + - [x] `response.failed` / `response.incomplete` + - [x] `error` +- [x] Tool-call delta mapping: + - [x] `response.function_call_arguments.delta` + - [x] `response.function_call_arguments.done` +- [x] Emit translated `litellm.ModelResponse` chunks (not raw SSE strings) + - [x] compatible with `RotatingClient._safe_streaming_wrapper()` +- [x] Finish reason mapping: + - [x] stop -> `stop` + - [x] max_output_tokens -> `length` + - [x] tool_calls -> `tool_calls` + - [x] content_filter -> `content_filter` +- [x] Usage extraction from terminal event: + - [x] `input_tokens` -> `usage.prompt_tokens` + - [x] `output_tokens` -> `usage.completion_tokens` + - [x] 
`total_tokens` -> `usage.total_tokens` +- [x] Unknown events: + - [x] ignore safely with debug logs + - [x] do not break stream unless terminal error condition + +## 2.5 Error classification + rotation compatibility + +- [x] Ensure HTTP errors surface as `httpx.HTTPStatusError` (or equivalent classified exceptions) +- [x] Validate classification in existing `classify_error()` flow (`error_handler.py`): + - [x] 401/403 => authentication/forbidden -> rotate credential + - [x] 429 => rate_limit/quota_exceeded -> cooldown/rotate + - [x] 5xx => server_error -> retry/rotate + - [x] context-length style 400 => `context_window_exceeded` +- [x] Implement `@staticmethod parse_quota_error(error, error_body=None)` on provider + - [x] parse `Retry-After` + - [x] parse Codex-specific quota payload fields if present + +## 2.6 Quota/tier placeholders (MVP-safe defaults) + +- [x] Add conservative placeholders: + - [x] `tier_priorities` + - [x] `usage_reset_configs` + - [x] `model_quota_groups` +- [x] Mark with TODOs for empirical tuning once real quota behavior is observed + +--- + +## 3) Configuration + documentation updates + +## 3.1 `.env.example` + +- [x] Add one-time file import path: + - [x] `OPENAI_CODEX_OAUTH_1` +- [x] Add stateless env credential vars (legacy + numbered): + - [x] `OPENAI_CODEX_ACCESS_TOKEN` + - [x] `OPENAI_CODEX_REFRESH_TOKEN` + - [x] `OPENAI_CODEX_EXPIRY_DATE` + - [x] `OPENAI_CODEX_ID_TOKEN` + - [x] `OPENAI_CODEX_ACCOUNT_ID` + - [x] `OPENAI_CODEX_EMAIL` + - [x] `OPENAI_CODEX_1_*` variants +- [x] Add routing/config vars: + - [x] `OPENAI_CODEX_API_BASE` + - [x] `OPENAI_CODEX_OAUTH_PORT` + - [x] `OPENAI_CODEX_MODELS` + - [x] `ROTATION_MODE_OPENAI_CODEX` + +## 3.2 `README.md` + +- [x] Add OpenAI Codex to OAuth provider lists/tables +- [x] Add setup instructions: + - [x] interactive OAuth flow + - [x] first-run auto-import from `~/.codex/*` + - [x] env-based stateless deployment format +- [x] Add callback-port table row for OpenAI Codex + +## 3.3 `DOCUMENTATION.md` + +- [x] Update credential discovery/import flow to include Codex source files +- [x] Add OpenAI Codex auth/provider architecture section +- [x] Document schema + env vars + runtime refresh/rotation behavior +- [x] Add troubleshooting section: + - [x] malformed JWT payload + - [x] missing account-id claim + - [x] callback port conflicts + - [x] header mismatch / 403 failures + +--- + +## 4) Tests + +## 4.0 Test harness bootstrap (repo currently has no test suite) + +- [x] Add test directory structure: `tests/` +- [x] Add test dependencies (`pytest`, `pytest-asyncio`, `respx` or equivalent) +- [x] Add minimal test run documentation/command + +## 4.1 Auth base tests (`tests/test_openai_codex_auth.py`) + +- [x] JWT decode helper: + - [x] valid token + - [x] malformed token + - [x] missing claims +- [x] expiry logic: + - [x] `_is_token_expired()` with proactive buffer + - [x] `_is_token_truly_expired()` strict expiry +- [x] env loading: + - [x] legacy vars + - [x] numbered vars + - [x] `env://openai_codex/N` parsing +- [x] save/load round-trip with `_proxy_metadata` +- [x] re-auth queue availability behavior (`is_credential_available`) + +## 4.2 Import tests (`tests/test_openai_codex_import.py`) + +- [x] import from `~/.codex/auth.json` format +- [x] import from `~/.codex-accounts.json` format +- [x] skip import when local `openai_codex_oauth_*.json` exists +- [x] malformed source files handled gracefully +- [x] source files never modified + +## 4.3 Provider request mapping tests (`tests/test_openai_codex_provider.py`) + +- [x] 
chat request mapping to Codex Responses payload +- [x] non-stream response mapping to `ModelResponse` +- [x] header construction includes account-id + auth headers +- [x] env credential identifiers work (no file-only assumptions) + +## 4.4 SSE translation tests (`tests/test_openai_codex_sse.py`) + +- [x] fixture-driven event sequence -> expected chunk sequence +- [x] content deltas +- [x] tool-call deltas +- [x] finish reason mapping +- [x] usage extraction +- [x] error event propagation +- [x] unknown event tolerance + +## 4.5 Wiring regression tests (lightweight) + +- [x] credential discovery recognizes OpenAI Codex env vars +- [x] provider_factory returns OpenAICodexAuthBase +- [x] `providers` auto-registration includes `openai_codex` + +--- + +## 5) Manual smoke-test checklist + +- [x] `python -m rotator_library.credential_tool` shows **OpenAI Codex** in OAuth setup list +- [x] OpenAI Codex is excluded from API-key setup list (`oauth_only_providers`) +- [x] first run with no local creds imports from `~/.codex/*` into `oauth_creds/openai_codex_oauth_*.json` +- [x] env-based `env://openai_codex/N` credentials are detected and used +- [x] `/v1/models` includes `openai_codex/*` models +- [x] `/v1/chat/completions` works for: + - [x] `stream=false` + - [x] `stream=true` +- [x] expired token refresh works (proactive + on-demand) +- [x] invalid refresh token queues re-auth and rotates to next credential +- [x] `is_credential_available()` returns false for re-auth queued / truly expired creds +- [x] multi-account rotation works in: + - [x] `sequential` (default) + - [x] `balanced` (override) +- [x] launcher/settings UIs show Codex OAuth counts and callback-port setting correctly + +--- + +## 6) Optional phase 2 (post-MVP) + +- [ ] Extract common OAuth queue/cache logic into shared base mixin for `google_oauth_base`, `qwen_auth_base`, `iflow_auth_base`, and Codex +- [ ] Refactor credential tool OAuth provider lists/exports to dynamic provider-driven implementation +- [ ] Add `model_info_service` alias mapping for `openai_codex` if pricing/capability enrichment is desired +- [ ] Tune tier priorities/quota windows from observed production behavior +- [ ] Add periodic background reconciliation from external `~/.codex` stores if needed + +--- + +## Proposed implementation order + +1. **Protocol validation gate** — lock endpoints/headers/events from real fixtures +2. **Auth base** — `openai_codex_auth_base.py` (queue + refresh + reauth + env support) +3. **First-run import** — CredentialManager import flow for `~/.codex/*` +4. **Registry/discovery wiring** — provider_factory + credential_manager maps +5. **UI wiring** — credential_tool + launcher_tui + settings_tool +6. **Provider skeleton** — `openai_codex_provider.py`, model list, startup init +7. **Non-streaming completion** — request mapping + response mapping +8. **Streaming translator** — SSE event translation + tool calls + usage +9. **Error/quota integration** — `parse_quota_error`, retry/cooldown compatibility +10. **Tests** — harness + auth/import/provider/SSE/wiring tests +11. **Docs/config** — `.env.example`, `README.md`, `DOCUMENTATION.md` +12. 
**Manual smoke validation** — end-to-end checklist diff --git a/README.md b/README.md index c15ed094..09827930 100644 --- a/README.md +++ b/README.md @@ -106,6 +106,7 @@ anthropic/claude-3-5-sonnet ← Anthropic API openrouter/anthropic/claude-3-opus ← OpenRouter gemini_cli/gemini-2.5-pro ← Gemini CLI (OAuth) antigravity/gemini-3-pro-preview ← Antigravity (Gemini 3, Claude Opus 4.5) +openai_codex/gpt-5.1-codex ← OpenAI Codex (ChatGPT OAuth) ``` ### Usage Examples @@ -264,7 +265,7 @@ python -m rotator_library.credential_tool | Type | Providers | How to Add | |------|-----------|------------| | **API Keys** | Gemini, OpenAI, Anthropic, OpenRouter, Groq, Mistral, NVIDIA, Cohere, Chutes | Enter key in TUI or add to `.env` | -| **OAuth** | Gemini CLI, Antigravity, Qwen Code, iFlow | Interactive browser login via credential tool | +| **OAuth** | Gemini CLI, Antigravity, Qwen Code, iFlow, OpenAI Codex | Interactive browser login via credential tool | ### The `.env` File @@ -295,7 +296,7 @@ The proxy is powered by a standalone Python library that you can use directly in - **Intelligent key selection** with tiered, model-aware locking - **Deadline-driven requests** with configurable global timeout - **Automatic failover** between keys on errors -- **OAuth support** for Gemini CLI, Antigravity, Qwen, iFlow +- **OAuth support** for Gemini CLI, Antigravity, Qwen, iFlow, OpenAI Codex - **Stateless deployment ready** — load credentials from environment variables ### Basic Usage @@ -379,7 +380,7 @@ The proxy includes a powerful text-based UI for configuration and management. 🔑 Credential Management - **Auto-discovery** of API keys from environment variables -- **OAuth discovery** from standard paths (`~/.gemini/`, `~/.qwen/`, `~/.iflow/`) +- **OAuth discovery/import** from standard paths (`~/.gemini/`, `~/.qwen/`, `~/.iflow/`, `~/.codex/`) - **Duplicate detection** warns when same account added multiple times - **Credential prioritization** — paid tier used before free tier - **Stateless deployment** — export OAuth to environment variables @@ -439,6 +440,13 @@ The proxy includes a powerful text-based UI for configuration and management. - Hybrid auth with separate API key fetch - Tool schema cleaning +**OpenAI Codex:** + +- ChatGPT OAuth Authorization Code + PKCE +- Codex Responses backend (`/codex/responses`) behind OpenAI-compatible `/v1/chat/completions` +- First-run import from `~/.codex/auth.json` + `~/.codex-accounts.json` +- Sequential multi-account rotation + env credential parity (`env://openai_codex/N`) + **NVIDIA NIM:** - Dynamic model discovery @@ -454,7 +462,7 @@ The proxy includes a powerful text-based UI for configuration and management. - **Unique request directories** with full transaction details - **Streaming chunk capture** for debugging - **Performance metadata** (duration, tokens, model used) -- **Provider-specific logs** for Qwen, iFlow, Antigravity +- **Provider-specific logs** for Qwen, iFlow, Antigravity, OpenAI Codex @@ -753,6 +761,61 @@ Uses OAuth Authorization Code flow with local callback server. +
+<details>
+<summary>OpenAI Codex</summary>
+
+Uses ChatGPT OAuth credentials and routes requests to the Codex Responses backend.
+
+**Setup:**
+
+1. Run the credential tool
+2. Select "Add OAuth Credential" → "OpenAI Codex"
+3. Complete browser auth flow (local callback server)
+4. On first run, existing Codex CLI credentials are auto-imported from:
+   - `~/.codex/auth.json`
+   - `~/.codex-accounts.json`
+
+Imported credentials are normalized and stored locally as:
+
+- `oauth_creds/openai_codex_oauth_1.json`
+- `oauth_creds/openai_codex_oauth_2.json`
+- ...
+
+**Features:**
+
+- OAuth Authorization Code + PKCE
+- Uses OpenAI's public Codex OAuth client ID (non-secret by OAuth design)
+- Automatic refresh + re-auth queueing
+- File-based and stateless env credentials (`env://openai_codex/N`)
+- Sequential rotation by default (`ROTATION_MODE_OPENAI_CODEX=sequential`)
+- OpenAI-compatible `/v1/chat/completions` via Codex Responses backend
+
+**Environment Variables (stateless mode):**
+
+```env
+# Single credential (legacy)
+OPENAI_CODEX_ACCESS_TOKEN="..."
+OPENAI_CODEX_REFRESH_TOKEN="..."
+OPENAI_CODEX_EXPIRY_DATE="1739400000000"
+OPENAI_CODEX_ID_TOKEN="..."
+OPENAI_CODEX_ACCOUNT_ID="acct_..."
+OPENAI_CODEX_EMAIL="user@example.com"
+
+# Numbered multi-credential
+OPENAI_CODEX_1_ACCESS_TOKEN="..."
+OPENAI_CODEX_1_REFRESH_TOKEN="..."
+OPENAI_CODEX_1_EXPIRY_DATE="1739400000000"
+OPENAI_CODEX_1_ID_TOKEN="..."
+OPENAI_CODEX_1_ACCOUNT_ID="acct_..."
+OPENAI_CODEX_1_EMAIL="user1@example.com"
+
+OPENAI_CODEX_API_BASE="https://chatgpt.com/backend-api"
+OPENAI_CODEX_OAUTH_PORT=1455
+ROTATION_MODE_OPENAI_CODEX=sequential
+```
+
+</details>
+
<details>
<summary>Stateless Deployment (Export to Environment Variables)</summary>
@@ -784,11 +847,12 @@ For platforms without file persistence (Railway, Render, Vercel):
 
 Customize OAuth callback ports if defaults conflict:
 
-| Provider    | Default Port | Environment Variable     |
-| ----------- | ------------ | ------------------------ |
-| Gemini CLI  | 8085         | `GEMINI_CLI_OAUTH_PORT`  |
-| Antigravity | 51121        | `ANTIGRAVITY_OAUTH_PORT` |
-| iFlow       | 11451        | `IFLOW_OAUTH_PORT`       |
+| Provider     | Default Port | Environment Variable      |
+| ------------ | ------------ | ------------------------- |
+| Gemini CLI   | 8085         | `GEMINI_CLI_OAUTH_PORT`   |
+| Antigravity  | 51121        | `ANTIGRAVITY_OAUTH_PORT`  |
+| iFlow        | 11451        | `IFLOW_OAUTH_PORT`        |
+| OpenAI Codex | 1455         | `OPENAI_CODEX_OAUTH_PORT` |
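With four providers now binding local callback servers, port collisions are the most common OAuth failure. A quick, hypothetical check (not part of the proxy) for whether the Codex default is actually free before starting a flow:

```python
import socket


def port_is_free(port: int, host: str = "127.0.0.1") -> bool:
    """Return True if nothing is listening on host:port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(0.5)
        # connect_ex returns 0 when a listener accepts the connection,
        # i.e. the port is already in use by another process.
        return sock.connect_ex((host, port)) != 0


if not port_is_free(1455):
    print("Port 1455 is taken; set OPENAI_CODEX_OAUTH_PORT to a free port.")
```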
@@ -967,6 +1031,23 @@ See [VPS Deployment](Deployment%20guide.md#appendix-deploying-to-a-custom-vps) f --- +## Testing + +A lightweight pytest suite is now included under `tests/`. + +```bash +# Install runtime dependencies +pip install -r requirements.txt + +# Optional explicit test dependencies (also safe to run if already included) +pip install -r requirements-dev.txt + +# Run tests +pytest -q +``` + +--- + ## Troubleshooting | Issue | Solution | @@ -975,7 +1056,7 @@ See [VPS Deployment](Deployment%20guide.md#appendix-deploying-to-a-custom-vps) f | `500 Internal Server Error` | Check provider key validity; enable `--enable-request-logging` for details | | All keys on cooldown | All keys failed recently; check `logs/detailed_logs/` for upstream errors | | Model not found | Verify format is `provider/model_name` (e.g., `gemini/gemini-2.5-flash`) | -| OAuth callback failed | Ensure callback port (8085, 51121, 11451) isn't blocked by firewall | +| OAuth callback failed | Ensure callback port (8085, 51121, 11451, 1455) isn't blocked by firewall | | Streaming hangs | Increase `TIMEOUT_READ_STREAMING`; check provider status | **Detailed Logs:** diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 00000000..530e83f9 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest +pytest-asyncio +respx diff --git a/requirements.txt b/requirements.txt index 1f5d4985..e5ee231c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,3 +25,8 @@ customtkinter # For building the executable pyinstaller + +# Test dependencies +pytest +pytest-asyncio +respx diff --git a/src/proxy_app/launcher_tui.py b/src/proxy_app/launcher_tui.py index b2fec223..35461589 100644 --- a/src/proxy_app/launcher_tui.py +++ b/src/proxy_app/launcher_tui.py @@ -190,6 +190,7 @@ def detect_credentials() -> dict: "antigravity": "ANTIGRAVITY", "qwen_code": "QWEN_CODE", "iflow": "IFLOW", + "openai_codex": "OPENAI_CODEX", } for provider, env_prefix in env_oauth_providers.items(): diff --git a/src/proxy_app/settings_tool.py b/src/proxy_app/settings_tool.py index 57b7eb3b..006082b6 100644 --- a/src/proxy_app/settings_tool.py +++ b/src/proxy_app/settings_tool.py @@ -45,6 +45,13 @@ except ImportError: IFLOW_DEFAULT_OAUTH_PORT = 11451 +try: + from rotator_library.providers.openai_codex_auth_base import OpenAICodexAuthBase + + OPENAI_CODEX_DEFAULT_OAUTH_PORT = OpenAICodexAuthBase.CALLBACK_PORT +except ImportError: + OPENAI_CODEX_DEFAULT_OAUTH_PORT = 1455 + def clear_screen(subtitle: str = ""): """ @@ -553,11 +560,21 @@ def remove_multiplier(self, provider: str, priority: int): }, } +# OpenAI Codex provider environment variables +OPENAI_CODEX_SETTINGS = { + "OPENAI_CODEX_OAUTH_PORT": { + "type": "int", + "default": OPENAI_CODEX_DEFAULT_OAUTH_PORT, + "description": "Local port for OAuth callback server during authentication", + }, +} + # Map provider names to their settings definitions PROVIDER_SETTINGS_MAP = { "antigravity": ANTIGRAVITY_SETTINGS, "gemini_cli": GEMINI_CLI_SETTINGS, "iflow": IFLOW_SETTINGS, + "openai_codex": OPENAI_CODEX_SETTINGS, } diff --git a/src/rotator_library/credential_manager.py b/src/rotator_library/credential_manager.py index 9a7e5edb..37ae2319 100644 --- a/src/rotator_library/credential_manager.py +++ b/src/rotator_library/credential_manager.py @@ -3,11 +3,19 @@ import os import re +import json +import time import shutil import logging from pathlib import Path -from typing import Dict, List, Optional, Set, Union - +from typing import Dict, List, Optional, Set, Union, Any, Tuple + 
+from .utils.openai_codex_jwt import ( + decode_jwt_unverified, + extract_account_id_from_payload, + extract_email_from_payload, + extract_expiry_ms_from_payload, +) from .utils.paths import get_oauth_dir lib_logger = logging.getLogger("rotator_library") @@ -18,6 +26,7 @@ "qwen_code": Path.home() / ".qwen", "iflow": Path.home() / ".iflow", "antigravity": Path.home() / ".antigravity", + "openai_codex": Path.home() / ".codex", # import source context only # Add other providers like 'claude' here if they have a standard CLI path } @@ -28,6 +37,7 @@ "antigravity": "ANTIGRAVITY", "qwen_code": "QWEN_CODE", "iflow": "IFLOW", + "openai_codex": "OPENAI_CODEX", } @@ -120,6 +130,368 @@ def _discover_env_oauth_credentials(self) -> Dict[str, List[str]]: return result + # ------------------------------------------------------------------------- + # OpenAI Codex first-run import helpers + # ------------------------------------------------------------------------- + + def _extract_codex_identity( + self, + access_token: str, + id_token: Optional[str], + ) -> Tuple[Optional[str], Optional[str], Optional[int]]: + """ + Extract (account_id, email, exp_ms) from Codex JWTs. + + Priority: + - account_id: access_token -> id_token + - email: id_token -> access_token + - exp: access_token -> id_token + """ + access_payload = decode_jwt_unverified(access_token) + id_payload = decode_jwt_unverified(id_token) if id_token else None + + account_id = extract_account_id_from_payload(access_payload) or extract_account_id_from_payload( + id_payload + ) + email = extract_email_from_payload(id_payload) or extract_email_from_payload(access_payload) + exp_ms = extract_expiry_ms_from_payload(access_payload) or extract_expiry_ms_from_payload( + id_payload + ) + + return account_id, email, exp_ms + + def _normalize_openai_codex_auth_json_record(self, auth_data: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Normalize ~/.codex/auth.json format to proxy schema.""" + tokens = auth_data.get("tokens") + if not isinstance(tokens, dict): + return None + + access_token = tokens.get("access_token") + refresh_token = tokens.get("refresh_token") + id_token = tokens.get("id_token") + + if not isinstance(access_token, str) or not isinstance(refresh_token, str): + return None + + account_id, email, exp_ms = self._extract_codex_identity(access_token, id_token) + + # Respect explicit account_id from source tokens if present + explicit_account = tokens.get("account_id") + if isinstance(explicit_account, str) and explicit_account.strip(): + account_id = explicit_account.strip() + + if exp_ms is None: + # conservative fallback to 5 minutes from now + exp_ms = int((time.time() + 300) * 1000) + + return { + "access_token": access_token, + "refresh_token": refresh_token, + "id_token": id_token, + "expiry_date": exp_ms, + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": email, + "account_id": account_id, + "last_check_timestamp": time.time(), + "loaded_from_env": False, + "env_credential_index": None, + }, + } + + def _normalize_openai_codex_accounts_record(self, account: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Normalize one ~/.codex-accounts.json account entry to proxy schema.""" + access_token = account.get("access") + refresh_token = account.get("refresh") + id_token = account.get("idToken") + + if not isinstance(access_token, str) or not isinstance(refresh_token, str): + return None + + account_id, email, exp_ms = self._extract_codex_identity(access_token, id_token) + + explicit_account = 
account.get("accountId") + if isinstance(explicit_account, str) and explicit_account.strip(): + account_id = explicit_account.strip() + + label = account.get("label") + if not email and isinstance(label, str) and label.strip(): + email = label.strip() + + expires = account.get("expires") + if isinstance(expires, (int, float)): + exp_ms = int(expires) + + if exp_ms is None: + exp_ms = int((time.time() + 300) * 1000) + + return { + "access_token": access_token, + "refresh_token": refresh_token, + "id_token": id_token, + "expiry_date": exp_ms, + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": email, + "account_id": account_id, + "last_check_timestamp": time.time(), + "loaded_from_env": False, + "env_credential_index": None, + }, + } + + def _dedupe_openai_codex_records( + self, + records: List[Dict[str, Any]], + ) -> List[Dict[str, Any]]: + """Deduplicate normalized Codex credential records by account/email identity.""" + unique: List[Dict[str, Any]] = [] + seen_account_ids: Set[str] = set() + seen_emails: Set[str] = set() + + for record in records: + metadata = record.get("_proxy_metadata", {}) + account_id = metadata.get("account_id") + email = metadata.get("email") + + if isinstance(account_id, str) and account_id: + if account_id in seen_account_ids: + continue + seen_account_ids.add(account_id) + + if isinstance(email, str) and email: + if email in seen_emails: + continue + seen_emails.add(email) + + unique.append(record) + + return unique + + def _import_openai_codex_cli_credentials( + self, + auth_json_path: Optional[Path] = None, + accounts_json_path: Optional[Path] = None, + ) -> List[str]: + """ + First-run import from Codex CLI stores into local oauth_creds/. + + Source files are read-only: + - ~/.codex/auth.json (single account) + - ~/.codex-accounts.json (multi-account) + """ + auth_json_path = auth_json_path or (Path.home() / ".codex" / "auth.json") + accounts_json_path = accounts_json_path or (Path.home() / ".codex-accounts.json") + + normalized_records: List[Dict[str, Any]] = [] + + # Source 1: ~/.codex/auth.json + if auth_json_path.exists(): + try: + with open(auth_json_path, "r") as f: + auth_data = json.load(f) + + if isinstance(auth_data, dict): + record = self._normalize_openai_codex_auth_json_record(auth_data) + if record: + normalized_records.append(record) + else: + lib_logger.warning( + "OpenAI Codex import: skipping malformed ~/.codex/auth.json record" + ) + else: + lib_logger.warning( + "OpenAI Codex import: ~/.codex/auth.json root is not an object" + ) + except Exception as e: + lib_logger.warning( + f"OpenAI Codex import: failed to parse ~/.codex/auth.json: {e}" + ) + + # Source 2: ~/.codex-accounts.json + if accounts_json_path.exists(): + try: + with open(accounts_json_path, "r") as f: + accounts_data = json.load(f) + + accounts = [] + if isinstance(accounts_data, dict): + raw_accounts = accounts_data.get("accounts") + if isinstance(raw_accounts, list): + accounts = raw_accounts + elif isinstance(accounts_data, list): + accounts = accounts_data + + if not accounts: + lib_logger.warning( + "OpenAI Codex import: ~/.codex-accounts.json has no accounts list" + ) + + for idx, account in enumerate(accounts): + if not isinstance(account, dict): + lib_logger.warning( + f"OpenAI Codex import: skipping malformed account entry #{idx + 1}" + ) + continue + + record = self._normalize_openai_codex_accounts_record(account) + if record: + normalized_records.append(record) + else: + lib_logger.warning( + f"OpenAI Codex import: skipping 
malformed account entry #{idx + 1}" + ) + + except Exception as e: + lib_logger.warning( + f"OpenAI Codex import: failed to parse ~/.codex-accounts.json: {e}" + ) + + if not normalized_records: + return [] + + deduped_records = self._dedupe_openai_codex_records(normalized_records) + + imported_paths: List[str] = [] + for i, record in enumerate(deduped_records, 1): + local_path = self.oauth_base_dir / f"openai_codex_oauth_{i}.json" + try: + with open(local_path, "w") as f: + json.dump(record, f, indent=2) + imported_paths.append(str(local_path.resolve())) + except Exception as e: + lib_logger.error( + f"OpenAI Codex import: failed writing '{local_path.name}': {e}" + ) + + if imported_paths: + identifiers = [] + for p in imported_paths: + try: + with open(p, "r") as f: + payload = json.load(f) + meta = payload.get("_proxy_metadata", {}) + identifiers.append( + meta.get("email") or meta.get("account_id") or Path(p).name + ) + except Exception: + identifiers.append(Path(p).name) + + lib_logger.info( + "OpenAI Codex first-run import complete: " + f"{len(imported_paths)} credential(s) imported ({', '.join(str(x) for x in identifiers)})" + ) + + return imported_paths + + def _import_openai_codex_explicit_paths(self, source_paths: List[Path]) -> List[str]: + """ + Import OpenAI Codex credentials from explicit OPENAI_CODEX_OAUTH_* paths. + + Supports: + - Raw Codex CLI files (`~/.codex/auth.json`, `~/.codex-accounts.json`) + - Already-normalized proxy credential JSON files + + Returns local normalized/copied paths under oauth_creds/. + """ + if not source_paths: + return [] + + normalized_records: List[Dict[str, Any]] = [] + passthrough_paths: List[Path] = [] + + for source_path in sorted(source_paths): + try: + with open(source_path, "r") as f: + payload = json.load(f) + except Exception as e: + lib_logger.warning( + f"OpenAI Codex explicit import: failed to parse '{source_path}': {e}. Falling back to direct copy." 
+ ) + passthrough_paths.append(source_path) + continue + + # Raw ~/.codex/auth.json shape + if isinstance(payload, dict) and isinstance(payload.get("tokens"), dict): + record = self._normalize_openai_codex_auth_json_record(payload) + if record: + normalized_records.append(record) + continue + + # Raw ~/.codex-accounts.json shape (object or root list) + accounts: List[Any] = [] + if isinstance(payload, dict) and isinstance(payload.get("accounts"), list): + accounts = payload.get("accounts") + elif isinstance(payload, list): + accounts = payload + + if accounts: + converted = 0 + for idx, account in enumerate(accounts): + if not isinstance(account, dict): + lib_logger.warning( + f"OpenAI Codex explicit import: skipping malformed account entry #{idx + 1} from '{source_path.name}'" + ) + continue + + record = self._normalize_openai_codex_accounts_record(account) + if record: + normalized_records.append(record) + converted += 1 + + if converted > 0: + continue + + # Already-normalized proxy format + if ( + isinstance(payload, dict) + and isinstance(payload.get("access_token"), str) + and isinstance(payload.get("refresh_token"), str) + ): + passthrough_paths.append(source_path) + continue + + # Unknown shape: preserve existing behavior (copy as-is) + passthrough_paths.append(source_path) + + deduped_records = self._dedupe_openai_codex_records(normalized_records) + + imported_paths: List[str] = [] + next_index = 1 + + # Write normalized records first + for record in deduped_records: + local_path = self.oauth_base_dir / f"openai_codex_oauth_{next_index}.json" + try: + with open(local_path, "w") as f: + json.dump(record, f, indent=2) + imported_paths.append(str(local_path.resolve())) + next_index += 1 + except Exception as e: + lib_logger.error( + f"OpenAI Codex explicit import: failed writing '{local_path.name}': {e}" + ) + + # Copy passthrough files after normalized ones + for source_path in passthrough_paths: + local_path = self.oauth_base_dir / f"openai_codex_oauth_{next_index}.json" + try: + shutil.copy(source_path, local_path) + imported_paths.append(str(local_path.resolve())) + next_index += 1 + except Exception as e: + lib_logger.error( + f"OpenAI Codex explicit import: failed to copy '{source_path}' -> '{local_path}': {e}" + ) + + if imported_paths: + lib_logger.info( + "OpenAI Codex explicit-path import complete: " + f"{len(imported_paths)} credential(s) prepared" + ) + + return imported_paths + def discover_and_prepare(self) -> Dict[str, List[str]]: lib_logger.info("Starting automated OAuth credential discovery...") final_config = {} @@ -165,7 +537,7 @@ def discover_and_prepare(self) -> Dict[str, List[str]]: ] continue - # If no local credentials exist, proceed with a one-time discovery and copy. + # If no local credentials exist, proceed with one-time import/copy. discovered_paths = set() # 1. Add paths from environment variables first, as they are overrides @@ -174,8 +546,30 @@ def discover_and_prepare(self) -> Dict[str, List[str]]: if path.exists(): discovered_paths.add(path) - # 2. If no overrides are provided via .env, scan the default directory - # [MODIFIED] This logic is now disabled to prefer local-first credential management. + # 2. 
Provider-specific first-run import for OpenAI Codex + # Trigger only when: + # - provider == openai_codex + # - no local openai_codex_oauth_*.json already exist (checked above) + # - no env-based OPENAI_CODEX credentials were selected (provider not in final_config) + # - no explicit OPENAI_CODEX_OAUTH_* file paths were provided + if provider == "openai_codex" and not discovered_paths: + imported = self._import_openai_codex_cli_credentials() + if imported: + final_config[provider] = imported + continue + + # 3. Provider-specific explicit-path import handling for OpenAI Codex + # This normalizes raw ~/.codex/auth.json / ~/.codex-accounts.json when + # supplied via OPENAI_CODEX_OAUTH_* env vars. + if provider == "openai_codex" and discovered_paths: + imported = self._import_openai_codex_explicit_paths( + sorted(list(discovered_paths)) + ) + if imported: + final_config[provider] = imported + continue + + # 4. Default directory scan remains disabled (local-first policy) # if not discovered_paths and default_dir.exists(): # for json_file in default_dir.glob('*.json'): # discovered_paths.add(json_file) diff --git a/src/rotator_library/credential_tool.py b/src/rotator_library/credential_tool.py index aad529a4..7b3ee952 100644 --- a/src/rotator_library/credential_tool.py +++ b/src/rotator_library/credential_tool.py @@ -66,6 +66,7 @@ def _ensure_providers_loaded(): "qwen_code": "Qwen Code", "iflow": "iFlow", "antigravity": "Antigravity", + "openai_codex": "OpenAI Codex", } @@ -269,7 +270,13 @@ def _get_oauth_credentials_summary() -> dict: Example: {"gemini_cli": [{"email": "user@example.com", "tier": "free-tier", ...}, ...]} """ provider_factory, _ = _ensure_providers_loaded() - oauth_providers = ["gemini_cli", "qwen_code", "iflow", "antigravity"] + oauth_providers = [ + "gemini_cli", + "qwen_code", + "iflow", + "antigravity", + "openai_codex", + ] oauth_summary = {} for provider_name in oauth_providers: @@ -1214,6 +1221,7 @@ async def setup_api_key(): "antigravity", # OAuth-only "qwen_code", # OAuth is primary, don't advertise API key "iflow", # OAuth is primary + "openai_codex", # OAuth-only (ChatGPT OAuth) } # Base classes to exclude @@ -1732,6 +1740,7 @@ async def setup_new_credential(provider_name: str): "qwen_code": "Qwen Code (OAuth - also supports API keys)", "iflow": "iFlow", "antigravity": "Antigravity (OAuth)", + "openai_codex": "OpenAI Codex (OAuth)", } display_name = oauth_friendly_names.get( provider_name, provider_name.replace("_", " ").title() @@ -2202,6 +2211,96 @@ async def export_antigravity_to_env(): ) +async def export_openai_codex_to_env(): + """ + Export an OpenAI Codex credential JSON file to .env format. + Uses the auth class's build_env_lines() and list_credentials() methods. + """ + clear_screen("Export OpenAI Codex Credential") + + provider_factory, _ = _ensure_providers_loaded() + auth_class = provider_factory.get_provider_auth_class("openai_codex") + auth_instance = auth_class() + + credentials = auth_instance.list_credentials(_get_oauth_base_dir()) + + if not credentials: + console.print( + Panel( + "No OpenAI Codex credentials found. Please add one first using 'Add OAuth Credential'.", + style="bold red", + title="No Credentials", + ) + ) + return + + cred_text = Text() + for i, cred_info in enumerate(credentials): + cred_text.append( + f" {i + 1}. 
{Path(cred_info['file_path']).name} ({cred_info['email']})\n" + ) + + console.print( + Panel( + cred_text, + title="Available OpenAI Codex Credentials", + style="bold blue", + ) + ) + + choice = Prompt.ask( + Text.from_markup( + "[bold]Please select a credential to export or type [red]'b'[/red] to go back[/bold]" + ), + choices=[str(i + 1) for i in range(len(credentials))] + ["b"], + show_choices=False, + ) + + if choice.lower() == "b": + return + + try: + choice_index = int(choice) - 1 + if 0 <= choice_index < len(credentials): + cred_info = credentials[choice_index] + + env_path = auth_instance.export_credential_to_env( + cred_info["file_path"], _get_oauth_base_dir() + ) + + if env_path: + numbered_prefix = f"OPENAI_CODEX_{cred_info['number']}" + success_text = Text.from_markup( + f"Successfully exported credential to [bold yellow]'{Path(env_path).name}'[/bold yellow]\n\n" + f"[bold]Environment variable prefix:[/bold] [cyan]{numbered_prefix}_*[/cyan]\n\n" + f"[bold]To use this credential:[/bold]\n" + f"1. Copy the contents to your main .env file, OR\n" + f"2. Source it: [bold cyan]source {Path(env_path).name}[/bold cyan] (Linux/Mac)\n\n" + f"[bold]To combine multiple credentials:[/bold]\n" + f"Copy lines from multiple .env files into one file.\n" + f"Each credential uses a unique number ({numbered_prefix}_*)." + ) + console.print(Panel(success_text, style="bold green", title="Success")) + else: + console.print( + Panel( + "Failed to export credential", style="bold red", title="Error" + ) + ) + else: + console.print("[bold red]Invalid choice. Please try again.[/bold red]") + except ValueError: + console.print( + "[bold red]Invalid input. Please enter a number or 'b'.[/bold red]" + ) + except Exception as e: + console.print( + Panel( + f"An error occurred during export: {e}", style="bold red", title="Error" + ) + ) + + async def export_all_provider_credentials(provider_name: str): """ Export all credentials for a specific provider to individual .env files. @@ -2366,7 +2465,13 @@ async def combine_all_credentials(): clear_screen("Combine All Credentials") # List of providers that support OAuth credentials - oauth_providers = ["gemini_cli", "qwen_code", "iflow", "antigravity"] + oauth_providers = [ + "gemini_cli", + "qwen_code", + "iflow", + "antigravity", + "openai_codex", + ] provider_factory, _ = _ensure_providers_loaded() @@ -2471,19 +2576,22 @@ async def export_credentials_submenu(): "2. Export Qwen Code credential\n" "3. Export iFlow credential\n" "4. Export Antigravity credential\n" + "5. Export OpenAI Codex credential\n" "\n" "[bold]Bulk Exports (per provider):[/bold]\n" - "5. Export ALL Gemini CLI credentials\n" - "6. Export ALL Qwen Code credentials\n" - "7. Export ALL iFlow credentials\n" - "8. Export ALL Antigravity credentials\n" + "6. Export ALL Gemini CLI credentials\n" + "7. Export ALL Qwen Code credentials\n" + "8. Export ALL iFlow credentials\n" + "9. Export ALL Antigravity credentials\n" + "10. Export ALL OpenAI Codex credentials\n" "\n" "[bold]Combine Credentials:[/bold]\n" - "9. Combine all Gemini CLI into one file\n" - "10. Combine all Qwen Code into one file\n" - "11. Combine all iFlow into one file\n" - "12. Combine all Antigravity into one file\n" - "13. Combine ALL providers into one file" + "11. Combine all Gemini CLI into one file\n" + "12. Combine all Qwen Code into one file\n" + "13. Combine all iFlow into one file\n" + "14. Combine all Antigravity into one file\n" + "15. Combine all OpenAI Codex into one file\n" + "16. 
Combine ALL providers into one file" ), title="Choose export option", style="bold blue", @@ -2508,6 +2616,9 @@ async def export_credentials_submenu(): "11", "12", "13", + "14", + "15", + "16", "b", ], show_choices=False, @@ -2533,42 +2644,54 @@ async def export_credentials_submenu(): await export_antigravity_to_env() console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() - # Bulk exports (all credentials for a provider) elif export_choice == "5": - await export_all_provider_credentials("gemini_cli") + await export_openai_codex_to_env() console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() + # Bulk exports (all credentials for a provider) elif export_choice == "6": - await export_all_provider_credentials("qwen_code") + await export_all_provider_credentials("gemini_cli") console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() elif export_choice == "7": - await export_all_provider_credentials("iflow") + await export_all_provider_credentials("qwen_code") console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() elif export_choice == "8": + await export_all_provider_credentials("iflow") + console.print("\n[dim]Press Enter to return to export menu...[/dim]") + input() + elif export_choice == "9": await export_all_provider_credentials("antigravity") console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() + elif export_choice == "10": + await export_all_provider_credentials("openai_codex") + console.print("\n[dim]Press Enter to return to export menu...[/dim]") + input() # Combine per provider - elif export_choice == "9": + elif export_choice == "11": await combine_provider_credentials("gemini_cli") console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() - elif export_choice == "10": + elif export_choice == "12": await combine_provider_credentials("qwen_code") console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() - elif export_choice == "11": + elif export_choice == "13": await combine_provider_credentials("iflow") console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() - elif export_choice == "12": + elif export_choice == "14": await combine_provider_credentials("antigravity") console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() + elif export_choice == "15": + await combine_provider_credentials("openai_codex") + console.print("\n[dim]Press Enter to return to export menu...[/dim]") + input() # Combine all providers - elif export_choice == "13": + elif export_choice == "16": await combine_all_credentials() console.print("\n[dim]Press Enter to return to export menu...[/dim]") input() diff --git a/src/rotator_library/provider_factory.py b/src/rotator_library/provider_factory.py index dcc40bc9..cac95536 100644 --- a/src/rotator_library/provider_factory.py +++ b/src/rotator_library/provider_factory.py @@ -7,12 +7,14 @@ from .providers.qwen_auth_base import QwenAuthBase from .providers.iflow_auth_base import IFlowAuthBase from .providers.antigravity_auth_base import AntigravityAuthBase +from .providers.openai_codex_auth_base import OpenAICodexAuthBase PROVIDER_MAP = { "gemini_cli": GeminiAuthBase, "qwen_code": QwenAuthBase, "iflow": IFlowAuthBase, "antigravity": AntigravityAuthBase, + "openai_codex": OpenAICodexAuthBase, } def get_provider_auth_class(provider_name: str): diff --git a/src/rotator_library/providers/openai_codex_auth_base.py b/src/rotator_library/providers/openai_codex_auth_base.py new file 
mode 100644 index 00000000..b3278aca --- /dev/null +++ b/src/rotator_library/providers/openai_codex_auth_base.py @@ -0,0 +1,1527 @@ +# SPDX-License-Identifier: LGPL-3.0-only +# Copyright (c) 2026 Mirrowel + +# src/rotator_library/providers/openai_codex_auth_base.py + +import asyncio +import base64 +import copy +import hashlib +import json +import logging +import os +import re +import secrets +import time +import webbrowser +from dataclasses import dataclass, field +from glob import glob +from pathlib import Path +from typing import Any, Awaitable, Dict, List, Optional, Tuple, Union +from urllib.parse import urlencode + +import httpx +from aiohttp import web +from rich.console import Console +from rich.markup import escape as rich_escape +from rich.panel import Panel +from rich.text import Text + +from ..error_handler import CredentialNeedsReauthError +from ..utils.headless_detection import is_headless_environment +from ..utils.openai_codex_jwt import ( + ACCOUNT_ID_CLAIM, + AUTH_CLAIM, + decode_jwt_unverified, + extract_account_id_from_payload, + extract_email_from_payload, + extract_expiry_ms_from_payload, + extract_explicit_email_from_payload, +) +from ..utils.reauth_coordinator import get_reauth_coordinator +from ..utils.resilient_io import safe_write_json + +lib_logger = logging.getLogger("rotator_library") + +# OAuth constants +# Public OAuth client id used by the official Codex CLI/browser flow. +# OAuth client IDs identify the app and are intentionally non-secret. +CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann" +SCOPE = "openid profile email offline_access" +AUTHORIZATION_ENDPOINT = "https://auth.openai.com/oauth/authorize" +TOKEN_ENDPOINT = "https://auth.openai.com/oauth/token" +# OpenAI Codex OAuth redirect path registered for this client. +# Keep legacy `/oauth2callback` handler for backward compatibility with old URLs. +CALLBACK_PATH = "/auth/callback" +LEGACY_CALLBACK_PATH = "/oauth2callback" +CALLBACK_PORT = 1455 +CALLBACK_ENV_VAR = "OPENAI_CODEX_OAUTH_PORT" + +# API constants +DEFAULT_API_BASE = "https://chatgpt.com/backend-api" +RESPONSES_ENDPOINT_PATH = "/codex/responses" + +# Refresh when token is close to expiry +REFRESH_EXPIRY_BUFFER_SECONDS = 5 * 60 # 5 minutes + +INVALID_GRANT_PATTERN = re.compile( + r"\binvalid[_\s-]?grant\b|\bgrant\s+is\s+invalid\b|\brefresh\s+token\s+(?:is\s+)?(?:invalid|expired|revoked)\b", + re.IGNORECASE, +) + +console = Console() + + +@dataclass +class OpenAICodexCredentialSetupResult: + """Standardized result structure for OpenAI Codex credential setup operations.""" + + success: bool + file_path: Optional[str] = None + email: Optional[str] = None + is_update: bool = False + error: Optional[str] = None + credentials: Optional[Dict[str, Any]] = field(default=None, repr=False) + + +class OAuthCallbackServer: + """Minimal HTTP server for handling OpenAI Codex OAuth callbacks.""" + + SUCCESS_HTML = """ + + + + + Authentication successful + + +

+  <body>
+    <p>Authentication successful. Return to your terminal to continue.</p>
+  </body>
+ +""" + + def __init__(self, port: int = CALLBACK_PORT): + self.port = port + self.app = web.Application() + self.runner: Optional[web.AppRunner] = None + self.site: Optional[web.TCPSite] = None + self.result_future: Optional[asyncio.Future] = None + self.expected_state: Optional[str] = None + + async def start(self, expected_state: str): + """Start callback server on localhost:.""" + self.expected_state = expected_state + self.result_future = asyncio.Future() + + for callback_path in {CALLBACK_PATH, LEGACY_CALLBACK_PATH}: + self.app.router.add_get(callback_path, self._handle_callback) + + self.runner = web.AppRunner(self.app) + await self.runner.setup() + self.site = web.TCPSite(self.runner, "localhost", self.port) + await self.site.start() + + lib_logger.debug( + "OpenAI Codex OAuth callback server started on " + f"localhost:{self.port}{CALLBACK_PATH} " + f"(legacy alias: {LEGACY_CALLBACK_PATH})" + ) + + async def stop(self): + """Stop callback server.""" + if self.site: + await self.site.stop() + if self.runner: + await self.runner.cleanup() + lib_logger.debug("OpenAI Codex OAuth callback server stopped") + + async def _handle_callback(self, request: web.Request) -> web.Response: + query = request.query + + if "error" in query: + error = query.get("error", "unknown_error") + error_desc = query.get("error_description", "") + if not self.result_future.done(): + self.result_future.set_exception( + ValueError(f"OAuth error: {error} ({error_desc})") + ) + return web.Response(status=400, text=f"OAuth error: {error}") + + code = query.get("code") + state = query.get("state", "") + + if not code: + if not self.result_future.done(): + self.result_future.set_exception( + ValueError("Missing authorization code") + ) + return web.Response(status=400, text="Missing authorization code") + + if state != self.expected_state: + if not self.result_future.done(): + self.result_future.set_exception(ValueError("State parameter mismatch")) + return web.Response(status=400, text="State mismatch") + + if not self.result_future.done(): + self.result_future.set_result(code) + + return web.Response( + status=200, + text=self.SUCCESS_HTML, + content_type="text/html", + ) + + async def wait_for_callback(self, timeout: float = 300.0) -> str: + """Wait for OAuth callback and return auth code.""" + try: + code = await asyncio.wait_for(self.result_future, timeout=timeout) + return code + except asyncio.TimeoutError: + raise TimeoutError("Timeout waiting for OAuth callback") + + +def get_callback_port() -> int: + """Get OAuth callback port from env or fallback default.""" + env_value = os.getenv(CALLBACK_ENV_VAR) + if env_value: + try: + return int(env_value) + except ValueError: + lib_logger.warning( + f"Invalid {CALLBACK_ENV_VAR} value: {env_value}, using default {CALLBACK_PORT}" + ) + return CALLBACK_PORT + + +class OpenAICodexAuthBase: + """ + OpenAI Codex OAuth authentication base class. 
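+
+    Credential sources resolve in order: in-memory cache, env://openai_codex/N
+    virtual paths, then JSON files on disk (file reads fall back to the legacy
+    OPENAI_CODEX_* env vars when the file is missing).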
+ + Supports: + - Interactive OAuth Authorization Code + PKCE + - Token refresh with retry/backoff + - File + env credential loading (`env://openai_codex/N`) + - Queue-based refresh and re-auth workflows + - Credential management APIs for credential_tool + """ + + CALLBACK_PORT = CALLBACK_PORT + CALLBACK_ENV_VAR = CALLBACK_ENV_VAR + + def __init__(self): + self._credentials_cache: Dict[str, Dict[str, Any]] = {} + self._refresh_locks: Dict[str, asyncio.Lock] = {} + self._locks_lock = asyncio.Lock() + + # Backoff tracking + self._refresh_failures: Dict[str, int] = {} + self._next_refresh_after: Dict[str, float] = {} + + # Queue system (normal refresh + interactive re-auth) + self._refresh_queue: asyncio.Queue = asyncio.Queue() + self._queue_processor_task: Optional[asyncio.Task] = None + + self._reauth_queue: asyncio.Queue = asyncio.Queue() + self._reauth_processor_task: Optional[asyncio.Task] = None + + self._queued_credentials: set = set() + self._unavailable_credentials: Dict[str, float] = {} + self._unavailable_ttl_seconds: int = 360 + self._queue_tracking_lock = asyncio.Lock() + + self._queue_retry_count: Dict[str, int] = {} + + # Track background tasks spawned from sync contexts so exceptions are not dropped. + self._background_tasks: set[asyncio.Task] = set() + + # Queue configuration + self._refresh_timeout_seconds: int = 20 + self._refresh_interval_seconds: int = 20 + self._refresh_max_retries: int = 3 + self._reauth_timeout_seconds: int = 300 + + # ========================================================================= + # JWT + metadata helpers + # ========================================================================= + + @staticmethod + def _decode_jwt_unverified(token: str) -> Optional[Dict[str, Any]]: + """Decode JWT payload without signature verification.""" + return decode_jwt_unverified(token) + + @staticmethod + def _extract_account_id_from_payload(payload: Optional[Dict[str, Any]]) -> Optional[str]: + """Extract account ID from JWT claims.""" + return extract_account_id_from_payload(payload) + + @staticmethod + def _extract_explicit_email_from_payload( + payload: Optional[Dict[str, Any]], + ) -> Optional[str]: + """Extract explicit email claim only (no sub fallback).""" + return extract_explicit_email_from_payload(payload) + + @staticmethod + def _extract_email_from_payload(payload: Optional[Dict[str, Any]]) -> Optional[str]: + """Extract email from JWT payload using fallback chain: email -> sub.""" + return extract_email_from_payload(payload) + + @staticmethod + def _extract_expiry_ms_from_payload(payload: Optional[Dict[str, Any]]) -> Optional[int]: + """Extract JWT exp claim and convert to milliseconds.""" + return extract_expiry_ms_from_payload(payload) + + def _populate_metadata_from_tokens(self, creds: Dict[str, Any]) -> None: + """Populate _proxy_metadata (email/account_id) from access_token or id_token.""" + metadata = creds.setdefault("_proxy_metadata", {}) + + access_payload = self._decode_jwt_unverified(creds.get("access_token", "")) + id_payload = self._decode_jwt_unverified(creds.get("id_token", "")) + + account_id = self._extract_account_id_from_payload( + access_payload + ) or self._extract_account_id_from_payload(id_payload) + + # Prefer explicit email claim from id_token first (most user-specific), + # then explicit access-token email, then fall back to sub-based extraction. 
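+        # Hypothetical token payloads this chain handles (claims illustrative):
+        #   id_token:     {"email": "dev@example.com", ...}  -> explicit email wins
+        #   access_token: {"sub": "user-abc123", ...}        -> sub-based fallback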
+ email = ( + self._extract_explicit_email_from_payload(id_payload) + or self._extract_explicit_email_from_payload(access_payload) + or self._extract_email_from_payload(id_payload) + or self._extract_email_from_payload(access_payload) + ) + + if account_id: + metadata["account_id"] = account_id + + if email: + metadata["email"] = email + + # Keep top-level expiry_date synchronized from token exp as fallback + if not creds.get("expiry_date"): + expiry_ms = self._extract_expiry_ms_from_payload(access_payload) or self._extract_expiry_ms_from_payload( + id_payload + ) + if expiry_ms: + creds["expiry_date"] = expiry_ms + + metadata["last_check_timestamp"] = time.time() + + def _ensure_proxy_metadata(self, creds: Dict[str, Any]) -> Dict[str, Any]: + """Ensure credentials include normalized _proxy_metadata fields.""" + metadata = creds.setdefault("_proxy_metadata", {}) + metadata.setdefault("loaded_from_env", False) + metadata.setdefault("env_credential_index", None) + + self._populate_metadata_from_tokens(creds) + + # Keep top-level token_uri stable for schema consistency + creds.setdefault("token_uri", TOKEN_ENDPOINT) + + return creds + + # ========================================================================= + # Env + file credential loading + # ========================================================================= + + def _parse_env_credential_path(self, path: str) -> Optional[str]: + """ + Parse a virtual env:// path and return the credential index. + + Supported formats: + - env://openai_codex/0 (legacy single) + - env://openai_codex/1 (numbered) + """ + if not path.startswith("env://"): + return None + + raw = path[6:] + parts = raw.split("/") + if not parts: + return None + + provider = parts[0].strip().lower() + if provider != "openai_codex": + return None + + if len(parts) >= 2 and parts[1].strip(): + return parts[1].strip() + + return "0" + + def _load_from_env( + self, credential_index: Optional[str] = None + ) -> Optional[Dict[str, Any]]: + """ + Load OpenAI Codex OAuth credentials from environment variables. 
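+        Returns None unless both the ACCESS_TOKEN and REFRESH_TOKEN variables
+        for the selected prefix are set.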
+ + Legacy single credential: + - OPENAI_CODEX_ACCESS_TOKEN + - OPENAI_CODEX_REFRESH_TOKEN + - OPENAI_CODEX_EXPIRY_DATE (optional) + - OPENAI_CODEX_ID_TOKEN (optional) + - OPENAI_CODEX_ACCOUNT_ID (optional) + - OPENAI_CODEX_EMAIL (optional) + + Numbered credentials (N): + - OPENAI_CODEX_N_ACCESS_TOKEN + - OPENAI_CODEX_N_REFRESH_TOKEN + - OPENAI_CODEX_N_EXPIRY_DATE (optional) + - OPENAI_CODEX_N_ID_TOKEN (optional) + - OPENAI_CODEX_N_ACCOUNT_ID (optional) + - OPENAI_CODEX_N_EMAIL (optional) + """ + if credential_index and credential_index != "0": + prefix = f"OPENAI_CODEX_{credential_index}" + default_email = f"env-user-{credential_index}" + env_index = credential_index + else: + prefix = "OPENAI_CODEX" + default_email = "env-user" + env_index = "0" + + access_token = os.getenv(f"{prefix}_ACCESS_TOKEN") + refresh_token = os.getenv(f"{prefix}_REFRESH_TOKEN") + + if not (access_token and refresh_token): + return None + + expiry_raw = os.getenv(f"{prefix}_EXPIRY_DATE", "") + expiry_date: Optional[int] = None + if expiry_raw: + try: + expiry_date = int(float(expiry_raw)) + except ValueError: + lib_logger.warning(f"Invalid {prefix}_EXPIRY_DATE: {expiry_raw}") + + id_token = os.getenv(f"{prefix}_ID_TOKEN") + account_id = os.getenv(f"{prefix}_ACCOUNT_ID") + email = os.getenv(f"{prefix}_EMAIL") + + creds: Dict[str, Any] = { + "access_token": access_token, + "refresh_token": refresh_token, + "id_token": id_token, + "token_uri": TOKEN_ENDPOINT, + "expiry_date": expiry_date or 0, + "_proxy_metadata": { + "email": email or default_email, + "account_id": account_id, + "last_check_timestamp": time.time(), + "loaded_from_env": True, + "env_credential_index": env_index, + }, + } + + # Fill missing metadata/expiry from JWT claims + self._populate_metadata_from_tokens(creds) + + # If expiry still missing, set conservative short expiry to trigger refresh soon + if not creds.get("expiry_date"): + creds["expiry_date"] = int((time.time() + 300) * 1000) + + return creds + + async def _read_creds_from_file(self, path: str) -> Dict[str, Any]: + """Read credentials from disk and update cache.""" + try: + with open(path, "r") as f: + creds = json.load(f) + + if not isinstance(creds, dict): + raise ValueError("Credential file root must be a JSON object") + + creds = self._ensure_proxy_metadata(creds) + self._credentials_cache[path] = creds + return creds + + except FileNotFoundError: + raise IOError(f"OpenAI Codex credential file not found at '{path}'") + except Exception as e: + raise IOError( + f"Failed to load OpenAI Codex credentials from '{path}': {e}" + ) + + async def _load_credentials(self, path: str) -> Dict[str, Any]: + """Load credentials from cache, env, or file.""" + if path in self._credentials_cache: + return self._credentials_cache[path] + + async with await self._get_lock(path): + if path in self._credentials_cache: + return self._credentials_cache[path] + + credential_index = self._parse_env_credential_path(path) + if credential_index is not None: + env_creds = self._load_from_env(credential_index) + if env_creds: + self._credentials_cache[path] = env_creds + lib_logger.info( + f"Using OpenAI Codex env credential index {credential_index}" + ) + return env_creds + raise IOError( + f"Environment variables for OpenAI Codex credential index {credential_index} not found" + ) + + # File-based path, with legacy env fallback for backwards compatibility + try: + return await self._read_creds_from_file(path) + except IOError: + env_creds = self._load_from_env("0") + if env_creds: + self._credentials_cache[path] 
= env_creds + lib_logger.info( + f"File '{path}' not found; using legacy OPENAI_CODEX_* environment credentials" + ) + return env_creds + raise + + async def _save_credentials(self, path: str, creds: Dict[str, Any]) -> bool: + """ + Save credentials to disk, then update cache. + + Critical semantics: + - For rotating refresh tokens, disk write must succeed before cache update. + - Env-backed creds skip disk writes and update in-memory cache only. + """ + creds = self._ensure_proxy_metadata(copy.deepcopy(creds)) + + loaded_from_env = creds.get("_proxy_metadata", {}).get("loaded_from_env", False) + if loaded_from_env or self._parse_env_credential_path(path) is not None: + self._credentials_cache[path] = creds + lib_logger.debug( + f"OpenAI Codex credential '{path}' is env-backed; skipping disk write" + ) + return True + + if not safe_write_json( + path, + creds, + lib_logger, + secure_permissions=True, + buffer_on_failure=False, + ): + lib_logger.error( + f"Failed to persist OpenAI Codex credentials for '{Path(path).name}'. Cache not updated." + ) + return False + + self._credentials_cache[path] = creds + return True + + # ========================================================================= + # Expiry / refresh helpers + # ========================================================================= + + def _is_token_expired(self, creds: Dict[str, Any]) -> bool: + """Proactive expiry check using refresh buffer.""" + expiry_timestamp = float(creds.get("expiry_date", 0)) / 1000 + return expiry_timestamp < time.time() + REFRESH_EXPIRY_BUFFER_SECONDS + + def _is_token_truly_expired(self, creds: Dict[str, Any]) -> bool: + """Strict expiry check without proactive buffer.""" + expiry_timestamp = float(creds.get("expiry_date", 0)) / 1000 + return expiry_timestamp < time.time() + + @staticmethod + def _is_invalid_grant_error(error_type: str, error_desc: str) -> bool: + """Detect invalid/revoked refresh-token errors with specific matching.""" + if str(error_type).strip().lower() == "invalid_grant": + return True + + if not isinstance(error_desc, str) or not error_desc.strip(): + return False + + return bool(INVALID_GRANT_PATTERN.search(error_desc)) + + async def _queue_reauth_request(self, path: str) -> None: + """Queue interactive re-auth, logging queueing failures explicitly.""" + try: + await self._queue_refresh(path, force=True, needs_reauth=True) + except Exception as queue_error: + lib_logger.error( + f"Failed to queue OpenAI Codex re-auth for '{Path(path).name}': {queue_error}" + ) + + async def _exchange_code_for_tokens( + self, code: str, code_verifier: str, redirect_uri: str + ) -> Dict[str, Any]: + """Exchange OAuth authorization code for tokens.""" + payload = { + "grant_type": "authorization_code", + "code": code, + "client_id": CLIENT_ID, + "redirect_uri": redirect_uri, + "code_verifier": code_verifier, + } + + headers = { + "Content-Type": "application/x-www-form-urlencoded", + "Accept": "application/json", + "User-Agent": "LLM-API-Key-Proxy/OpenAICodex", + } + + async with httpx.AsyncClient(timeout=30.0) as client: + response = await client.post(TOKEN_ENDPOINT, headers=headers, data=payload) + response.raise_for_status() + token_data = response.json() + + access_token = token_data.get("access_token") + refresh_token = token_data.get("refresh_token") + expires_in = token_data.get("expires_in") + + if not access_token or not refresh_token or not isinstance(expires_in, (int, float)): + raise ValueError("Token exchange response missing required fields") + + return token_data + + async def 
_refresh_token(self, path: str, force: bool = False) -> Dict[str, Any]: + """Refresh access token using refresh_token with retry/backoff.""" + async with await self._get_lock(path): + cached_creds = self._credentials_cache.get(path) + if not force and cached_creds and not self._is_token_expired(cached_creds): + return cached_creds + + # Always load freshest source before refresh attempt + is_env = self._parse_env_credential_path(path) is not None + if is_env: + source_creds = copy.deepcopy(await self._load_credentials(path)) + else: + await self._read_creds_from_file(path) + source_creds = copy.deepcopy(self._credentials_cache[path]) + + refresh_token = source_creds.get("refresh_token") + if not refresh_token: + raise ValueError("No refresh_token found in OpenAI Codex credentials") + + headers = { + "Content-Type": "application/x-www-form-urlencoded", + "Accept": "application/json", + "User-Agent": "LLM-API-Key-Proxy/OpenAICodex", + } + + max_retries = 3 + token_data = None + last_error: Optional[Exception] = None + + async with httpx.AsyncClient(timeout=30.0) as client: + for attempt in range(max_retries): + try: + response = await client.post( + TOKEN_ENDPOINT, + headers=headers, + data={ + "grant_type": "refresh_token", + "refresh_token": refresh_token, + "client_id": CLIENT_ID, + }, + ) + response.raise_for_status() + token_data = response.json() + break + + except httpx.HTTPStatusError as e: + last_error = e + status_code = e.response.status_code + + error_type = "" + error_desc = "" + try: + payload = e.response.json() + error_type = payload.get("error", "") + error_desc = payload.get("error_description", "") or payload.get( + "message", "" + ) + except Exception: + error_desc = e.response.text + + # invalid_grant and authorization failures should trigger re-auth queue + if status_code == 400: + if self._is_invalid_grant_error(error_type, error_desc): + await self._queue_reauth_request(path) + raise CredentialNeedsReauthError( + credential_path=path, + message=( + f"OpenAI Codex refresh token invalid for '{Path(path).name}'. Re-auth queued." + ), + ) + raise + + if status_code in (401, 403): + await self._queue_reauth_request(path) + raise CredentialNeedsReauthError( + credential_path=path, + message=( + f"OpenAI Codex credential '{Path(path).name}' unauthorized (HTTP {status_code}). Re-auth queued." 
+ ), + ) + + if status_code == 429: + retry_after = e.response.headers.get("Retry-After", "60") + try: + wait_seconds = max(1, int(float(retry_after))) + except ValueError: + wait_seconds = 60 + + if attempt < max_retries - 1: + await asyncio.sleep(wait_seconds) + continue + raise + + if 500 <= status_code < 600: + if attempt < max_retries - 1: + await asyncio.sleep(2**attempt) + continue + raise + + raise + + except (httpx.RequestError, httpx.TimeoutException) as e: + last_error = e + if attempt < max_retries - 1: + await asyncio.sleep(2**attempt) + continue + raise + + if token_data is None: + self._refresh_failures[path] = self._refresh_failures.get(path, 0) + 1 + backoff_seconds = min(300, 30 * (2 ** self._refresh_failures[path])) + self._next_refresh_after[path] = time.time() + backoff_seconds + raise last_error or Exception("OpenAI Codex token refresh failed") + + access_token = token_data.get("access_token") + if not access_token: + raise ValueError("Refresh response missing access_token") + + expires_in = token_data.get("expires_in") + if not isinstance(expires_in, (int, float)): + raise ValueError("Refresh response missing expires_in") + + # Build UPDATED credential object (do not mutate cached source in-place) + updated_creds = copy.deepcopy(source_creds) + updated_creds["access_token"] = access_token + updated_creds["refresh_token"] = token_data.get( + "refresh_token", updated_creds.get("refresh_token") + ) + + if token_data.get("id_token"): + updated_creds["id_token"] = token_data.get("id_token") + + updated_creds["expiry_date"] = int((time.time() + float(expires_in)) * 1000) + updated_creds["token_uri"] = TOKEN_ENDPOINT + + self._ensure_proxy_metadata(updated_creds) + + if not updated_creds.get("access_token") or not updated_creds.get( + "refresh_token" + ): + raise ValueError("Refreshed credentials missing required token fields") + + # Successful refresh clears backoff tracking + self._refresh_failures.pop(path, None) + self._next_refresh_after.pop(path, None) + + # Persist before mutating shared cache state + if not await self._save_credentials(path, updated_creds): + raise IOError( + f"Failed to persist refreshed OpenAI Codex credential '{Path(path).name}'" + ) + + return self._credentials_cache[path] + + # ========================================================================= + # Interactive OAuth flow + # ========================================================================= + + async def _perform_interactive_oauth( + self, + path: Optional[str], + creds: Dict[str, Any], + display_name: str, + ) -> Dict[str, Any]: + """Perform interactive OpenAI Codex OAuth authorization code flow with PKCE.""" + is_headless = is_headless_environment() + + # PKCE verifier/challenge (base64url, no padding) + code_verifier = ( + base64.urlsafe_b64encode(secrets.token_bytes(32)) + .decode("utf-8") + .rstrip("=") + ) + code_challenge = ( + base64.urlsafe_b64encode( + hashlib.sha256(code_verifier.encode("utf-8")).digest() + ) + .decode("utf-8") + .rstrip("=") + ) + state = secrets.token_hex(32) + + callback_port = get_callback_port() + redirect_uri = f"http://localhost:{callback_port}{CALLBACK_PATH}" + + auth_params = { + "response_type": "code", + "client_id": CLIENT_ID, + "redirect_uri": redirect_uri, + "scope": SCOPE, + "code_challenge": code_challenge, + "code_challenge_method": "S256", + "state": state, + "id_token_add_organizations": "true", + "codex_cli_simplified_flow": "true", + "originator": "pi", + } + auth_url = f"{AUTHORIZATION_ENDPOINT}?{urlencode(auth_params)}" + + 
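+        # Resulting URL shape (values illustrative, tokens elided):
+        #   https://auth.openai.com/oauth/authorize?response_type=code
+        #     &client_id=app_...&redirect_uri=http%3A%2F%2Flocalhost%3A1455%2F...
+        #     &code_challenge=...&code_challenge_method=S256&state=...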
callback_server = OAuthCallbackServer(port=callback_port) + + try: + await callback_server.start(expected_state=state) + + if is_headless: + help_text = Text.from_markup( + "Running in headless environment.\n" + "Open the URL below in a browser on another machine and complete login." + ) + else: + help_text = Text.from_markup( + "Open the URL below, complete sign-in, and return here." + ) + + console.print( + Panel( + help_text, + title=f"OpenAI Codex OAuth Setup for [bold yellow]{display_name}[/bold yellow]", + style="bold blue", + ) + ) + escaped_url = rich_escape(auth_url) + console.print(f"[bold]URL:[/bold] [link={auth_url}]{escaped_url}[/link]\n") + + if not is_headless: + try: + webbrowser.open(auth_url) + lib_logger.info("Browser opened for OpenAI Codex OAuth flow") + except Exception as e: + lib_logger.warning( + f"Failed to auto-open browser for OpenAI Codex OAuth: {e}" + ) + + code = await callback_server.wait_for_callback( + timeout=float(self._reauth_timeout_seconds) + ) + + token_data = await self._exchange_code_for_tokens( + code=code, + code_verifier=code_verifier, + redirect_uri=redirect_uri, + ) + + # Build updated credential object + updated_creds = copy.deepcopy(creds) + metadata = updated_creds.setdefault("_proxy_metadata", {}) + loaded_from_env = metadata.get("loaded_from_env", False) + env_index = metadata.get("env_credential_index") + + updated_creds.update( + { + "access_token": token_data.get("access_token"), + "refresh_token": token_data.get("refresh_token"), + "id_token": token_data.get("id_token"), + "token_uri": TOKEN_ENDPOINT, + "expiry_date": int( + (time.time() + float(token_data.get("expires_in", 3600))) * 1000 + ), + } + ) + + # Restore env metadata flags if this credential originated from env + updated_creds.setdefault("_proxy_metadata", {}) + updated_creds["_proxy_metadata"]["loaded_from_env"] = loaded_from_env + updated_creds["_proxy_metadata"]["env_credential_index"] = env_index + + self._ensure_proxy_metadata(updated_creds) + + if path: + if not await self._save_credentials(path, updated_creds): + raise IOError( + f"Failed to save OpenAI Codex OAuth credentials for '{display_name}'" + ) + else: + # in-memory setup flow + creds.clear() + creds.update(updated_creds) + + lib_logger.info( + f"OpenAI Codex OAuth initialized successfully for '{display_name}'" + ) + return updated_creds + + finally: + await callback_server.stop() + + async def initialize_token( + self, + creds_or_path: Union[Dict[str, Any], str], + force_interactive: bool = False, + ) -> Dict[str, Any]: + """ + Initialize OAuth token, refreshing or running interactive flow as needed. + + Interactive re-auth is globally coordinated via ReauthCoordinator so only + one flow runs at a time across all providers. 
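+
+        Decision sketch (summarizing the body below):
+          missing refresh_token           -> interactive OAuth
+          expired + refresh_token present -> try _refresh_token() first
+          refresh raises needs-reauth     -> fall through to interactive OAuth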
+ """ + path = creds_or_path if isinstance(creds_or_path, str) else None + + if isinstance(creds_or_path, dict): + display_name = creds_or_path.get("_proxy_metadata", {}).get( + "display_name", "in-memory OpenAI Codex credential" + ) + else: + display_name = Path(path).name if path else "in-memory OpenAI Codex credential" + + try: + creds = ( + await self._load_credentials(creds_or_path) if path else copy.deepcopy(creds_or_path) + ) + + reason = "" + if force_interactive: + reason = "interactive re-auth explicitly requested" + elif not creds.get("refresh_token"): + reason = "refresh token is missing" + elif self._is_token_expired(creds): + reason = "token is expired" + + if reason: + # Prefer non-interactive refresh when we have a refresh token and this is simple expiry + if reason == "token is expired" and creds.get("refresh_token") and path: + try: + return await self._refresh_token(path) + except CredentialNeedsReauthError: + # Explicitly fall through into interactive re-auth path + pass + except Exception as e: + lib_logger.warning( + f"Automatic OpenAI Codex token refresh failed for '{display_name}': {e}. Falling back to interactive login." + ) + + coordinator = get_reauth_coordinator() + + async def _do_interactive_oauth(): + return await self._perform_interactive_oauth(path, creds, display_name) + + result = await coordinator.execute_reauth( + credential_path=path or display_name, + provider_name="OPENAI_CODEX", + reauth_func=_do_interactive_oauth, + timeout=float(self._reauth_timeout_seconds), + ) + + # Persist cache when path-based + if path and isinstance(result, dict): + self._credentials_cache[path] = self._ensure_proxy_metadata(result) + + return result + + # Token is already valid + creds = self._ensure_proxy_metadata(creds) + if path: + self._credentials_cache[path] = creds + return creds + + except Exception as e: + raise ValueError( + f"Failed to initialize OpenAI Codex OAuth credential '{display_name}': {e}" + ) + + async def get_auth_header(self, credential_identifier: str) -> Dict[str, str]: + creds = await self._load_credentials(credential_identifier) + if self._is_token_expired(creds): + creds = await self._refresh_token(credential_identifier) + return {"Authorization": f"Bearer {creds['access_token']}"} + + async def get_user_info( + self, creds_or_path: Union[Dict[str, Any], str] + ) -> Dict[str, Any]: + """Retrieve user info from _proxy_metadata.""" + try: + path = creds_or_path if isinstance(creds_or_path, str) else None + creds = await self._load_credentials(path) if path else copy.deepcopy(creds_or_path) + + if path: + await self.initialize_token(path) + creds = await self._load_credentials(path) + + metadata = creds.get("_proxy_metadata", {}) + email = metadata.get("email") + account_id = metadata.get("account_id") + + # Update timestamp in cache only (non-critical metadata) + if path and "_proxy_metadata" in creds: + creds["_proxy_metadata"]["last_check_timestamp"] = time.time() + self._credentials_cache[path] = creds + + return { + "email": email, + "account_id": account_id, + } + except Exception as e: + lib_logger.error(f"Failed to get OpenAI Codex user info: {e}") + return {"email": None, "account_id": None} + + async def proactively_refresh(self, credential_identifier: str): + """Queue proactive refresh for credentials near expiry.""" + try: + creds = await self._load_credentials(credential_identifier) + except IOError: + return + + if self._is_token_expired(creds): + await self._queue_refresh( + credential_identifier, + force=False, + needs_reauth=False, 
+ ) + + # ========================================================================= + # Queue + availability plumbing + # ========================================================================= + + async def _get_lock(self, path: str) -> asyncio.Lock: + async with self._locks_lock: + if path not in self._refresh_locks: + self._refresh_locks[path] = asyncio.Lock() + return self._refresh_locks[path] + + def _track_background_task( + self, + task: asyncio.Task, + *, + description: str, + ) -> asyncio.Task: + """Track a background task and surface exceptions in logs.""" + self._background_tasks.add(task) + + def _on_done(done_task: asyncio.Task): + self._background_tasks.discard(done_task) + if done_task.cancelled(): + return + + try: + exc = done_task.exception() + except Exception: + return + + if exc is not None: + lib_logger.error( + f"OpenAI Codex background task failed ({description}): {exc}" + ) + + task.add_done_callback(_on_done) + return task + + def _spawn_background_task( + self, + coro: Awaitable[Any], + *, + description: str, + ) -> Optional[asyncio.Task]: + """Create a tracked task from sync contexts when an event loop is available.""" + try: + loop = asyncio.get_running_loop() + except RuntimeError: + return None + + task = loop.create_task(coro) + return self._track_background_task(task, description=description) + + def is_credential_available(self, path: str) -> bool: + """ + Check if credential is available for rotation. + + Unavailable when: + - In re-auth queue + - Truly expired (past actual expiry) + """ + if path in self._unavailable_credentials: + marked_time = self._unavailable_credentials.get(path) + if marked_time is not None: + now = time.time() + if now - marked_time > self._unavailable_ttl_seconds: + lib_logger.warning( + f"OpenAI Codex credential '{Path(path).name}' stuck in re-auth queue for {int(now - marked_time)}s. Auto-cleaning stale entry." + ) + self._unavailable_credentials.pop(path, None) + self._queued_credentials.discard(path) + else: + return False + + creds = self._credentials_cache.get(path) + if creds and self._is_token_truly_expired(creds): + if path not in self._queued_credentials: + task = self._spawn_background_task( + self._queue_refresh(path, force=True, needs_reauth=False), + description=f"queue refresh for {Path(path).name}", + ) + if task is None: + # No running event loop (e.g., sync context); caller can still + # trigger refresh through normal async request flow. 
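+                    # (We still return False below so rotation skips this
+                    # credential for the current pass.)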
+ pass + return False + + return True + + async def _ensure_queue_processor_running(self): + if self._queue_processor_task is None or self._queue_processor_task.done(): + task = asyncio.create_task(self._process_refresh_queue()) + self._queue_processor_task = self._track_background_task( + task, + description="refresh queue processor", + ) + + async def _ensure_reauth_processor_running(self): + if self._reauth_processor_task is None or self._reauth_processor_task.done(): + task = asyncio.create_task(self._process_reauth_queue()) + self._reauth_processor_task = self._track_background_task( + task, + description="reauth queue processor", + ) + + async def _queue_refresh( + self, + path: str, + force: bool = False, + needs_reauth: bool = False, + ): + """Queue credential for refresh or re-auth.""" + if not needs_reauth: + now = time.time() + backoff_until = self._next_refresh_after.get(path) + if backoff_until and now < backoff_until: + return + + async with self._queue_tracking_lock: + if path in self._queued_credentials: + return + + self._queued_credentials.add(path) + + if needs_reauth: + self._unavailable_credentials[path] = time.time() + await self._reauth_queue.put(path) + await self._ensure_reauth_processor_running() + else: + await self._refresh_queue.put((path, force)) + await self._ensure_queue_processor_running() + + async def _process_refresh_queue(self): + """Sequential background worker for normal refresh queue.""" + while True: + path = None + try: + try: + path, force = await asyncio.wait_for(self._refresh_queue.get(), timeout=60.0) + except asyncio.TimeoutError: + async with self._queue_tracking_lock: + self._queue_retry_count.clear() + self._queue_processor_task = None + return + + try: + creds = self._credentials_cache.get(path) + if creds and not self._is_token_expired(creds): + self._queue_retry_count.pop(path, None) + continue + + try: + async with asyncio.timeout(self._refresh_timeout_seconds): + await self._refresh_token(path, force=force) + self._queue_retry_count.pop(path, None) + + except asyncio.TimeoutError: + await self._handle_refresh_failure(path, force, "timeout") + + except httpx.HTTPStatusError as e: + status_code = e.response.status_code + needs_reauth = False + + if status_code == 400: + try: + payload = e.response.json() + error_type = payload.get("error", "") + error_desc = payload.get("error_description", "") + except Exception: + error_type = "" + error_desc = str(e) + + if self._is_invalid_grant_error(error_type, error_desc): + needs_reauth = True + + elif status_code in (401, 403): + needs_reauth = True + + if needs_reauth: + self._queue_retry_count.pop(path, None) + async with self._queue_tracking_lock: + self._queued_credentials.discard(path) + await self._queue_refresh(path, force=True, needs_reauth=True) + else: + await self._handle_refresh_failure(path, force, f"HTTP {status_code}") + + except CredentialNeedsReauthError: + self._queue_retry_count.pop(path, None) + async with self._queue_tracking_lock: + self._queued_credentials.discard(path) + await self._queue_refresh(path, force=True, needs_reauth=True) + + except Exception as e: + await self._handle_refresh_failure(path, force, str(e)) + + finally: + async with self._queue_tracking_lock: + if ( + path in self._queued_credentials + and self._queue_retry_count.get(path, 0) == 0 + ): + self._queued_credentials.discard(path) + self._refresh_queue.task_done() + + await asyncio.sleep(self._refresh_interval_seconds) + + except asyncio.CancelledError: + break + except Exception as e: + 
lib_logger.error(f"Error in OpenAI Codex refresh queue processor: {e}") + if path: + async with self._queue_tracking_lock: + self._queued_credentials.discard(path) + + async def _handle_refresh_failure(self, path: str, force: bool, error: str): + retry_count = self._queue_retry_count.get(path, 0) + 1 + self._queue_retry_count[path] = retry_count + + if retry_count >= self._refresh_max_retries: + lib_logger.error( + f"OpenAI Codex refresh max retries reached for '{Path(path).name}' (last error: {error})." + ) + self._queue_retry_count.pop(path, None) + async with self._queue_tracking_lock: + self._queued_credentials.discard(path) + return + + lib_logger.warning( + f"OpenAI Codex refresh failed for '{Path(path).name}' ({error}). Retry {retry_count}/{self._refresh_max_retries}." + ) + await self._refresh_queue.put((path, force)) + + async def _process_reauth_queue(self): + """Sequential background worker for interactive re-auth queue.""" + while True: + path = None + try: + try: + path = await asyncio.wait_for(self._reauth_queue.get(), timeout=60.0) + except asyncio.TimeoutError: + self._reauth_processor_task = None + return + + try: + lib_logger.info( + f"Starting OpenAI Codex interactive re-auth for '{Path(path).name}'" + ) + await self.initialize_token(path, force_interactive=True) + lib_logger.info( + f"OpenAI Codex re-auth succeeded for '{Path(path).name}'" + ) + except Exception as e: + lib_logger.error( + f"OpenAI Codex re-auth failed for '{Path(path).name}': {e}" + ) + finally: + async with self._queue_tracking_lock: + self._queued_credentials.discard(path) + self._unavailable_credentials.pop(path, None) + self._reauth_queue.task_done() + + except asyncio.CancelledError: + if path: + async with self._queue_tracking_lock: + self._queued_credentials.discard(path) + self._unavailable_credentials.pop(path, None) + break + except Exception as e: + lib_logger.error(f"Error in OpenAI Codex re-auth queue processor: {e}") + if path: + async with self._queue_tracking_lock: + self._queued_credentials.discard(path) + self._unavailable_credentials.pop(path, None) + + # ========================================================================= + # Credential management methods for credential_tool + # ========================================================================= + + def _get_provider_file_prefix(self) -> str: + return "openai_codex" + + def _get_oauth_base_dir(self) -> Path: + return Path.cwd() / "oauth_creds" + + def _find_existing_credential_by_identity( + self, + email: Optional[str], + account_id: Optional[str], + base_dir: Optional[Path] = None, + ) -> Optional[Path]: + """ + Find an existing local credential to update. + + Matching policy (multi-account safe): + - If both email and account_id are available, require BOTH to match. + - If one identity field is missing on either side, use the other as a fallback. + + This avoids collisions when different users/accounts share a workspace + account_id while keeping backward compatibility for legacy files that may + miss one metadata field. 
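+
+        Outcome sketch (inputs assumed for illustration):
+          - email and account_id both match        -> strongest match, returned first
+          - email matches, account_id missing      -> email fallback candidate
+          - account_id matches, email missing      -> account fallback candidate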
+ """ + if base_dir is None: + base_dir = self._get_oauth_base_dir() + + prefix = self._get_provider_file_prefix() + pattern = str(base_dir / f"{prefix}_oauth_*.json") + + email_fallback_match: Optional[Path] = None + account_fallback_match: Optional[Path] = None + + for cred_file in glob(pattern): + try: + with open(cred_file, "r") as f: + creds = json.load(f) + + metadata = creds.get("_proxy_metadata", {}) + existing_email = metadata.get("email") + existing_account_id = metadata.get("account_id") + + same_email = ( + bool(email) + and bool(existing_email) + and str(existing_email).strip() == str(email).strip() + ) + same_account = ( + bool(account_id) + and bool(existing_account_id) + and str(existing_account_id).strip() == str(account_id).strip() + ) + + # Strongest match: both identifiers present + matching + if same_email and same_account: + return Path(cred_file) + + # Fallbacks only when one identity dimension is missing + if same_email and (not account_id or not existing_account_id): + email_fallback_match = Path(cred_file) + + if same_account and (not email or not existing_email): + account_fallback_match = Path(cred_file) + + except Exception: + continue + + # Prefer email-based fallback over account fallback when both are possible + return email_fallback_match or account_fallback_match + + def _get_next_credential_number(self, base_dir: Optional[Path] = None) -> int: + if base_dir is None: + base_dir = self._get_oauth_base_dir() + + prefix = self._get_provider_file_prefix() + pattern = str(base_dir / f"{prefix}_oauth_*.json") + + existing_numbers = [] + for cred_file in glob(pattern): + match = re.search(r"_oauth_(\d+)\.json$", cred_file) + if match: + existing_numbers.append(int(match.group(1))) + + return (max(existing_numbers) + 1) if existing_numbers else 1 + + def _build_credential_path( + self, + base_dir: Optional[Path] = None, + number: Optional[int] = None, + ) -> Path: + if base_dir is None: + base_dir = self._get_oauth_base_dir() + + if number is None: + number = self._get_next_credential_number(base_dir) + + filename = f"{self._get_provider_file_prefix()}_oauth_{number}.json" + return base_dir / filename + + async def setup_credential( + self, + base_dir: Optional[Path] = None, + ) -> OpenAICodexCredentialSetupResult: + """Complete OpenAI Codex credential setup flow.""" + if base_dir is None: + base_dir = self._get_oauth_base_dir() + + base_dir.mkdir(parents=True, exist_ok=True) + + try: + temp_creds = { + "_proxy_metadata": { + "display_name": "new OpenAI Codex credential", + "loaded_from_env": False, + "env_credential_index": None, + } + } + new_creds = await self.initialize_token(temp_creds) + + metadata = new_creds.get("_proxy_metadata", {}) + email = metadata.get("email") + account_id = metadata.get("account_id") + + existing_path = self._find_existing_credential_by_identity( + email=email, + account_id=account_id, + base_dir=base_dir, + ) + is_update = existing_path is not None + file_path = existing_path if is_update else self._build_credential_path(base_dir) + + if not await self._save_credentials(str(file_path), new_creds): + return OpenAICodexCredentialSetupResult( + success=False, + error=f"Failed to save OpenAI Codex credential to {file_path.name}", + ) + + return OpenAICodexCredentialSetupResult( + success=True, + file_path=str(file_path), + email=email, + is_update=is_update, + credentials=new_creds, + ) + + except Exception as e: + lib_logger.error(f"OpenAI Codex credential setup failed: {e}") + return OpenAICodexCredentialSetupResult(success=False, 
error=str(e)) + + def build_env_lines(self, creds: Dict[str, Any], cred_number: int) -> List[str]: + """Build OPENAI_CODEX_N_* env lines from credential JSON.""" + metadata = creds.get("_proxy_metadata", {}) + email = metadata.get("email", "unknown") + account_id = metadata.get("account_id", "") + + prefix = f"OPENAI_CODEX_{cred_number}" + + lines = [ + f"# OPENAI_CODEX Credential #{cred_number} for: {email}", + f"# Exported from: openai_codex_oauth_{cred_number}.json", + f"# Generated at: {time.strftime('%Y-%m-%d %H:%M:%S')}", + "", + f"{prefix}_ACCESS_TOKEN={creds.get('access_token', '')}", + f"{prefix}_REFRESH_TOKEN={creds.get('refresh_token', '')}", + f"{prefix}_EXPIRY_DATE={int(float(creds.get('expiry_date', 0)))}", + f"{prefix}_ID_TOKEN={creds.get('id_token', '')}", + f"{prefix}_ACCOUNT_ID={account_id}", + f"{prefix}_EMAIL={email}", + ] + + return lines + + def export_credential_to_env( + self, + credential_path: str, + output_dir: Optional[Path] = None, + ) -> Optional[str]: + """Export a credential JSON file to .env format.""" + try: + cred_path = Path(credential_path) + with open(cred_path, "r") as f: + creds = json.load(f) + + metadata = creds.get("_proxy_metadata", {}) + email = metadata.get("email", "unknown") + + match = re.search(r"_oauth_(\d+)\.json$", cred_path.name) + cred_number = int(match.group(1)) if match else 1 + + if output_dir is None: + output_dir = cred_path.parent + + safe_email = str(email).replace("@", "_at_").replace(".", "_") + env_filename = f"openai_codex_{cred_number}_{safe_email}.env" + env_path = output_dir / env_filename + + env_lines = self.build_env_lines(creds, cred_number) + with open(env_path, "w") as f: + f.write("\n".join(env_lines)) + + lib_logger.info(f"Exported OpenAI Codex credential to {env_path}") + return str(env_path) + + except Exception as e: + lib_logger.error(f"Failed to export OpenAI Codex credential: {e}") + return None + + def list_credentials(self, base_dir: Optional[Path] = None) -> List[Dict[str, Any]]: + """List all local OpenAI Codex credential files.""" + if base_dir is None: + base_dir = self._get_oauth_base_dir() + + prefix = self._get_provider_file_prefix() + pattern = str(base_dir / f"{prefix}_oauth_*.json") + + credentials: List[Dict[str, Any]] = [] + for cred_file in sorted(glob(pattern)): + try: + with open(cred_file, "r") as f: + creds = json.load(f) + + metadata = creds.get("_proxy_metadata", {}) + match = re.search(r"_oauth_(\d+)\.json$", cred_file) + number = int(match.group(1)) if match else 0 + + credentials.append( + { + "file_path": cred_file, + "email": metadata.get("email", "unknown"), + "account_id": metadata.get("account_id"), + "number": number, + } + ) + except Exception: + continue + + return credentials + + def delete_credential(self, credential_path: str) -> bool: + """Delete an OpenAI Codex credential file.""" + try: + cred_path = Path(credential_path) + prefix = self._get_provider_file_prefix() + + if not cred_path.name.startswith(f"{prefix}_oauth_"): + lib_logger.error( + f"File {cred_path.name} does not appear to be an OpenAI Codex credential" + ) + return False + + if not cred_path.exists(): + lib_logger.warning( + f"OpenAI Codex credential file does not exist: {credential_path}" + ) + return False + + self._credentials_cache.pop(credential_path, None) + cred_path.unlink() + lib_logger.info(f"Deleted OpenAI Codex credential file: {credential_path}") + return True + + except Exception as e: + lib_logger.error(f"Failed to delete OpenAI Codex credential: {e}") + return False diff --git 
a/src/rotator_library/providers/openai_codex_provider.py b/src/rotator_library/providers/openai_codex_provider.py new file mode 100644 index 00000000..3358aabd --- /dev/null +++ b/src/rotator_library/providers/openai_codex_provider.py @@ -0,0 +1,1263 @@ +# SPDX-License-Identifier: LGPL-3.0-only +# Copyright (c) 2026 Mirrowel + +# src/rotator_library/providers/openai_codex_provider.py + +import copy +import json +import logging +import os +import re +import time +from datetime import datetime, timezone +from pathlib import Path +from typing import Any, AsyncGenerator, Dict, Iterable, List, Optional, Union + +import httpx +import litellm + +from .openai_codex_auth_base import ( + AUTH_CLAIM, + DEFAULT_API_BASE, + RESPONSES_ENDPOINT_PATH, + OpenAICodexAuthBase, +) +from .provider_interface import ProviderInterface, UsageResetConfigDef, QuotaGroupMap +from ..model_definitions import ModelDefinitions +from ..timeout_config import TimeoutConfig +from ..transaction_logger import ProviderLogger + +lib_logger = logging.getLogger("rotator_library") + +# Conservative fallback model list (can be overridden via OPENAI_CODEX_MODELS) +HARDCODED_MODELS = [ + "gpt-5.3-codex", + "gpt-5.2-codex", + "gpt-5.2", + "gpt-5.1-codex", + "gpt-5.1-codex-max", + "gpt-5.1-codex-mini", + "gpt-5-codex", +] + +RATE_LIMIT_CODE_PATTERN = re.compile( + r"^(rate[_-]?limit(?:ed)?|usage[_-]?limit(?:[_-](?:reached|exceeded))?|quota(?:[_-](?:reached|exceeded))?|insufficient_quota)$", + re.IGNORECASE, +) +RATE_LIMIT_TYPE_PATTERN = re.compile( + r"^(rate[_-]?limit(?:_error)?)$", + re.IGNORECASE, +) +RATE_LIMIT_MESSAGE_PATTERN = re.compile( + r"\b(rate\s*limit(?:ed)?|too\s+many\s+requests|usage\s+limit\s+(?:reached|exceeded)|quota\s+(?:is\s+)?(?:reached|exceeded))\b", + re.IGNORECASE, +) + + +class CodexStreamError(Exception): + """Terminal Codex stream error that should abort the stream.""" + + def __init__(self, message: str, status_code: int = 500, error_body: Optional[str] = None): + self.status_code = status_code + self.error_body = error_body or message + super().__init__(message) + + +class CodexSSETranslator: + """ + Translates OpenAI Codex SSE events into OpenAI chat.completion chunks. 
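+
+    Illustrative translation (event payload invented for this example):
+      {"type": "response.output_text.delta", "delta": "Hi"}
+        -> chat.completion.chunk with choices[0].delta == {"content": "Hi"}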
+ + Supports both currently observed events and planned fallback aliases: + - response.output_text.delta (observed) + - response.content_part.delta (planned alias) + - response.function_call_arguments.delta / .done + """ + + def __init__(self, model_id: str): + self.model_id = model_id + self.response_id: Optional[str] = None + self.created: int = int(time.time()) + self._tool_index_by_call_id: Dict[str, int] = {} + self._tool_names_by_call_id: Dict[str, str] = {} + + def _build_chunk( + self, + *, + delta: Optional[Dict[str, Any]] = None, + finish_reason: Optional[str] = None, + usage: Optional[Dict[str, int]] = None, + ) -> Dict[str, Any]: + if not self.response_id: + self.response_id = f"chatcmpl-codex-{int(time.time() * 1000)}" + + choice = { + "index": 0, + "delta": delta or {}, + "finish_reason": finish_reason, + } + + chunk = { + "id": self.response_id, + "object": "chat.completion.chunk", + "created": self.created, + "model": self.model_id, + "choices": [choice], + } + + if usage is not None: + chunk["usage"] = usage + + return chunk + + def _extract_text_delta(self, event: Dict[str, Any]) -> Optional[str]: + event_type = event.get("type") + + if event_type == "response.output_text.delta": + delta = event.get("delta") + if isinstance(delta, str): + return delta + + if event_type == "response.content_part.delta": + # Compatibility with planned taxonomy + if isinstance(event.get("delta"), str): + return event["delta"] + part = event.get("part") + if isinstance(part, dict): + if isinstance(part.get("delta"), str): + return part["delta"] + if isinstance(part.get("text"), str): + return part["text"] + + if event_type == "response.content_part.added": + part = event.get("part") + if isinstance(part, dict): + text = part.get("text") + if isinstance(text, str) and text: + return text + + return None + + def _map_incomplete_reason(self, reason: Optional[str]) -> str: + if not reason: + return "length" + + normalized = reason.strip().lower() + if normalized in {"stop", "completed"}: + return "stop" + if normalized in {"max_output_tokens", "max_tokens", "length"}: + return "length" + if normalized in {"tool_calls", "tool_call"}: + return "tool_calls" + if normalized in {"content_filter", "content_filtered"}: + return "content_filter" + return "length" + + def _extract_usage(self, event: Dict[str, Any]) -> Optional[Dict[str, int]]: + response = event.get("response") + if not isinstance(response, dict): + return None + + usage = response.get("usage") + if not isinstance(usage, dict): + return None + + prompt_tokens = int(usage.get("input_tokens", 0) or 0) + completion_tokens = int(usage.get("output_tokens", 0) or 0) + total_tokens = int(usage.get("total_tokens", 0) or 0) + + return { + "prompt_tokens": prompt_tokens, + "completion_tokens": completion_tokens, + "total_tokens": total_tokens, + } + + def _get_response_status(self, event: Dict[str, Any]) -> str: + response = event.get("response") + if isinstance(response, dict): + status = response.get("status") + if isinstance(status, str) and status: + return status + + event_type = event.get("type") + if event_type == "response.incomplete": + return "incomplete" + if event_type == "response.failed": + return "failed" + return "completed" + + def _get_or_create_tool_index(self, call_id: str) -> int: + if call_id not in self._tool_index_by_call_id: + self._tool_index_by_call_id[call_id] = len(self._tool_index_by_call_id) + return self._tool_index_by_call_id[call_id] + + def _extract_tool_call_id(self, event: Dict[str, Any]) -> Optional[str]: + for 
key in ("call_id", "item_id", "id"): + value = event.get(key) + if isinstance(value, str) and value: + return value + + item = event.get("item") + if isinstance(item, dict): + for key in ("call_id", "id"): + value = item.get(key) + if isinstance(value, str) and value: + return value + + return None + + def _extract_error_payload(self, event: Dict[str, Any]) -> Dict[str, Any]: + # Common formats: + # {type:"error", error:{...}} + # {type:"response.failed", response:{error:{...}}} + payload = event.get("error") + if isinstance(payload, dict): + return payload + + response = event.get("response") + if isinstance(response, dict): + nested = response.get("error") + if isinstance(nested, dict): + return nested + + return {} + + def _classify_error_status(self, error_payload: Dict[str, Any]) -> int: + code = str(error_payload.get("code", "") or "").lower() + err_type = str(error_payload.get("type", "") or "").lower() + message = str(error_payload.get("message", "") or "").lower() + text = " ".join([code, err_type, message]) + + if any(token in text for token in ["rate_limit", "usage_limit", "quota"]): + return 429 + if any(token in text for token in ["auth", "unauthorized", "invalid_api_key"]): + return 401 + if "forbidden" in text: + return 403 + if "context" in text or "max_output_tokens" in text: + return 400 + return 500 + + def process_event(self, event: Dict[str, Any]) -> List[Dict[str, Any]]: + """Process a single SSE event and return zero or more translated chunks.""" + chunks: List[Dict[str, Any]] = [] + + event_type = event.get("type") + if not isinstance(event_type, str): + return chunks + + # Capture response id/created as early as possible + response = event.get("response") + if isinstance(response, dict): + if isinstance(response.get("id"), str) and response.get("id"): + self.response_id = response["id"] + if isinstance(response.get("created_at"), (int, float)): + self.created = int(response["created_at"]) + + if event_type == "response.output_item.added": + item = event.get("item") + if isinstance(item, dict) and item.get("type") == "function_call": + call_id = self._extract_tool_call_id(item) + if call_id: + index = self._get_or_create_tool_index(call_id) + name = item.get("name") if isinstance(item.get("name"), str) else "" + if name: + self._tool_names_by_call_id[call_id] = name + + initial_args = item.get("arguments") + if not isinstance(initial_args, str): + initial_args = "" + + tool_delta = { + "tool_calls": [ + { + "index": index, + "id": call_id, + "type": "function", + "function": { + "name": name, + "arguments": initial_args, + }, + } + ] + } + chunks.append(self._build_chunk(delta=tool_delta)) + return chunks + + if event_type == "response.function_call_arguments.delta": + call_id = self._extract_tool_call_id(event) + delta = event.get("delta") + if call_id and isinstance(delta, str): + index = self._get_or_create_tool_index(call_id) + name = self._tool_names_by_call_id.get(call_id, "") + tool_delta = { + "tool_calls": [ + { + "index": index, + "id": call_id, + "type": "function", + "function": { + "name": name, + "arguments": delta, + }, + } + ] + } + chunks.append(self._build_chunk(delta=tool_delta)) + return chunks + + if event_type == "response.function_call_arguments.done": + call_id = self._extract_tool_call_id(event) + if call_id: + index = self._get_or_create_tool_index(call_id) + name = self._tool_names_by_call_id.get(call_id, "") + arguments = event.get("arguments") + if not isinstance(arguments, str): + arguments = "" + + tool_delta = { + "tool_calls": [ + { 
+ "index": index, + "id": call_id, + "type": "function", + "function": { + "name": name, + "arguments": arguments, + }, + } + ] + } + chunks.append(self._build_chunk(delta=tool_delta)) + return chunks + + text_delta = self._extract_text_delta(event) + if text_delta: + chunks.append(self._build_chunk(delta={"content": text_delta})) + return chunks + + if event_type in ("error", "response.failed"): + error_payload = self._extract_error_payload(event) + status_code = self._classify_error_status(error_payload) + message = ( + error_payload.get("message") + if isinstance(error_payload.get("message"), str) + else f"Codex stream failed ({event_type})" + ) + raise CodexStreamError( + message=message, + status_code=status_code, + error_body=json.dumps({"error": error_payload} if error_payload else event), + ) + + if event_type in ("response.completed", "response.incomplete"): + usage = self._extract_usage(event) + status = self._get_response_status(event) + finish_reason = "stop" + + if status == "incomplete": + incomplete_details = None + if isinstance(response, dict): + incomplete_details = response.get("incomplete_details") + reason = None + if isinstance(incomplete_details, dict): + reason = incomplete_details.get("reason") + if isinstance(reason, str): + finish_reason = self._map_incomplete_reason(reason) + else: + finish_reason = "length" + + chunks.append( + self._build_chunk(delta={}, finish_reason=finish_reason, usage=usage) + ) + return chunks + + # Ignore all other event families safely + return chunks + + +class OpenAICodexProvider(OpenAICodexAuthBase, ProviderInterface): + """OpenAI Codex provider via ChatGPT backend `/codex/responses`.""" + + skip_cost_calculation = True + default_rotation_mode: str = "sequential" + provider_env_name: str = "openai_codex" + + # Conservative placeholders (MVP-safe defaults) + tier_priorities = { + "unknown": 10, + } + + usage_reset_configs = { + "default": UsageResetConfigDef( + window_seconds=24 * 60 * 60, + mode="credential", + description=( + "MVP fallback window. Tune from production telemetry " + "(tracked in PLAN-openai-codex.md §6)." + ), + field_name="daily", + ) + } + + model_quota_groups: QuotaGroupMap = { + # Intentionally empty for MVP. Shared quota groups will be added after + # telemetry validation (tracked in PLAN-openai-codex.md §6). 
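+        # Hypothetical shape of a future entry (illustrative only, not wired
+        # up; the group key name is a guess):
+        #   "gpt-5.1-codex": "codex-shared-daily",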
+ } + + def __init__(self): + super().__init__() + self.model_definitions = ModelDefinitions() + + def has_custom_logic(self) -> bool: + return True + + # ========================================================================= + # Model discovery + # ========================================================================= + + async def get_models(self, credential: str, client: httpx.AsyncClient) -> List[str]: + """ + Returns OpenAI Codex models from: + 1) OPENAI_CODEX_MODELS env definitions (priority) + 2) hardcoded fallback list + 3) optional dynamic /models discovery (best-effort) + """ + models: List[str] = [] + env_model_ids = set() + + static_models = self.model_definitions.get_all_provider_models("openai_codex") + if static_models: + for model in static_models: + model_name = model.split("/")[-1] if "/" in model else model + model_id = self.model_definitions.get_model_id("openai_codex", model_name) + models.append(model) + if model_id: + env_model_ids.add(model_id) + + lib_logger.info( + f"Loaded {len(static_models)} static models for openai_codex from OPENAI_CODEX_MODELS" + ) + + for model_id in HARDCODED_MODELS: + if model_id not in env_model_ids: + models.append(f"openai_codex/{model_id}") + env_model_ids.add(model_id) + + # Optional dynamic discovery (Codex backend may not support this endpoint) + try: + await self.initialize_token(credential) + creds = await self._load_credentials(credential) + access_token, account_id = self._extract_runtime_auth(creds) + + api_base = self._resolve_api_base() + models_url = f"{api_base.rstrip('/')}/models" + + headers = self._build_request_headers( + access_token=access_token, + account_id=account_id, + stream=False, + ) + + response = await client.get(models_url, headers=headers, timeout=20.0) + response.raise_for_status() + + payload = response.json() + data = payload.get("data") if isinstance(payload, dict) else payload + + discovered = 0 + if isinstance(data, list): + for item in data: + model_id = None + if isinstance(item, dict): + model_id = item.get("id") or item.get("name") + elif isinstance(item, str): + model_id = item + + if isinstance(model_id, str) and model_id and model_id not in env_model_ids: + models.append(f"openai_codex/{model_id}") + env_model_ids.add(model_id) + discovered += 1 + + if discovered > 0: + lib_logger.debug( + f"Discovered {discovered} additional models for openai_codex via dynamic /models" + ) + + except Exception as e: + lib_logger.debug(f"Dynamic model discovery failed for openai_codex: {e}") + + return models + + async def initialize_credentials(self, credential_paths: List[str]) -> None: + """Preload credentials and queue refresh/reauth where needed.""" + ready = 0 + refreshing = 0 + reauth_required = 0 + + for cred_path in credential_paths: + try: + creds = await self._load_credentials(cred_path) + self._ensure_proxy_metadata(creds) + + if not creds.get("refresh_token"): + await self._queue_refresh(cred_path, force=True, needs_reauth=True) + reauth_required += 1 + continue + + if self._is_token_expired(creds): + await self._queue_refresh(cred_path, force=False, needs_reauth=False) + refreshing += 1 + else: + ready += 1 + + # ensure metadata caches are populated + self._credentials_cache[cred_path] = creds + + except Exception as e: + lib_logger.warning( + f"Failed to initialize OpenAI Codex credential '{cred_path}': {e}" + ) + await self._queue_refresh(cred_path, force=True, needs_reauth=True) + reauth_required += 1 + + lib_logger.info( + "OpenAI Codex credential initialization: " + f"ready={ready}, 
refreshing={refreshing}, reauth_required={reauth_required}" + ) + + # ========================================================================= + # Request mapping helpers + # ========================================================================= + + def _resolve_api_base(self) -> str: + return os.getenv("OPENAI_CODEX_API_BASE", DEFAULT_API_BASE) + + def _extract_runtime_auth(self, creds: Dict[str, Any]) -> Tuple[str, str]: + access_token = creds.get("access_token") + if not isinstance(access_token, str) or not access_token: + raise ValueError("OpenAI Codex credential missing access_token") + + metadata = creds.get("_proxy_metadata", {}) + account_id = metadata.get("account_id") + + if not account_id: + # Fallback parse from access_token + payload = self._decode_jwt_unverified(access_token) + if payload: + direct = payload.get("https://api.openai.com/auth.chatgpt_account_id") + nested = None + claim = payload.get(AUTH_CLAIM) + if isinstance(claim, dict): + nested = claim.get("chatgpt_account_id") + + account_id = direct or nested + + if not isinstance(account_id, str) or not account_id: + raise ValueError( + "OpenAI Codex credential missing account_id. Re-authenticate to refresh token metadata." + ) + + return access_token, account_id + + def _build_request_headers( + self, + *, + access_token: str, + account_id: str, + stream: bool, + extra_headers: Optional[Dict[str, str]] = None, + ) -> Dict[str, str]: + headers = { + "Authorization": f"Bearer {access_token}", + "chatgpt-account-id": account_id, + "OpenAI-Beta": "responses=experimental", + "originator": "pi", + "Content-Type": "application/json", + "Accept": "text/event-stream" if stream else "application/json", + "User-Agent": "LLM-API-Key-Proxy/OpenAICodex", + } + + if extra_headers: + headers.update({k: str(v) for k, v in extra_headers.items()}) + + return headers + + def _extract_text(self, content: Any) -> str: + if content is None: + return "" + + if isinstance(content, str): + return content + + if isinstance(content, list): + parts: List[str] = [] + for item in content: + if isinstance(item, dict): + # OpenAI chat content blocks + if item.get("type") == "text" and isinstance(item.get("text"), str): + parts.append(item["text"]) + elif item.get("type") in {"input_text", "output_text"} and isinstance( + item.get("text"), str + ): + parts.append(item["text"]) + elif item.get("type") == "refusal" and isinstance(item.get("refusal"), str): + parts.append(item["refusal"]) + elif isinstance(item, str): + parts.append(item) + return "\n".join(parts) + + if isinstance(content, dict): + if isinstance(content.get("text"), str): + return content["text"] + return json.dumps(content) + + return str(content) + + def _convert_user_content_to_input_parts(self, content: Any) -> List[Dict[str, Any]]: + if isinstance(content, str): + return [{"type": "input_text", "text": content}] + + if isinstance(content, list): + parts: List[Dict[str, Any]] = [] + for item in content: + if not isinstance(item, dict): + continue + + item_type = item.get("type") + if item_type in ("text", "input_text") and isinstance(item.get("text"), str): + parts.append({"type": "input_text", "text": item["text"]}) + elif item_type == "image_url": + image_url = item.get("image_url") + if isinstance(image_url, dict): + image_url = image_url.get("url") + if isinstance(image_url, str) and image_url: + parts.append({"type": "input_image", "image_url": image_url, "detail": "auto"}) + elif item_type == "input_image": + image_url = item.get("image_url") + if isinstance(image_url, 
str) and image_url: + part = {"type": "input_image", "image_url": image_url} + if isinstance(item.get("detail"), str): + part["detail"] = item["detail"] + else: + part["detail"] = "auto" + parts.append(part) + + if parts: + return parts + + text = self._extract_text(content) + return [{"type": "input_text", "text": text}] + + def _convert_messages_to_codex_input( + self, + messages: List[Dict[str, Any]], + ) -> Tuple[str, List[Dict[str, Any]]]: + instructions: List[str] = [] + codex_input: List[Dict[str, Any]] = [] + + for message in messages: + role = message.get("role") + content = message.get("content") + + if role in ("system", "developer"): + text = self._extract_text(content) + if text.strip(): + instructions.append(text.strip()) + continue + + if role == "user": + codex_input.append( + { + "role": "user", + "content": self._convert_user_content_to_input_parts(content), + } + ) + continue + + if role == "assistant": + text = self._extract_text(content) + if text.strip(): + codex_input.append( + { + "role": "assistant", + "content": [{"type": "output_text", "text": text}], + } + ) + + # Carry forward assistant tool calls where provided + tool_calls = message.get("tool_calls") + if isinstance(tool_calls, list): + for tool_call in tool_calls: + if not isinstance(tool_call, dict): + continue + + call_id = tool_call.get("id") + function = tool_call.get("function", {}) + if not isinstance(function, dict): + continue + + name = function.get("name") + arguments = function.get("arguments") + if not isinstance(arguments, str): + arguments = json.dumps(arguments or {}) + + if isinstance(call_id, str) and isinstance(name, str): + codex_input.append( + { + "type": "function_call", + "call_id": call_id, + "name": name, + "arguments": arguments, + } + ) + continue + + if role == "tool": + call_id = message.get("tool_call_id") + if not isinstance(call_id, str) or not call_id: + continue + + output_text = self._extract_text(content) + codex_input.append( + { + "type": "function_call_output", + "call_id": call_id, + "output": output_text, + } + ) + + # Codex endpoint currently requires non-empty instructions + instructions_text = "\n\n".join(instructions).strip() + if not instructions_text: + instructions_text = "You are a helpful assistant." 
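+
+        # Presumably the endpoint also rejects an empty input array (assumed
+        # by analogy with its non-empty instructions requirement), so fall
+        # back to a single blank user turn when nothing was convertible.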
+
+        if not codex_input:
+            codex_input = [
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "input_text",
+                            "text": "",
+                        }
+                    ],
+                }
+            ]
+
+        return instructions_text, codex_input
+
+    def _convert_tools(self, tools: Any) -> Optional[List[Dict[str, Any]]]:
+        if not isinstance(tools, list) or not tools:
+            return None
+
+        converted: List[Dict[str, Any]] = []
+
+        for tool in tools:
+            if not isinstance(tool, dict):
+                continue
+
+            # OpenAI chat format: {type:"function", function:{name,description,parameters}}
+            if tool.get("type") == "function" and isinstance(tool.get("function"), dict):
+                fn = tool["function"]
+                name = fn.get("name")
+                if not isinstance(name, str) or not name:
+                    continue
+
+                schema = fn.get("parameters")
+                if not isinstance(schema, dict):
+                    schema = {"type": "object", "properties": {}}
+
+                # Strip the strict-mode "additionalProperties" flag if present
+                schema = copy.deepcopy(schema)
+                schema.pop("additionalProperties", None)
+
+                converted.append(
+                    {
+                        "type": "function",
+                        "name": name,
+                        "description": fn.get("description", ""),
+                        "parameters": schema,
+                    }
+                )
+                continue
+
+            # Already in responses format
+            if tool.get("type") == "function" and isinstance(tool.get("name"), str):
+                converted.append(copy.deepcopy(tool))
+
+        return converted or None
+
+    def _normalize_tool_choice(self, tool_choice: Any, has_tools: bool) -> Any:
+        if not has_tools:
+            return None
+
+        if isinstance(tool_choice, str):
+            # Codex endpoint handles "auto" reliably; map "required" (and any
+            # unrecognized string) to "auto"
+            if tool_choice in {"auto", "none"}:
+                return tool_choice
+            return "auto"
+
+        if isinstance(tool_choice, dict):
+            if tool_choice.get("type") == "function":
+                fn = tool_choice.get("function")
+                if isinstance(fn, dict) and isinstance(fn.get("name"), str):
+                    return {"type": "function", "name": fn["name"]}
+            if isinstance(tool_choice.get("name"), str):
+                return {"type": "function", "name": tool_choice["name"]}
+
+        return "auto"
+
+    def _build_codex_payload(self, model_name: str, **kwargs) -> Dict[str, Any]:
+        messages = kwargs.get("messages") or []
+        instructions, codex_input = self._convert_messages_to_codex_input(messages)
+
+        payload: Dict[str, Any] = {
+            "model": model_name,
+            "stream": True,  # Endpoint currently requires stream=true
+            "store": False,
+            "instructions": instructions,
+            "input": codex_input,
+            "tool_choice": "auto",
+            "parallel_tool_calls": True,
+        }
+
+        # Keep verbosity at medium by default (gpt-5.1-codex rejects low)
+        text_verbosity = os.getenv("OPENAI_CODEX_TEXT_VERBOSITY", "medium")
+        payload["text"] = {"verbosity": text_verbosity}
+
+        # OpenAI chat params -> Codex responses equivalents
+        if kwargs.get("temperature") is not None:
+            payload["temperature"] = kwargs["temperature"]
+        if kwargs.get("top_p") is not None:
+            payload["top_p"] = kwargs["top_p"]
+        # Note: max_output_tokens is NOT supported by the Codex Responses API
+        # (gpt-5.3-codex returns 400 "Unsupported parameter: max_output_tokens").
+        # Omit it and let the API use its default.
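+
+        # Resulting payload skeleton at this point (illustrative values):
+        #   {"model": "gpt-5.1-codex", "stream": true, "store": false,
+        #    "instructions": "...", "input": [...],
+        #    "text": {"verbosity": "medium"},
+        #    "tool_choice": "auto", "parallel_tool_calls": true}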
+ + converted_tools = self._convert_tools(kwargs.get("tools")) + if converted_tools: + payload["tools"] = converted_tools + payload["tool_choice"] = self._normalize_tool_choice( + kwargs.get("tool_choice"), + has_tools=True, + ) + payload["parallel_tool_calls"] = True + else: + payload.pop("tools", None) + payload.pop("tool_choice", None) + payload.pop("parallel_tool_calls", None) + + # Optional session pinning for cache affinity + session_id = kwargs.get("session_id") or kwargs.get("conversation_id") + if isinstance(session_id, str) and session_id: + payload["prompt_cache_key"] = session_id + payload["prompt_cache_retention"] = "in-memory" + + return payload + + # ========================================================================= + # SSE parsing + response conversion + # ========================================================================= + + async def _iter_sse_events( + self, response: httpx.Response + ) -> AsyncGenerator[Dict[str, Any], None]: + """Parse SSE stream into event dictionaries.""" + event_lines: List[str] = [] + + async for line in response.aiter_lines(): + if line is None: + continue + + if line == "": + if not event_lines: + continue + + data_lines = [] + for entry in event_lines: + if entry.startswith("data:"): + data_lines.append(entry[5:].lstrip()) + + event_lines = [] + if not data_lines: + continue + + payload = "\n".join(data_lines).strip() + if not payload or payload == "[DONE]": + if payload == "[DONE]": + return + continue + + try: + parsed = json.loads(payload) + if isinstance(parsed, dict): + yield parsed + except json.JSONDecodeError: + lib_logger.debug(f"OpenAI Codex SSE non-JSON payload ignored: {payload[:200]}") + continue + + event_lines.append(line) + + # Flush trailing event if stream closes without blank line + if event_lines: + data_lines = [entry[5:].lstrip() for entry in event_lines if entry.startswith("data:")] + payload = "\n".join(data_lines).strip() + if payload and payload != "[DONE]": + try: + parsed = json.loads(payload) + if isinstance(parsed, dict): + yield parsed + except json.JSONDecodeError: + pass + + def _stream_to_completion_response( + self, chunks: List[litellm.ModelResponse] + ) -> litellm.ModelResponse: + """Reassemble streamed chunks into a non-streaming ModelResponse.""" + if not chunks: + raise ValueError("No chunks provided for reassembly") + + final_message: Dict[str, Any] = {"role": "assistant"} + aggregated_tool_calls: Dict[int, Dict[str, Any]] = {} + usage_data = None + chunk_finish_reason = None + + first_chunk = chunks[0] + + for chunk in chunks: + if not hasattr(chunk, "choices") or not chunk.choices: + continue + + choice = chunk.choices[0] + delta = choice.get("delta", {}) + + if "content" in delta and delta["content"] is not None: + final_message["content"] = final_message.get("content", "") + delta["content"] + + if "tool_calls" in delta and delta["tool_calls"]: + for tc_chunk in delta["tool_calls"]: + index = tc_chunk.get("index", 0) + if index not in aggregated_tool_calls: + aggregated_tool_calls[index] = { + "type": "function", + "function": {"name": "", "arguments": ""}, + } + + if tc_chunk.get("id"): + aggregated_tool_calls[index]["id"] = tc_chunk["id"] + + if tc_chunk.get("type"): + aggregated_tool_calls[index]["type"] = tc_chunk["type"] + + if isinstance(tc_chunk.get("function"), dict): + fn = tc_chunk["function"] + if fn.get("name") is not None: + aggregated_tool_calls[index]["function"]["name"] += str(fn["name"]) + if fn.get("arguments") is not None: + 
aggregated_tool_calls[index]["function"]["arguments"] += str( + fn["arguments"] + ) + + if choice.get("finish_reason"): + chunk_finish_reason = choice["finish_reason"] + + for chunk in reversed(chunks): + if hasattr(chunk, "usage") and chunk.usage: + usage_data = chunk.usage + break + + if aggregated_tool_calls: + final_message["tool_calls"] = list(aggregated_tool_calls.values()) + + for field in ["content", "tool_calls", "function_call"]: + if field not in final_message: + final_message[field] = None + + if aggregated_tool_calls: + finish_reason = "tool_calls" + elif chunk_finish_reason: + finish_reason = chunk_finish_reason + else: + finish_reason = "stop" + + final_choice = { + "index": 0, + "message": final_message, + "finish_reason": finish_reason, + } + + final_response_data = { + "id": first_chunk.id, + "object": "chat.completion", + "created": first_chunk.created, + "model": first_chunk.model, + "choices": [final_choice], + "usage": usage_data, + } + + return litellm.ModelResponse(**final_response_data) + + # ========================================================================= + # Main completion flow + # ========================================================================= + + async def acompletion( + self, client: httpx.AsyncClient, **kwargs + ) -> Union[litellm.ModelResponse, AsyncGenerator[litellm.ModelResponse, None]]: + credential_identifier = kwargs.pop("credential_identifier") + transaction_context = kwargs.pop("transaction_context", None) + model = kwargs["model"] + + file_logger = ProviderLogger(transaction_context) + + async def make_request() -> Any: + # Ensure token initialized/refreshed before request + await self.initialize_token(credential_identifier) + creds = await self._load_credentials(credential_identifier) + if self._is_token_expired(creds): + creds = await self._refresh_token(credential_identifier) + + access_token, account_id = self._extract_runtime_auth(creds) + + model_name = model.split("/")[-1] + payload = self._build_codex_payload(model_name=model_name, **kwargs) + + headers = self._build_request_headers( + access_token=access_token, + account_id=account_id, + stream=True, + ) + + url = f"{self._resolve_api_base().rstrip('/')}{RESPONSES_ENDPOINT_PATH}" + file_logger.log_request(payload) + + return client.stream( + "POST", + url, + headers=headers, + json=payload, + timeout=TimeoutConfig.streaming(), + ) + + async def stream_handler( + response_stream: Any, + attempt: int = 1, + ): + try: + async with response_stream as response: + if response.status_code >= 400: + raw_error = await response.aread() + error_text = ( + raw_error.decode("utf-8", "replace") + if isinstance(raw_error, bytes) + else str(raw_error) + ) + + # Try a single forced token refresh on auth failures + if response.status_code in (401, 403) and attempt == 1: + lib_logger.warning( + "OpenAI Codex returned 401/403; forcing refresh and retrying once" + ) + await self._refresh_token(credential_identifier, force=True) + retry_stream = await make_request() + async for chunk in stream_handler(retry_stream, attempt=2): + yield chunk + return + + # Surface typed HTTPStatusError for classify_error() + raise httpx.HTTPStatusError( + f"OpenAI Codex HTTP {response.status_code}: {error_text}", + request=response.request, + response=response, + ) + + translator = CodexSSETranslator(model_id=model) + + async for event in self._iter_sse_events(response): + try: + file_logger.log_response_chunk(json.dumps(event)) + except Exception: + pass + + try: + translated_chunks = 
translator.process_event(event) + except CodexStreamError as stream_error: + synthetic_response = httpx.Response( + status_code=stream_error.status_code, + request=response.request, + text=stream_error.error_body, + ) + raise httpx.HTTPStatusError( + str(stream_error), + request=response.request, + response=synthetic_response, + ) + + for chunk_dict in translated_chunks: + yield litellm.ModelResponse(**chunk_dict) + + except httpx.HTTPStatusError: + raise + except Exception as e: + file_logger.log_error(f"Error during OpenAI Codex stream processing: {e}") + raise + + async def logging_stream_wrapper(): + chunks: List[litellm.ModelResponse] = [] + try: + async for chunk in stream_handler(await make_request()): + chunks.append(chunk) + yield chunk + finally: + if chunks: + try: + final_response = self._stream_to_completion_response(chunks) + if hasattr(final_response, "model_dump"): + file_logger.log_final_response(final_response.model_dump()) + else: + file_logger.log_final_response(final_response.dict()) + except Exception: + pass + + if kwargs.get("stream"): + return logging_stream_wrapper() + + async def non_stream_wrapper() -> litellm.ModelResponse: + chunks = [chunk async for chunk in logging_stream_wrapper()] + return self._stream_to_completion_response(chunks) + + return await non_stream_wrapper() + + # ========================================================================= + # Provider-specific quota parsing + # ========================================================================= + + @staticmethod + def parse_quota_error( + error: Exception, + error_body: Optional[str] = None, + ) -> Optional[Dict[str, Any]]: + """ + Parse OpenAI Codex quota/rate-limit errors. + + Supports: + - Retry-After header + - error.resets_at (unix seconds) + - error.retry_after / retry_after_seconds fields + - usage_limit / quota / rate_limit style error codes + """ + now_ts = time.time() + + response = None + if isinstance(error, httpx.HTTPStatusError): + response = error.response + + status_code = response.status_code if response is not None else None + headers = response.headers if response is not None else {} + + retry_after: Optional[int] = None + retry_header = headers.get("Retry-After") or headers.get("retry-after") + if retry_header: + try: + retry_after = max(1, int(float(retry_header))) + except ValueError: + retry_after = None + + body_text = error_body + if body_text is None and response is not None: + try: + body_text = response.text + except Exception: + body_text = None + + if not body_text: + if retry_after is not None: + return { + "retry_after": retry_after, + "reason": "RATE_LIMIT", + "reset_timestamp": None, + "quota_reset_timestamp": None, + } + return None + + parsed = None + try: + parsed = json.loads(body_text) + except Exception: + parsed = None + + if not isinstance(parsed, dict): + if retry_after is not None: + return { + "retry_after": retry_after, + "reason": "RATE_LIMIT", + "reset_timestamp": None, + "quota_reset_timestamp": None, + } + return None + + err = parsed.get("error") if isinstance(parsed.get("error"), dict) else {} + + code_raw = str(err.get("code", "") or "") + err_type_raw = str(err.get("type", "") or "") + message_raw = str(err.get("message", "") or "") + + code = code_raw.lower() + err_type = err_type_raw.lower() + + def _looks_like_rate_limit() -> bool: + if status_code == 429: + return True + if code and RATE_LIMIT_CODE_PATTERN.match(code): + return True + if err_type and RATE_LIMIT_TYPE_PATTERN.match(err_type): + return True + if message_raw and 
RATE_LIMIT_MESSAGE_PATTERN.search(message_raw): + return True + return False + + # Look for codex-specific reset timestamp + reset_ts = err.get("resets_at") + quota_reset_timestamp: Optional[float] = None + reset_timestamp_iso: Optional[str] = None + if isinstance(reset_ts, (int, float)): + quota_reset_timestamp = float(reset_ts) + retry_after_from_reset = int(max(1, quota_reset_timestamp - now_ts)) + retry_after = retry_after or retry_after_from_reset + reset_timestamp_iso = datetime.fromtimestamp( + quota_reset_timestamp, tz=timezone.utc + ).isoformat() + + if retry_after is None: + for key in ("retry_after", "retry_after_seconds", "retryAfter"): + value = err.get(key) + if isinstance(value, (int, float)): + retry_after = max(1, int(value)) + break + if isinstance(value, str): + try: + retry_after = max(1, int(float(value))) + break + except ValueError: + continue + + if retry_after is None and _looks_like_rate_limit(): + retry_after = 60 + + if retry_after is None: + return None + + reason = ( + str(err.get("code") or err.get("type") or "RATE_LIMIT").upper() + ) + + return { + "retry_after": retry_after, + "reason": reason, + "reset_timestamp": reset_timestamp_iso, + "quota_reset_timestamp": quota_reset_timestamp, + } diff --git a/src/rotator_library/utils/__init__.py b/src/rotator_library/utils/__init__.py index a51d1db7..478afaad 100644 --- a/src/rotator_library/utils/__init__.py +++ b/src/rotator_library/utils/__init__.py @@ -20,6 +20,15 @@ safe_read_json, safe_mkdir, ) +from .openai_codex_jwt import ( + AUTH_CLAIM, + ACCOUNT_ID_CLAIM, + decode_jwt_unverified, + extract_account_id_from_payload, + extract_explicit_email_from_payload, + extract_email_from_payload, + extract_expiry_ms_from_payload, +) from .suppress_litellm_warnings import suppress_litellm_serialization_warnings __all__ = [ @@ -37,5 +46,12 @@ "safe_log_write", "safe_read_json", "safe_mkdir", + "AUTH_CLAIM", + "ACCOUNT_ID_CLAIM", + "decode_jwt_unverified", + "extract_account_id_from_payload", + "extract_explicit_email_from_payload", + "extract_email_from_payload", + "extract_expiry_ms_from_payload", "suppress_litellm_serialization_warnings", ] diff --git a/src/rotator_library/utils/openai_codex_jwt.py b/src/rotator_library/utils/openai_codex_jwt.py new file mode 100644 index 00000000..c8dd9012 --- /dev/null +++ b/src/rotator_library/utils/openai_codex_jwt.py @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: LGPL-3.0-only +# Copyright (c) 2026 Mirrowel + +"""Shared JWT parsing helpers for OpenAI Codex OAuth credentials. + +These helpers intentionally decode JWT payloads without signature verification. +They are only used for non-authoritative metadata extraction (account/email/exp), +not for auth decisions. 
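+
+Example (illustrative, given a loaded credential dict ``creds``):
+
+    payload = decode_jwt_unverified(creds["access_token"])
+    account_id = extract_account_id_from_payload(payload)
+    expiry_ms = extract_expiry_ms_from_payload(payload)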
+""" + +import base64 +import json +from typing import Any, Dict, Optional + +AUTH_CLAIM = "https://api.openai.com/auth" +ACCOUNT_ID_CLAIM = "https://api.openai.com/auth.chatgpt_account_id" + + +def decode_jwt_unverified(token: str) -> Optional[Dict[str, Any]]: + """Decode JWT payload without signature verification.""" + if not token or not isinstance(token, str): + return None + + parts = token.split(".") + if len(parts) < 2: + return None + + payload_segment = parts[1] + padding = "=" * (-len(payload_segment) % 4) + + try: + payload_bytes = base64.urlsafe_b64decode(payload_segment + padding) + payload = json.loads(payload_bytes.decode("utf-8")) + return payload if isinstance(payload, dict) else None + except Exception: + return None + + +def extract_account_id_from_payload(payload: Optional[Dict[str, Any]]) -> Optional[str]: + """Extract account ID from known OpenAI Codex JWT claim locations.""" + if not payload: + return None + + # 1) Direct dotted claim format + direct = payload.get(ACCOUNT_ID_CLAIM) + if isinstance(direct, str) and direct.strip(): + return direct.strip() + + # 2) Nested object claim format observed in real tokens + auth_claim = payload.get(AUTH_CLAIM) + if isinstance(auth_claim, dict): + nested = auth_claim.get("chatgpt_account_id") + if isinstance(nested, str) and nested.strip(): + return nested.strip() + + # 3) Fallback organizations[0].id if present + orgs = payload.get("organizations") + if isinstance(orgs, list) and orgs: + first = orgs[0] + if isinstance(first, dict): + org_id = first.get("id") + if isinstance(org_id, str) and org_id.strip(): + return org_id.strip() + + return None + + +def extract_explicit_email_from_payload(payload: Optional[Dict[str, Any]]) -> Optional[str]: + """Extract explicit email claim only (no subject fallback).""" + if not payload: + return None + + email = payload.get("email") + if isinstance(email, str) and email.strip(): + return email.strip() + + return None + + +def extract_email_from_payload(payload: Optional[Dict[str, Any]]) -> Optional[str]: + """Extract email fallback chain: email -> sub.""" + if not payload: + return None + + email = extract_explicit_email_from_payload(payload) + if email: + return email + + sub = payload.get("sub") + if isinstance(sub, str) and sub.strip(): + return sub.strip() + + return None + + +def extract_expiry_ms_from_payload(payload: Optional[Dict[str, Any]]) -> Optional[int]: + """Extract JWT exp claim and convert to milliseconds.""" + if not payload: + return None + + exp = payload.get("exp") + if isinstance(exp, (int, float)): + return int(float(exp) * 1000) + + return None diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..07ec5a39 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,9 @@ +import sys +from pathlib import Path + + +ROOT = Path(__file__).resolve().parents[1] +SRC_DIR = ROOT / "src" + +if str(SRC_DIR) not in sys.path: + sys.path.insert(0, str(SRC_DIR)) diff --git a/tests/fixtures/openai_codex/error_missing_instructions.json b/tests/fixtures/openai_codex/error_missing_instructions.json new file mode 100644 index 00000000..528acb8e --- /dev/null +++ b/tests/fixtures/openai_codex/error_missing_instructions.json @@ -0,0 +1 @@ +{"detail":"Instructions are required"} diff --git a/tests/fixtures/openai_codex/error_stream_required.json b/tests/fixtures/openai_codex/error_stream_required.json new file mode 100644 index 00000000..f90fb371 --- /dev/null +++ b/tests/fixtures/openai_codex/error_stream_required.json @@ -0,0 +1 @@ +{"detail":"Stream must be set to 
true"} diff --git a/tests/fixtures/openai_codex/error_unsupported_verbosity.json b/tests/fixtures/openai_codex/error_unsupported_verbosity.json new file mode 100644 index 00000000..0ab6622e --- /dev/null +++ b/tests/fixtures/openai_codex/error_unsupported_verbosity.json @@ -0,0 +1,8 @@ +{ + "error": { + "message": "Unsupported value: 'low' is not supported with the 'gpt-5.1-codex' model. Supported values are: 'medium'.", + "type": "invalid_request_error", + "param": "text.verbosity", + "code": "unsupported_value" + } +} diff --git a/tests/fixtures/openai_codex/protocol_notes.md b/tests/fixtures/openai_codex/protocol_notes.md new file mode 100644 index 00000000..2aa6f459 --- /dev/null +++ b/tests/fixtures/openai_codex/protocol_notes.md @@ -0,0 +1,72 @@ +# OpenAI Codex protocol capture (2026-02-12) + +Captured against `https://chatgpt.com/backend-api/codex/responses` using a valid Codex OAuth token from `~/.codex/auth.json`. + +## OAuth + +- Authorization endpoint: `https://auth.openai.com/oauth/authorize` +- Token endpoint: `https://auth.openai.com/oauth/token` +- Authorization code token exchange params: + - `grant_type=authorization_code` + - `client_id=app_EMoamEEZ73f0CkXaXp7hrann` + - `redirect_uri=http://localhost:/auth/callback` + - `code_verifier=` +- Refresh params: + - `grant_type=refresh_token` + - `refresh_token=` + - `client_id=app_EMoamEEZ73f0CkXaXp7hrann` + +## Endpoint + request shape + +- Endpoint: `POST /codex/responses` +- Requires `stream=true` (non-stream returns 400 with `{"detail":"Stream must be set to true"}`) +- Requires non-empty `instructions` (missing instructions returns 400 with `{"detail":"Instructions are required"}`) + +Observed working request body fields: + +- `model` +- `stream` (must be `true`) +- `store` (`false`) +- `instructions` +- `input` (Responses input format) +- `text.verbosity` (for `gpt-5.1-codex`, `low` was rejected; `medium` worked) +- `tool_choice` +- `parallel_tool_calls` + +## Headers + +Observed and/or validated for provider implementation: + +- `Authorization: Bearer ` +- `chatgpt-account-id: ` +- `OpenAI-Beta: responses=experimental` +- `originator: pi` +- `Accept: text/event-stream` +- `Content-Type: application/json` + +## SSE event taxonomy (observed) + +- `response.created` +- `response.in_progress` +- `response.output_item.added` +- `response.output_item.done` +- `response.content_part.added` +- `response.output_text.delta` +- `response.output_text.done` +- `response.content_part.done` +- `response.completed` + +Provider additionally supports planned aliases/events: + +- `response.content_part.delta` +- `response.function_call_arguments.delta` +- `response.function_call_arguments.done` +- `response.incomplete` +- `response.failed` +- `error` + +## Error body fixtures + +- `error_missing_instructions.json` +- `error_stream_required.json` +- `error_unsupported_verbosity.json` diff --git a/tests/fixtures/openai_codex/response_completed_event.json b/tests/fixtures/openai_codex/response_completed_event.json new file mode 100644 index 00000000..22f83f6a --- /dev/null +++ b/tests/fixtures/openai_codex/response_completed_event.json @@ -0,0 +1,77 @@ +{ + "type": "response.completed", + "response": { + "id": "id_redacted_10", + "object": "response", + "created_at": 1770926997, + "status": "completed", + "background": false, + "completed_at": 1770926998, + "error": null, + "frequency_penalty": 0.0, + "incomplete_details": null, + "instructions": "You are a concise assistant.", + "max_output_tokens": null, + "max_tool_calls": null, + "model": 
"gpt-5.1-codex", + "output": [ + { + "id": "id_redacted_11", + "type": "reasoning", + "summary": [] + }, + { + "id": "id_redacted_12", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "pong" + } + ], + "role": "assistant" + } + ], + "parallel_tool_calls": true, + "presence_penalty": 0.0, + "previous_response_id": null, + "prompt_cache_key": "prompt_cache_key_redacted_4", + "prompt_cache_retention": null, + "reasoning": { + "effort": "medium", + "summary": null + }, + "safety_identifier": "safety_identifier_redacted_4", + "service_tier": "default", + "store": false, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 21, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 13, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 34 + }, + "user": null, + "metadata": {} + }, + "sequence_number": 10 +} \ No newline at end of file diff --git a/tests/fixtures/openai_codex/stream_content_part_delta_events.json b/tests/fixtures/openai_codex/stream_content_part_delta_events.json new file mode 100644 index 00000000..e90034cc --- /dev/null +++ b/tests/fixtures/openai_codex/stream_content_part_delta_events.json @@ -0,0 +1,44 @@ +[ + { + "type": "response.created", + "response": { + "id": "resp_delta_1", + "created_at": 1770927001, + "status": "in_progress" + } + }, + { + "type": "response.output_item.added", + "item": { + "id": "msg_1", + "type": "message", + "status": "in_progress", + "role": "assistant" + } + }, + { + "type": "response.content_part.delta", + "item_id": "msg_1", + "delta": "Hello" + }, + { + "type": "response.content_part.delta", + "item_id": "msg_1", + "delta": " world" + }, + { + "type": "response.incomplete", + "response": { + "id": "resp_delta_1", + "status": "incomplete", + "incomplete_details": { + "reason": "max_output_tokens" + }, + "usage": { + "input_tokens": 10, + "output_tokens": 20, + "total_tokens": 30 + } + } + } +] diff --git a/tests/fixtures/openai_codex/stream_success_events.json b/tests/fixtures/openai_codex/stream_success_events.json new file mode 100644 index 00000000..c0028c2b --- /dev/null +++ b/tests/fixtures/openai_codex/stream_success_events.json @@ -0,0 +1,269 @@ +[ + { + "type": "response.created", + "response": { + "id": "id_redacted_1", + "object": "response", + "created_at": 1770926997, + "status": "in_progress", + "background": false, + "completed_at": null, + "error": null, + "frequency_penalty": 0.0, + "incomplete_details": null, + "instructions": "You are a concise assistant.", + "max_output_tokens": null, + "max_tool_calls": null, + "model": "gpt-5.1-codex", + "output": [], + "parallel_tool_calls": true, + "presence_penalty": 0.0, + "previous_response_id": null, + "prompt_cache_key": "prompt_cache_key_redacted_1", + "prompt_cache_retention": null, + "reasoning": { + "effort": "medium", + "summary": null + }, + "safety_identifier": "safety_identifier_redacted_1", + "service_tier": "auto", + "store": false, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": null, + "user": null, + "metadata": {} + }, + "sequence_number": 0 + }, + { + "type": "response.in_progress", + "response": 
{ + "id": "id_redacted_2", + "object": "response", + "created_at": 1770926997, + "status": "in_progress", + "background": false, + "completed_at": null, + "error": null, + "frequency_penalty": 0.0, + "incomplete_details": null, + "instructions": "You are a concise assistant.", + "max_output_tokens": null, + "max_tool_calls": null, + "model": "gpt-5.1-codex", + "output": [], + "parallel_tool_calls": true, + "presence_penalty": 0.0, + "previous_response_id": null, + "prompt_cache_key": "prompt_cache_key_redacted_2", + "prompt_cache_retention": null, + "reasoning": { + "effort": "medium", + "summary": null + }, + "safety_identifier": "safety_identifier_redacted_2", + "service_tier": "auto", + "store": false, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": null, + "user": null, + "metadata": {} + }, + "sequence_number": 1 + }, + { + "type": "response.output_item.added", + "item": { + "id": "id_redacted_3", + "type": "reasoning", + "summary": [] + }, + "output_index": 0, + "sequence_number": 2 + }, + { + "type": "response.output_item.done", + "item": { + "id": "id_redacted_4", + "type": "reasoning", + "summary": [] + }, + "output_index": 0, + "sequence_number": 3 + }, + { + "type": "response.output_item.added", + "item": { + "id": "id_redacted_5", + "type": "message", + "status": "in_progress", + "content": [], + "role": "assistant" + }, + "output_index": 1, + "sequence_number": 4 + }, + { + "type": "response.content_part.added", + "content_index": 0, + "item_id": "item_id_redacted_1", + "output_index": 1, + "part": { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "" + }, + "sequence_number": 5 + }, + { + "type": "response.output_text.delta", + "content_index": 0, + "delta": "pong", + "item_id": "item_id_redacted_2", + "logprobs": [], + "obfuscation": "obfuscation_redacted_1", + "output_index": 1, + "sequence_number": 6 + }, + { + "type": "response.output_text.done", + "content_index": 0, + "item_id": "item_id_redacted_3", + "logprobs": [], + "output_index": 1, + "sequence_number": 7, + "text": "pong" + }, + { + "type": "response.content_part.done", + "content_index": 0, + "item_id": "item_id_redacted_4", + "output_index": 1, + "part": { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "pong" + }, + "sequence_number": 8 + }, + { + "type": "response.output_item.done", + "item": { + "id": "id_redacted_6", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "pong" + } + ], + "role": "assistant" + }, + "output_index": 1, + "sequence_number": 9 + }, + { + "type": "response.completed", + "response": { + "id": "id_redacted_7", + "object": "response", + "created_at": 1770926997, + "status": "completed", + "background": false, + "completed_at": 1770926998, + "error": null, + "frequency_penalty": 0.0, + "incomplete_details": null, + "instructions": "You are a concise assistant.", + "max_output_tokens": null, + "max_tool_calls": null, + "model": "gpt-5.1-codex", + "output": [ + { + "id": "id_redacted_8", + "type": "reasoning", + "summary": [] + }, + { + "id": "id_redacted_9", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "pong" + } + ], + "role": "assistant" + } + ], + "parallel_tool_calls": true, + 
"presence_penalty": 0.0, + "previous_response_id": null, + "prompt_cache_key": "prompt_cache_key_redacted_3", + "prompt_cache_retention": null, + "reasoning": { + "effort": "medium", + "summary": null + }, + "safety_identifier": "safety_identifier_redacted_3", + "service_tier": "default", + "store": false, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 21, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 13, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 34 + }, + "user": null, + "metadata": {} + }, + "sequence_number": 10 + } +] \ No newline at end of file diff --git a/tests/fixtures/openai_codex/stream_tool_call_events.json b/tests/fixtures/openai_codex/stream_tool_call_events.json new file mode 100644 index 00000000..e2aca1f5 --- /dev/null +++ b/tests/fixtures/openai_codex/stream_tool_call_events.json @@ -0,0 +1,50 @@ +[ + { + "type": "response.created", + "response": { + "id": "resp_tool_1", + "created_at": 1770927000, + "status": "in_progress" + } + }, + { + "type": "response.output_item.added", + "item": { + "id": "call_item_1", + "type": "function_call", + "call_id": "call_1", + "name": "get_weather", + "arguments": "" + } + }, + { + "type": "response.function_call_arguments.delta", + "call_id": "call_1", + "delta": "{\"city\":\"San" + }, + { + "type": "response.function_call_arguments.delta", + "call_id": "call_1", + "delta": " Francisco\"}" + }, + { + "type": "response.function_call_arguments.done", + "call_id": "call_1", + "arguments": "{\"city\":\"San Francisco\"}" + }, + { + "type": "response.completed", + "response": { + "id": "resp_tool_1", + "status": "incomplete", + "incomplete_details": { + "reason": "tool_calls" + }, + "usage": { + "input_tokens": 50, + "output_tokens": 10, + "total_tokens": 60 + } + } + } +] diff --git a/tests/test_openai_codex_auth.py b/tests/test_openai_codex_auth.py new file mode 100644 index 00000000..073265f2 --- /dev/null +++ b/tests/test_openai_codex_auth.py @@ -0,0 +1,461 @@ +import asyncio +import base64 +import json +import time +from pathlib import Path + +import httpx +import pytest +import respx + +from rotator_library.error_handler import CredentialNeedsReauthError +from rotator_library.providers.openai_codex_auth_base import ( + CALLBACK_PATH, + LEGACY_CALLBACK_PATH, + TOKEN_ENDPOINT, + OpenAICodexAuthBase, +) + + +def _build_jwt(payload: dict) -> str: + header = {"alg": "HS256", "typ": "JWT"} + + def b64url(data: dict) -> str: + raw = json.dumps(data, separators=(",", ":")).encode("utf-8") + return base64.urlsafe_b64encode(raw).decode("utf-8").rstrip("=") + + return f"{b64url(header)}.{b64url(payload)}.signature" + + +def test_callback_paths_match_codex_oauth_client_registration(): + assert CALLBACK_PATH == "/auth/callback" + assert LEGACY_CALLBACK_PATH == "/oauth2callback" + + +def test_decode_jwt_helper_valid_token(): + auth = OpenAICodexAuthBase() + payload = { + "sub": "user-123", + "email": "user@example.com", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_123"}, + } + token = _build_jwt(payload) + + decoded = auth._decode_jwt_unverified(token) + assert decoded is not None + assert decoded["sub"] == "user-123" + + +def test_decode_jwt_helper_malformed_token(): + auth = OpenAICodexAuthBase() + + assert auth._decode_jwt_unverified("not-a-jwt") is None 
+ assert auth._decode_jwt_unverified("a.b") is None + + +def test_decode_jwt_helper_missing_claims_fallbacks(): + auth = OpenAICodexAuthBase() + + payload = {"sub": "fallback-sub", "exp": int(time.time()) + 300} + token = _build_jwt(payload) + + decoded = auth._decode_jwt_unverified(token) + email = auth._extract_email_from_payload(decoded) + account_id = auth._extract_account_id_from_payload(decoded) + + assert email == "fallback-sub" # email -> sub fallback chain + assert account_id is None + + +def test_ensure_proxy_metadata_prefers_id_token_explicit_email(): + auth = OpenAICodexAuthBase() + + access_payload = { + "sub": "workspace-sub-shared", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_workspace"}, + } + id_payload = { + "email": "real-user@example.com", + "sub": "user-sub-123", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_workspace"}, + } + + creds = { + "access_token": _build_jwt(access_payload), + "id_token": _build_jwt(id_payload), + "refresh_token": "rt_test", + } + + auth._ensure_proxy_metadata(creds) + + assert creds["_proxy_metadata"]["email"] == "real-user@example.com" + assert creds["_proxy_metadata"]["account_id"] == "acct_workspace" + + +def test_expiry_logic_with_proactive_buffer_and_true_expiry(): + auth = OpenAICodexAuthBase() + + now_ms = int(time.time() * 1000) + + # still valid (outside proactive buffer) + fresh = {"expiry_date": now_ms + 20 * 60 * 1000} + assert auth._is_token_expired(fresh) is False + assert auth._is_token_truly_expired(fresh) is False + + # proactive refresh window (expired for refresh, still truly valid) + near_expiry = {"expiry_date": now_ms + 60 * 1000} + assert auth._is_token_expired(near_expiry) is True + assert auth._is_token_truly_expired(near_expiry) is False + + # truly expired + expired = {"expiry_date": now_ms - 60 * 1000} + assert auth._is_token_expired(expired) is True + assert auth._is_token_truly_expired(expired) is True + + +@pytest.mark.asyncio +async def test_env_loading_legacy_and_numbered(monkeypatch): + auth = OpenAICodexAuthBase() + + payload = { + "sub": "env-user", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_env"}, + } + access = _build_jwt(payload) + refresh = "rt_env" + + monkeypatch.setenv("OPENAI_CODEX_ACCESS_TOKEN", access) + monkeypatch.setenv("OPENAI_CODEX_REFRESH_TOKEN", refresh) + + # legacy load + legacy = auth._load_from_env("0") + assert legacy is not None + assert legacy["access_token"] == access + assert legacy["_proxy_metadata"]["loaded_from_env"] is True + assert legacy["_proxy_metadata"]["account_id"] == "acct_env" + + # numbered load via env:// path + payload_n = { + "email": "numbered@example.com", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_num"}, + } + access_n = _build_jwt(payload_n) + monkeypatch.setenv("OPENAI_CODEX_1_ACCESS_TOKEN", access_n) + monkeypatch.setenv("OPENAI_CODEX_1_REFRESH_TOKEN", "rt_num") + + creds = await auth._load_credentials("env://openai_codex/1") + assert creds["access_token"] == access_n + assert creds["_proxy_metadata"]["env_credential_index"] == "1" + assert creds["_proxy_metadata"]["account_id"] == "acct_num" + + +@pytest.mark.asyncio +async def test_save_load_round_trip_with_proxy_metadata(tmp_path: Path): + auth = OpenAICodexAuthBase() + cred_path = tmp_path / "openai_codex_oauth_1.json" + + payload = { + "email": "roundtrip@example.com", + "exp": int(time.time()) + 
3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_roundtrip"}, + } + access = _build_jwt(payload) + + creds = { + "access_token": access, + "refresh_token": "rt_roundtrip", + "id_token": _build_jwt(payload), + "expiry_date": int((time.time() + 3600) * 1000), + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": "roundtrip@example.com", + "account_id": "acct_roundtrip", + "last_check_timestamp": time.time(), + "loaded_from_env": False, + "env_credential_index": None, + }, + } + + assert await auth._save_credentials(str(cred_path), creds) is True + + # clear cache to verify disk round-trip + auth._credentials_cache.clear() + loaded = await auth._load_credentials(str(cred_path)) + + assert loaded["refresh_token"] == "rt_roundtrip" + assert loaded["_proxy_metadata"]["email"] == "roundtrip@example.com" + assert loaded["_proxy_metadata"]["account_id"] == "acct_roundtrip" + + +@pytest.mark.asyncio +async def test_is_credential_available_reauth_queue_and_ttl_cleanup(): + auth = OpenAICodexAuthBase() + path = "/tmp/openai_codex_oauth_1.json" + + # credential in active re-auth queue => unavailable + auth._unavailable_credentials[path] = time.time() + assert auth.is_credential_available(path) is False + + # stale unavailable entry should auto-clean and become available + auth._unavailable_credentials[path] = time.time() - 999 + auth._queued_credentials.add(path) + assert auth.is_credential_available(path) is True + assert path not in auth._unavailable_credentials + + # truly expired credential should be unavailable + auth._credentials_cache[path] = { + "expiry_date": int((time.time() - 10) * 1000), + "_proxy_metadata": {"loaded_from_env": False}, + } + assert auth.is_credential_available(path) is False + + # let background queue task schedule to avoid un-awaited coroutine warnings + await asyncio.sleep(0) + + +def test_find_existing_credential_identity_allows_same_email_different_account(tmp_path: Path): + auth = OpenAICodexAuthBase() + + existing = tmp_path / "openai_codex_oauth_1.json" + existing.write_text( + json.dumps( + { + "_proxy_metadata": { + "email": "shared@example.com", + "account_id": "acct_original", + } + } + ) + ) + + # Different account_id with same email should NOT be treated as an update target. + match = auth._find_existing_credential_by_identity( + email="shared@example.com", + account_id="acct_new", + base_dir=tmp_path, + ) + assert match is None + + # Exact account_id + email should still match. + match_same_identity = auth._find_existing_credential_by_identity( + email="shared@example.com", + account_id="acct_original", + base_dir=tmp_path, + ) + assert match_same_identity == existing + + # Email fallback should work when account_id is unknown. + match_email_fallback = auth._find_existing_credential_by_identity( + email="shared@example.com", + account_id=None, + base_dir=tmp_path, + ) + assert match_email_fallback == existing + + +def test_find_existing_credential_identity_allows_same_account_different_email(tmp_path: Path): + auth = OpenAICodexAuthBase() + + existing = tmp_path / "openai_codex_oauth_1.json" + existing.write_text( + json.dumps( + { + "_proxy_metadata": { + "email": "first@example.com", + "account_id": "acct_workspace", + } + } + ) + ) + + # Same account_id but different email should not auto-update when both + # identifiers are available (prevents workspace-level collisions). 
+ match = auth._find_existing_credential_by_identity( + email="second@example.com", + account_id="acct_workspace", + base_dir=tmp_path, + ) + assert match is None + + +@pytest.mark.asyncio +async def test_setup_credential_creates_new_file_for_same_email_new_account(tmp_path: Path): + auth = OpenAICodexAuthBase() + + existing = tmp_path / "openai_codex_oauth_1.json" + existing.write_text( + json.dumps( + { + "access_token": "old_access", + "refresh_token": "old_refresh", + "expiry_date": int((time.time() + 3600) * 1000), + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": "shared@example.com", + "account_id": "acct_original", + "loaded_from_env": False, + "env_credential_index": None, + }, + } + ) + ) + + async def fake_initialize_token(_creds): + return { + "access_token": "new_access", + "refresh_token": "new_refresh", + "id_token": "new_id", + "expiry_date": int((time.time() + 3600) * 1000), + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": "shared@example.com", + "account_id": "acct_new", + "loaded_from_env": False, + "env_credential_index": None, + }, + } + + auth.initialize_token = fake_initialize_token + + result = await auth.setup_credential(base_dir=tmp_path) + + assert result.success is True + assert result.is_update is False + assert result.file_path is not None + assert result.file_path.endswith("openai_codex_oauth_2.json") + + files = sorted(p.name for p in tmp_path.glob("openai_codex_oauth_*.json")) + assert files == ["openai_codex_oauth_1.json", "openai_codex_oauth_2.json"] + + +@pytest.mark.asyncio +async def test_setup_credential_creates_new_file_for_same_account_new_email(tmp_path: Path): + auth = OpenAICodexAuthBase() + + existing = tmp_path / "openai_codex_oauth_1.json" + existing.write_text( + json.dumps( + { + "access_token": "old_access", + "refresh_token": "old_refresh", + "expiry_date": int((time.time() + 3600) * 1000), + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": "first@example.com", + "account_id": "acct_workspace", + "loaded_from_env": False, + "env_credential_index": None, + }, + } + ) + ) + + async def fake_initialize_token(_creds): + return { + "access_token": "new_access", + "refresh_token": "new_refresh", + "id_token": "new_id", + "expiry_date": int((time.time() + 3600) * 1000), + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": "second@example.com", + "account_id": "acct_workspace", + "loaded_from_env": False, + "env_credential_index": None, + }, + } + + auth.initialize_token = fake_initialize_token + + result = await auth.setup_credential(base_dir=tmp_path) + + assert result.success is True + assert result.is_update is False + assert result.file_path is not None + assert result.file_path.endswith("openai_codex_oauth_2.json") + + files = sorted(p.name for p in tmp_path.glob("openai_codex_oauth_*.json")) + assert files == ["openai_codex_oauth_1.json", "openai_codex_oauth_2.json"] + + +@pytest.mark.asyncio +async def test_queue_refresh_deduplicates_under_concurrency(monkeypatch): + auth = OpenAICodexAuthBase() + path = "/tmp/openai_codex_oauth_1.json" + + async def no_op_queue_processor_start(): + return None + + monkeypatch.setattr(auth, "_ensure_queue_processor_running", no_op_queue_processor_start) + + await asyncio.gather( + *[ + auth._queue_refresh(path, force=False, needs_reauth=False) + for _ in range(25) + ] + ) + + assert auth._refresh_queue.qsize() == 1 + + queued_path, queued_force = await 
auth._refresh_queue.get() + assert queued_path == path + assert queued_force is False + auth._refresh_queue.task_done() + + +@pytest.mark.asyncio +async def test_refresh_invalid_grant_queues_reauth_sync(tmp_path: Path, monkeypatch): + auth = OpenAICodexAuthBase() + cred_path = tmp_path / "openai_codex_oauth_1.json" + + payload = { + "sub": "refresh-user", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_refresh"}, + } + + cred_path.write_text( + json.dumps( + { + "access_token": _build_jwt(payload), + "refresh_token": "rt_refresh", + "id_token": _build_jwt(payload), + "expiry_date": int((time.time() - 60) * 1000), + "token_uri": "https://auth.openai.com/oauth/token", + "_proxy_metadata": { + "email": "refresh@example.com", + "account_id": "acct_refresh", + "loaded_from_env": False, + "env_credential_index": None, + }, + } + ) + ) + + queued: list[tuple[str, bool, bool]] = [] + + async def capture_queue_refresh(path_arg: str, force: bool = False, needs_reauth: bool = False): + queued.append((path_arg, force, needs_reauth)) + + monkeypatch.setattr(auth, "_queue_refresh", capture_queue_refresh) + + with respx.mock(assert_all_called=True) as mock_router: + mock_router.post(TOKEN_ENDPOINT).mock( + return_value=httpx.Response( + status_code=400, + json={ + "error": "invalid_grant", + "error_description": "refresh token revoked", + }, + ) + ) + + with pytest.raises(CredentialNeedsReauthError): + await auth._refresh_token(str(cred_path), force=True) + + assert queued == [(str(cred_path), True, True)] diff --git a/tests/test_openai_codex_import.py b/tests/test_openai_codex_import.py new file mode 100644 index 00000000..a94c8f5a --- /dev/null +++ b/tests/test_openai_codex_import.py @@ -0,0 +1,217 @@ +import json +import os +import time +from pathlib import Path + +from rotator_library.credential_manager import CredentialManager + + +def _build_jwt(payload: dict) -> str: + import base64 + + header = {"alg": "HS256", "typ": "JWT"} + + def b64url(data: dict) -> str: + raw = json.dumps(data, separators=(",", ":")).encode("utf-8") + return base64.urlsafe_b64encode(raw).decode("utf-8").rstrip("=") + + return f"{b64url(header)}.{b64url(payload)}.sig" + + +def _write_codex_auth_json(path: Path): + payload = { + "email": "single@example.com", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_single"}, + } + data = { + "auth_mode": "oauth", + "OPENAI_API_KEY": None, + "tokens": { + "id_token": _build_jwt(payload), + "access_token": _build_jwt(payload), + "refresh_token": "rt_single", + "account_id": "acct_single", + }, + "last_refresh": "2026-02-12T00:00:00Z", + } + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(data, indent=2)) + + +def _write_codex_accounts_json(path: Path): + payload_a = { + "email": "multi-a@example.com", + "exp": int(time.time()) + 3600, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_a"}, + } + payload_b = { + "email": "multi-b@example.com", + "exp": int(time.time()) + 7200, + "https://api.openai.com/auth": {"chatgpt_account_id": "acct_b"}, + } + + data = { + "schemaVersion": 1, + "activeLabel": "A", + "accounts": [ + { + "label": "A", + "accountId": "acct_a", + "access": _build_jwt(payload_a), + "refresh": "rt_a", + "idToken": _build_jwt(payload_a), + "expires": int((time.time() + 3600) * 1000), + }, + { + "label": "B", + "accountId": "acct_b", + "access": _build_jwt(payload_b), + "refresh": "rt_b", + "idToken": _build_jwt(payload_b), + "expires": 
+            },
+        ],
+    }
+    path.parent.mkdir(parents=True, exist_ok=True)
+    path.write_text(json.dumps(data, indent=2))
+
+
+def test_import_from_codex_auth_and_accounts_formats(tmp_path: Path):
+    oauth_dir = tmp_path / "oauth_creds"
+    manager = CredentialManager(env_vars={}, oauth_dir=oauth_dir)
+
+    auth_json = tmp_path / ".codex" / "auth.json"
+    accounts_json = tmp_path / ".codex-accounts.json"
+    _write_codex_auth_json(auth_json)
+    _write_codex_accounts_json(accounts_json)
+
+    imported = manager._import_openai_codex_cli_credentials(
+        auth_json_path=auth_json,
+        accounts_json_path=accounts_json,
+    )
+
+    # one from auth.json + two from accounts.json
+    assert len(imported) == 3
+
+    imported_files = sorted(oauth_dir.glob("openai_codex_oauth_*.json"))
+    assert len(imported_files) == 3
+
+    payload = json.loads(imported_files[0].read_text())
+    assert payload["refresh_token"].startswith("rt_")
+    assert "_proxy_metadata" in payload
+    assert payload["_proxy_metadata"].get("account_id")
+
+
+def test_explicit_openai_codex_oauth_path_auth_json_is_normalized(tmp_path: Path):
+    oauth_dir = tmp_path / "oauth_creds"
+
+    auth_json = tmp_path / ".codex" / "auth.json"
+    _write_codex_auth_json(auth_json)
+
+    manager = CredentialManager(
+        env_vars={"OPENAI_CODEX_OAUTH_1": str(auth_json)},
+        oauth_dir=oauth_dir,
+    )
+    discovered = manager.discover_and_prepare()
+
+    assert "openai_codex" in discovered
+    assert len(discovered["openai_codex"]) == 1
+
+    imported_file = oauth_dir / "openai_codex_oauth_1.json"
+    payload = json.loads(imported_file.read_text())
+
+    # normalized proxy schema at root level (not nested under "tokens")
+    assert "tokens" not in payload
+    assert isinstance(payload.get("access_token"), str)
+    assert isinstance(payload.get("refresh_token"), str)
+    assert payload.get("token_uri") == "https://auth.openai.com/oauth/token"
+    assert "_proxy_metadata" in payload
+
+
+def test_skip_import_when_env_openai_codex_credentials_exist(tmp_path: Path):
+    oauth_dir = tmp_path / "oauth_creds"
+    manager = CredentialManager(
+        env_vars={
+            "OPENAI_CODEX_ACCESS_TOKEN": "env_access",
+            "OPENAI_CODEX_REFRESH_TOKEN": "env_refresh",
+        },
+        oauth_dir=oauth_dir,
+    )
+
+    discovered = manager.discover_and_prepare()
+
+    assert discovered["openai_codex"] == ["env://openai_codex/0"]
+    assert list(oauth_dir.glob("openai_codex_oauth_*.json")) == []
+
+
+def test_skip_import_when_local_openai_codex_credentials_exist(tmp_path: Path):
+    oauth_dir = tmp_path / "oauth_creds"
+    oauth_dir.mkdir(parents=True, exist_ok=True)
+
+    existing = oauth_dir / "openai_codex_oauth_1.json"
+    existing.write_text(
+        json.dumps(
+            {
+                "access_token": "existing",
+                "refresh_token": "existing_rt",
+                "expiry_date": int((time.time() + 3600) * 1000),
+                "token_uri": "https://auth.openai.com/oauth/token",
+                "_proxy_metadata": {
+                    "email": "existing@example.com",
+                    "account_id": "acct_existing",
+                    "last_check_timestamp": time.time(),
+                    "loaded_from_env": False,
+                    "env_credential_index": None,
+                },
+            },
+            indent=2,
+        )
+    )
+
+    manager = CredentialManager(env_vars={}, oauth_dir=oauth_dir)
+    discovered = manager.discover_and_prepare()
+
+    assert "openai_codex" in discovered
+    assert discovered["openai_codex"] == [str(existing.resolve())]
+
+
+def test_malformed_codex_source_files_are_handled_gracefully(tmp_path: Path):
+    oauth_dir = tmp_path / "oauth_creds"
+    manager = CredentialManager(env_vars={}, oauth_dir=oauth_dir)
+
+    auth_json = tmp_path / ".codex" / "auth.json"
+    accounts_json = tmp_path / ".codex-accounts.json"
+    auth_json.parent.mkdir(parents=True, exist_ok=True)
+
+    auth_json.write_text("{not valid json")
+    accounts_json.write_text(json.dumps({"schemaVersion": 1, "accounts": ["bad-entry"]}))
+
+    imported = manager._import_openai_codex_cli_credentials(
+        auth_json_path=auth_json,
+        accounts_json_path=accounts_json,
+    )
+
+    assert imported == []
+    assert list(oauth_dir.glob("openai_codex_oauth_*.json")) == []
+
+
+def test_codex_source_files_never_modified_during_import(tmp_path: Path):
+    oauth_dir = tmp_path / "oauth_creds"
+    manager = CredentialManager(env_vars={}, oauth_dir=oauth_dir)
+
+    auth_json = tmp_path / ".codex" / "auth.json"
+    accounts_json = tmp_path / ".codex-accounts.json"
+    _write_codex_auth_json(auth_json)
+    _write_codex_accounts_json(accounts_json)
+
+    auth_before = auth_json.read_text()
+    accounts_before = accounts_json.read_text()
+
+    manager._import_openai_codex_cli_credentials(
+        auth_json_path=auth_json,
+        accounts_json_path=accounts_json,
+    )
+
+    assert auth_json.read_text() == auth_before
+    assert accounts_json.read_text() == accounts_before
diff --git a/tests/test_openai_codex_provider.py b/tests/test_openai_codex_provider.py
new file mode 100644
index 00000000..82d5e604
--- /dev/null
+++ b/tests/test_openai_codex_provider.py
@@ -0,0 +1,285 @@
+import base64
+import json
+import time
+from pathlib import Path
+
+import httpx
+import pytest
+import respx
+
+from rotator_library.providers.openai_codex_provider import OpenAICodexProvider
+
+
+def _build_jwt(payload: dict) -> str:
+    header = {"alg": "HS256", "typ": "JWT"}
+
+    def b64url(data: dict) -> str:
+        raw = json.dumps(data, separators=(",", ":")).encode("utf-8")
+        return base64.urlsafe_b64encode(raw).decode("utf-8").rstrip("=")
+
+    return f"{b64url(header)}.{b64url(payload)}.sig"
+
+
+def _build_sse_payload(text: str = "pong") -> bytes:
+    events = [
+        {
+            "type": "response.created",
+            "response": {"id": "resp_1", "created_at": int(time.time()), "status": "in_progress"},
+        },
+        {
+            "type": "response.output_item.added",
+            "item": {
+                "id": "msg_1",
+                "type": "message",
+                "status": "in_progress",
+                "content": [],
+                "role": "assistant",
+            },
+        },
+        {
+            "type": "response.content_part.added",
+            "item_id": "msg_1",
+            "part": {"type": "output_text", "text": ""},
+        },
+        {
+            "type": "response.output_text.delta",
+            "item_id": "msg_1",
+            "delta": text,
+        },
+        {
+            "type": "response.completed",
+            "response": {
+                "id": "resp_1",
+                "status": "completed",
+                "usage": {
+                    "input_tokens": 5,
+                    "output_tokens": 3,
+                    "total_tokens": 8,
+                },
+            },
+        },
+    ]
+
+    sse = "\n\n".join(f"data: {json.dumps(evt)}" for evt in events) + "\n\n"
+    return sse.encode("utf-8")
+
+
+@pytest.fixture
+def provider() -> OpenAICodexProvider:
+    return OpenAICodexProvider()
+
+
+@pytest.fixture
+def credential_file(tmp_path: Path) -> Path:
+    payload = {
+        "email": "provider@example.com",
+        "exp": int(time.time()) + 3600,
+        "https://api.openai.com/auth": {"chatgpt_account_id": "acct_provider"},
+    }
+
+    cred_path = tmp_path / "openai_codex_oauth_1.json"
+    cred_path.write_text(
+        json.dumps(
+            {
+                "access_token": _build_jwt(payload),
+                "refresh_token": "rt_provider",
+                "id_token": _build_jwt(payload),
+                "expiry_date": int((time.time() + 3600) * 1000),
+                "token_uri": "https://auth.openai.com/oauth/token",
+                "_proxy_metadata": {
+                    "email": "provider@example.com",
+                    "account_id": "acct_provider",
+                    "last_check_timestamp": time.time(),
+                    "loaded_from_env": False,
+                    "env_credential_index": None,
+                },
+            },
+            indent=2,
+        )
+    )
+    return cred_path
+
+
+def test_chat_request_mapping_to_codex_payload(provider: OpenAICodexProvider):
+    payload = provider._build_codex_payload(
+        model_name="gpt-5.1-codex",
+        messages=[
+            {"role": "system", "content": "System guidance"},
+            {"role": "user", "content": "hello"},
+        ],
+        temperature=0.2,
+        top_p=0.9,
+        max_tokens=123,
+        tools=[
+            {
+                "type": "function",
+                "function": {
+                    "name": "lookup",
+                    "description": "Lookup data",
+                    "parameters": {"type": "object", "properties": {"q": {"type": "string"}}},
+                },
+            }
+        ],
+        tool_choice="auto",
+    )
+
+    assert payload["model"] == "gpt-5.1-codex"
+    assert payload["stream"] is True
+    assert payload["store"] is False
+    assert payload["instructions"] == "System guidance"
+    assert payload["input"][0]["role"] == "user"
+    assert payload["temperature"] == 0.2
+    assert payload["top_p"] == 0.9
+    # max_tokens is accepted above but deliberately not forwarded to the backend:
+    assert "max_output_tokens" not in payload
+    assert payload["tool_choice"] == "auto"
+    assert payload["tools"][0]["name"] == "lookup"
+
+
+@pytest.mark.asyncio
+async def test_non_stream_response_mapping_and_header_construction(
+    provider: OpenAICodexProvider,
+    credential_file: Path,
+):
+    endpoint = "https://chatgpt.com/backend-api/codex/responses"
+
+    with respx.mock(assert_all_called=True) as mock_router:
+        route = mock_router.post(endpoint)
+
+        def responder(request: httpx.Request) -> httpx.Response:
+            assert request.headers.get("authorization", "").startswith("Bearer ")
+            assert request.headers.get("chatgpt-account-id") == "acct_provider"
+            assert request.headers.get("openai-beta") == "responses=experimental"
+            assert request.headers.get("originator") == "pi"
+
+            body = json.loads(request.content.decode("utf-8"))
+            assert body["stream"] is True
+            assert "instructions" in body
+            assert "input" in body
+
+            return httpx.Response(
+                status_code=200,
+                content=_build_sse_payload("pong"),
+                headers={"content-type": "text/event-stream"},
+            )
+
+        route.mock(side_effect=responder)
+
+        async with httpx.AsyncClient() as client:
+            response = await provider.acompletion(
+                client,
+                model="openai_codex/gpt-5.1-codex",
+                messages=[{"role": "user", "content": "say pong"}],
+                stream=False,
+                credential_identifier=str(credential_file),
+            )
+
+    assert response.choices[0]["message"]["content"] == "pong"
+    assert response.usage["prompt_tokens"] == 5
+    assert response.usage["completion_tokens"] == 3
+
+
+@pytest.mark.asyncio
+async def test_env_credential_identifier_supported(monkeypatch):
+    provider = OpenAICodexProvider()
+
+    payload = {
+        "email": "env-provider@example.com",
+        "exp": int(time.time()) + 3600,
+        "https://api.openai.com/auth": {"chatgpt_account_id": "acct_env_provider"},
+    }
+
+    monkeypatch.setenv("OPENAI_CODEX_1_ACCESS_TOKEN", _build_jwt(payload))
+    monkeypatch.setenv("OPENAI_CODEX_1_REFRESH_TOKEN", "rt_env_provider")
+
+    endpoint = "https://chatgpt.com/backend-api/codex/responses"
+
+    with respx.mock(assert_all_called=True) as mock_router:
+        route = mock_router.post(endpoint)
+
+        def responder(request: httpx.Request) -> httpx.Response:
+            assert request.headers.get("chatgpt-account-id") == "acct_env_provider"
+            return httpx.Response(
+                status_code=200,
+                content=_build_sse_payload("env-ok"),
+                headers={"content-type": "text/event-stream"},
+            )
+
+        route.mock(side_effect=responder)
+
+        async with httpx.AsyncClient() as client:
+            response = await provider.acompletion(
+                client,
+                model="openai_codex/gpt-5.1-codex",
+                messages=[{"role": "user", "content": "test env"}],
+                stream=False,
+                credential_identifier="env://openai_codex/1",
+            )
+
+    assert response.choices[0]["message"]["content"] == "env-ok"
+
+
+def test_parse_quota_error_from_retry_after_header(provider: OpenAICodexProvider):
+    request = httpx.Request("POST", "https://chatgpt.com/backend-api/codex/responses")
+    response = httpx.Response(
+        status_code=429,
+        request=request,
+        headers={"Retry-After": "42"},
+        text=json.dumps({"error": {"code": "rate_limit", "message": "Too many requests"}}),
+    )
+    error = httpx.HTTPStatusError("Rate limited", request=request, response=response)
+
+    parsed = provider.parse_quota_error(error)
+    assert parsed is not None
+    assert parsed["retry_after"] == 42
+    assert parsed["reason"] == "RATE_LIMIT"
+
+
+def test_parse_quota_error_from_resets_at_field(provider: OpenAICodexProvider):
+    now = int(time.time())
+    reset_ts = now + 120
+
+    request = httpx.Request("POST", "https://chatgpt.com/backend-api/codex/responses")
+    response = httpx.Response(
+        status_code=429,
+        request=request,
+        text=json.dumps(
+            {
+                "error": {
+                    "code": "usage_limit",
+                    "message": "quota exceeded",
+                    "resets_at": reset_ts,
+                }
+            }
+        ),
+    )
+    error = httpx.HTTPStatusError("Quota hit", request=request, response=response)
+
+    parsed = provider.parse_quota_error(error)
+    assert parsed is not None
+    assert parsed["reason"] == "USAGE_LIMIT"
+    assert parsed["quota_reset_timestamp"] == float(reset_ts)
+    assert isinstance(parsed["retry_after"], int)
+    assert parsed["retry_after"] >= 1
+
+
+def test_parse_quota_error_does_not_match_generic_quota_substrings(
+    provider: OpenAICodexProvider,
+):
+    request = httpx.Request("POST", "https://chatgpt.com/backend-api/codex/responses")
+    response = httpx.Response(
+        status_code=400,
+        request=request,
+        text=json.dumps(
+            {
+                "error": {
+                    "code": "invalid_request_error",
+                    "message": "quota project ID is invalid",
+                }
+            }
+        ),
+    )
+    error = httpx.HTTPStatusError("Bad request", request=request, response=response)
+
+    parsed = provider.parse_quota_error(error)
+    assert parsed is None
diff --git a/tests/test_openai_codex_sse.py b/tests/test_openai_codex_sse.py
new file mode 100644
index 00000000..ec1411f7
--- /dev/null
+++ b/tests/test_openai_codex_sse.py
@@ -0,0 +1,110 @@
+import json
+from pathlib import Path
+
+import pytest
+
+from rotator_library.providers.openai_codex_provider import (
+    CodexSSETranslator,
+    CodexStreamError,
+)
+
+
+FIXTURES_DIR = Path(__file__).parent / "fixtures" / "openai_codex"
+
+
+def _load_events(name: str):
+    return json.loads((FIXTURES_DIR / name).read_text())
+
+
+def test_fixture_driven_event_sequence_to_expected_chunks():
+    events = _load_events("stream_success_events.json")
+    translator = CodexSSETranslator(model_id="openai_codex/gpt-5.1-codex")
+
+    chunks = []
+    for event in events:
+        chunks.extend(translator.process_event(event))
+
+    # content delta chunk present
+    content_chunks = [
+        c for c in chunks if c["choices"][0]["delta"].get("content")
+    ]
+    assert content_chunks
+    assert content_chunks[-1]["choices"][0]["delta"]["content"] == "pong"
+
+    # terminal chunk contains usage mapping
+    final_chunk = chunks[-1]
+    assert final_chunk["choices"][0]["finish_reason"] == "stop"
+    assert final_chunk["usage"]["prompt_tokens"] == 21
+    assert final_chunk["usage"]["completion_tokens"] == 13
+    assert final_chunk["usage"]["total_tokens"] == 34
+
+
+def test_tool_call_deltas_and_finish_reason_mapping():
+    events = _load_events("stream_tool_call_events.json")
+    translator = CodexSSETranslator(model_id="openai_codex/gpt-5.1-codex")
+
+    chunks = []
+    for event in events:
+        chunks.extend(translator.process_event(event))
+
+    tool_chunks = [
+        c for c in chunks if c["choices"][0]["delta"].get("tool_calls")
+    ]
+    assert tool_chunks
+
+    # Validate streaming argument assembly appears in deltas
+    all_args = "".join(
+        tc["function"]["arguments"]
+        for chunk in tool_chunks
+        for tc in chunk["choices"][0]["delta"]["tool_calls"]
+    )
+    assert "San" in all_args
+    assert "Francisco" in all_args
+
+    final_chunk = chunks[-1]
+    assert final_chunk["choices"][0]["finish_reason"] == "tool_calls"
+    assert final_chunk["usage"]["total_tokens"] == 60
+
+
+def test_content_part_delta_alias_and_length_finish_reason():
+    events = _load_events("stream_content_part_delta_events.json")
+    translator = CodexSSETranslator(model_id="openai_codex/gpt-5.1-codex")
+
+    chunks = []
+    for event in events:
+        chunks.extend(translator.process_event(event))
+
+    text = "".join(
+        c["choices"][0]["delta"].get("content", "")
+        for c in chunks
+    )
+    assert text == "Hello world"
+
+    final_chunk = chunks[-1]
+    assert final_chunk["choices"][0]["finish_reason"] == "length"
+    assert final_chunk["usage"]["total_tokens"] == 30
+
+
+def test_error_event_propagation():
+    translator = CodexSSETranslator(model_id="openai_codex/gpt-5.1-codex")
+
+    with pytest.raises(CodexStreamError) as exc:
+        translator.process_event(
+            {
+                "type": "error",
+                "error": {
+                    "code": "usage_limit_reached",
+                    "message": "quota reached",
+                    "type": "rate_limit_error",
+                },
+            }
+        )
+
+    assert exc.value.status_code == 429
+    assert "quota" in str(exc.value).lower()
+
+
+def test_unknown_event_tolerance():
+    translator = CodexSSETranslator(model_id="openai_codex/gpt-5.1-codex")
+    chunks = translator.process_event({"type": "response.some_unknown_event"})
+    assert chunks == []
diff --git a/tests/test_openai_codex_wiring.py b/tests/test_openai_codex_wiring.py
new file mode 100644
index 00000000..d1a44830
--- /dev/null
+++ b/tests/test_openai_codex_wiring.py
@@ -0,0 +1,26 @@
+from rotator_library.credential_manager import CredentialManager
+from rotator_library.provider_factory import get_provider_auth_class
+from rotator_library.providers import PROVIDER_PLUGINS
+from rotator_library.providers.openai_codex_auth_base import OpenAICodexAuthBase
+
+
+def test_credential_discovery_recognizes_openai_codex_env_vars(tmp_path):
+    env_vars = {
+        "OPENAI_CODEX_1_ACCESS_TOKEN": "access-1",
+        "OPENAI_CODEX_1_REFRESH_TOKEN": "refresh-1",
+    }
+
+    manager = CredentialManager(env_vars=env_vars, oauth_dir=tmp_path / "oauth_creds")
+    discovered = manager.discover_and_prepare()
+
+    assert "openai_codex" in discovered
+    assert discovered["openai_codex"] == ["env://openai_codex/1"]
+
+
+def test_provider_factory_returns_openai_codex_auth_base():
+    auth_class = get_provider_auth_class("openai_codex")
+    assert auth_class is OpenAICodexAuthBase
+
+
+def test_provider_auto_registration_includes_openai_codex():
+    assert "openai_codex" in PROVIDER_PLUGINS
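
A note on the `_build_jwt` helper that each of these test modules redefines: the tokens it returns are JWT-shaped but end in a literal `sig` segment, so they could never pass signature verification. The tests still pass because the code under test only needs to decode the payload claims (the `email` field and the `chatgpt_account_id` under the `https://api.openai.com/auth` claim); `test_env_credential_identifier_supported` shows this directly, since it supplies nothing but access and refresh tokens and the provider still recovers `acct_env_provider` from the token body. The following is a minimal standalone sketch of that unverified decode, for illustration only; it is not code from this patch, and the helper names are hypothetical:

    import base64
    import json


    def build_unsigned_jwt(payload: dict) -> str:
        # Mirrors the tests' _build_jwt(): "<header>.<payload>.sig", no real signature.
        def b64url(data: dict) -> str:
            raw = json.dumps(data, separators=(",", ":")).encode("utf-8")
            return base64.urlsafe_b64encode(raw).decode("utf-8").rstrip("=")

        return f"{b64url({'alg': 'HS256', 'typ': 'JWT'})}.{b64url(payload)}.sig"


    def decode_jwt_payload(token: str) -> dict:
        # No signature check: take the middle segment, restore the stripped
        # base64 padding, and decode the JSON claims.
        payload_b64 = token.split(".")[1]
        payload_b64 += "=" * (-len(payload_b64) % 4)
        return json.loads(base64.urlsafe_b64decode(payload_b64))


    token = build_unsigned_jwt(
        {"email": "demo@example.com",
         "https://api.openai.com/auth": {"chatgpt_account_id": "acct_demo"}}
    )
    claims = decode_jwt_payload(token)
    assert claims["https://api.openai.com/auth"]["chatgpt_account_id"] == "acct_demo"

This is also why the fixtures can hard-code `.sig`: as long as the payload segment round-trips through base64url, nothing in these tests ever inspects the signature.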