fix(auth): remove bogus codex oauth responses probe

This commit is contained in:
Ayaan Zaidi
2026-03-06 15:01:17 +05:30
committed by Ayaan Zaidi
parent cbb96d9fe7
commit bdd368533f
3 changed files with 2 additions and 116 deletions

View File

@@ -46,8 +46,7 @@ Docs: https://docs.openclaw.ai
- TUI/model indicator freshness: prevent stale session snapshots from overwriting freshly patched model selection (and reset per-session freshness when switching session keys) so `/model` updates reflect immediately instead of lagging by one or more commands. (#21255) Thanks @kowza.
- TUI/final-error rendering fallback: when a chat `final` event has no renderable assistant content but includes envelope `errorMessage`, render the formatted error text instead of collapsing to `"(no output)"`, preserving actionable failure context in-session. (#14687) Thanks @Mquarmoc.
- TUI/session-key alias event matching: treat chat events whose session keys are canonical aliases (for example `agent:<id>:main` vs `main`) as the same session while preserving cross-agent isolation, so assistant replies no longer disappear or surface in another terminal window due to strict key-form mismatch. (#33937) Thanks @yjh1412.
- OpenAI Codex OAuth/login hardening: fail OAuth completion early when the returned token is missing `api.responses.write`, and allow `openclaw models auth login --provider openai-codex` to use the built-in OAuth path even when no provider plugins are installed. (#36660) Thanks @driesvints.
- OpenAI Codex OAuth/scope request parity: augment the OAuth authorize URL with required API scopes (`api.responses.write`, `model.request`, `api.model.read`) before browser handoff so OAuth tokens include runtime model/request permissions expected by OpenAI API calls. (#24720) Thanks @Skippy-Gunboat.
- OpenAI Codex OAuth/login parity: keep `openclaw models auth login --provider openai-codex` on the built-in path even without provider plugins, preserve Pi-generated authorize URLs without local scope rewriting, and stop validating successful Codex sign-ins against the public OpenAI Responses API after callback. (follow-up to #36660 and #24720) Thanks @driesvints, @Skippy-Gunboat, and @obviyus.
- Agents/config schema lookup: add `gateway` tool action `config.schema.lookup` so agents can inspect one config path at a time before edits without loading the full schema into prompt context. (#37266) Thanks @gumadeiras.
- Onboarding/API key input hardening: strip non-Latin1 Unicode artifacts from normalized secret input (while preserving Latin-1 content and internal spaces) so malformed copied API keys cannot trigger HTTP header `ByteString` construction crashes; adds regression coverage for shared normalization and MiniMax auth header usage. (#24496) Thanks @fa6maalassaf.
- Kimi Coding/Anthropic tools compatibility: normalize `anthropic-messages` tool payloads to OpenAI-style `tools[].function` + compatible `tool_choice` when targeting Kimi Coding endpoints, restoring tool-call workflows that regressed after v2026.3.2. (#37038) Thanks @mochimochimochi-hub.

View File

@@ -1,4 +1,4 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { RuntimeEnv } from "../runtime.js";
import type { WizardPrompter } from "../wizard/prompts.js";
@@ -56,30 +56,10 @@ async function runCodexOAuth(params: { isRemote: boolean }) {
}
describe("loginOpenAICodexOAuth", () => {
// Holds the undo function for the fetch stub installed in beforeEach;
// null when no stub is active. afterEach calls it to restore the real fetch.
let restoreFetch: (() => void) | null = null;
// Per-test setup: reset all mocks and replace the global fetch with a stub
// so no test ever reaches the real network.
// NOTE(review): this span comes from a diff hunk — some of these lines are
// being removed by the commit; verify against the post-commit file.
beforeEach(() => {
vi.clearAllMocks();
// TLS preflight succeeds by default; individual tests override these.
mocks.runOpenAIOAuthTlsPreflight.mockResolvedValue({ ok: true });
mocks.formatOpenAIOAuthTlsPreflightFix.mockReturnValue("tls fix");
const originalFetch = globalThis.fetch;
// Canned 400 "model is required" JSON response — presumably the shape the
// scope probe treats as "token has the required scope"; confirm against
// extractResponsesScopeErrorMessage (only 401 bodies are flagged).
const fetchMock = vi.fn(
async () =>
new Response('{"error":{"message":"model is required"}}', {
status: 400,
headers: { "content-type": "application/json" },
}),
);
globalThis.fetch = fetchMock as unknown as typeof fetch;
// Capture the original fetch so afterEach can undo the stub.
restoreFetch = () => {
globalThis.fetch = originalFetch;
};
});
// Per-test teardown: restore the real global fetch (no-op when the stub was
// never installed) and clear the undo handle.
afterEach(() => {
restoreFetch?.();
restoreFetch = null;
});
it("returns credentials on successful oauth login", async () => {
@@ -188,52 +168,6 @@ describe("loginOpenAICodexOAuth", () => {
expect(prompter.note).not.toHaveBeenCalledWith("tls fix", "OAuth prerequisites");
});
// OAuth itself succeeds, but the post-login scope probe gets a 401 whose
// body names the missing api.responses.write scope — completion must fail
// with an actionable error rather than handing back an unusable token.
it("fails with actionable error when token is missing api.responses.write scope", async () => {
mocks.createVpsAwareOAuthHandlers.mockReturnValue({
onAuth: vi.fn(),
onPrompt: vi.fn(),
});
// Simulate a successful browser OAuth flow returning full credentials.
mocks.loginOpenAICodex.mockResolvedValue({
provider: "openai-codex" as const,
access: "access-token",
refresh: "refresh-token",
expires: Date.now() + 60_000,
email: "user@example.com",
});
// Scope probe response: 401 with the missing-scope message in the body.
globalThis.fetch = vi.fn(
async () =>
new Response('{"error":{"message":"Missing scopes: api.responses.write"}}', {
status: 401,
headers: { "content-type": "application/json" },
}),
) as unknown as typeof fetch;
await expect(runCodexOAuth({ isRemote: false })).rejects.toThrow(
"missing required scope: api.responses.write",
);
});
// The scope probe is best-effort: a network failure while probing must not
// abort an otherwise successful OAuth completion — the credentials from the
// login flow are returned untouched.
it("does not fail oauth completion when scope probe is unavailable", async () => {
const creds = {
provider: "openai-codex" as const,
access: "access-token",
refresh: "refresh-token",
expires: Date.now() + 60_000,
email: "user@example.com",
};
mocks.createVpsAwareOAuthHandlers.mockReturnValue({
onAuth: vi.fn(),
onPrompt: vi.fn(),
});
mocks.loginOpenAICodex.mockResolvedValue(creds);
// Scope probe blows up at the network layer (e.g. offline / TLS failure).
globalThis.fetch = vi.fn(async () => {
throw new Error("network down");
}) as unknown as typeof fetch;
const { result } = await runCodexOAuth({ isRemote: false });
// Login still succeeds with the original credentials.
expect(result).toEqual(creds);
});
it("fails early with actionable message when TLS preflight fails", async () => {
mocks.runOpenAIOAuthTlsPreflight.mockResolvedValue({
ok: false,

View File

@@ -8,41 +8,6 @@ import {
runOpenAIOAuthTlsPreflight,
} from "./oauth-tls-preflight.js";
const OPENAI_RESPONSES_ENDPOINT = "https://api.openai.com/v1/responses";
const OPENAI_RESPONSES_WRITE_SCOPE = "api.responses.write";

/**
 * Inspect an OpenAI Responses API error for a missing-scope failure.
 *
 * Returns the provider's error text (trimmed) when the response is a 401
 * whose body mentions both "missing scope" and the `api.responses.write`
 * scope name; a canned fallback message when that body trims to empty;
 * and null for every other status/body combination.
 */
function extractResponsesScopeErrorMessage(status: number, bodyText: string): string | null {
  // Only a 401 can signal an authorization/scope problem.
  if (status === 401) {
    const lowered = bodyText.toLowerCase();
    const mentionsMissingWriteScope =
      lowered.includes("missing scope") &&
      lowered.includes(OPENAI_RESPONSES_WRITE_SCOPE.toLowerCase());
    if (mentionsMissingWriteScope) {
      const trimmed = bodyText.trim();
      return trimmed !== "" ? trimmed : `Missing scopes: ${OPENAI_RESPONSES_WRITE_SCOPE}`;
    }
  }
  return null;
}
/**
 * Best-effort probe for the `api.responses.write` scope: POSTs an empty
 * JSON body to the OpenAI Responses endpoint with the freshly issued
 * access token and returns the provider's missing-scope error message,
 * or null when the token appears usable or the probe itself could not
 * reach the API.
 */
async function detectMissingResponsesWriteScope(accessToken: string): Promise<string | null> {
  const probeRequest = {
    method: "POST",
    headers: {
      Authorization: `Bearer ${accessToken}`,
      "Content-Type": "application/json",
    },
    body: "{}",
  };
  try {
    const probe = await fetch(OPENAI_RESPONSES_ENDPOINT, probeRequest);
    const bodyText = await probe.text();
    return extractResponsesScopeErrorMessage(probe.status, bodyText);
  } catch {
    // Best effort only: network/TLS issues should not block successful OAuth completion.
    return null;
  }
}
export async function loginOpenAICodexOAuth(params: {
prompter: WizardPrompter;
runtime: RuntimeEnv;
@@ -90,18 +55,6 @@ export async function loginOpenAICodexOAuth(params: {
onPrompt,
onProgress: (msg) => spin.update(msg),
});
if (creds?.access) {
const scopeError = await detectMissingResponsesWriteScope(creds.access);
if (scopeError) {
throw new Error(
[
`OpenAI OAuth token is missing required scope: ${OPENAI_RESPONSES_WRITE_SCOPE}.`,
`Provider response: ${scopeError}`,
"Re-authenticate with OpenAI Codex OAuth or use OPENAI_API_KEY with openai/* models.",
].join(" "),
);
}
}
spin.stop("OpenAI OAuth complete");
return creds ?? null;
} catch (err) {