fix: handle Qwen 3 reasoning field in Ollama responses

Qwen 3 (and potentially other reasoning-capable models served via Ollama)
returns its final answer in a `reasoning` field with an empty `content`
field. This causes blank/empty responses since OpenClaw only reads `content`.

Changes:
- Add `reasoning?` to OllamaChatResponse message type
- Fall back to `reasoning` when `content` is empty in buildAssistantMessage
- Accumulate `reasoning` chunks during streaming when `content` is empty

This allows Qwen 3 to work correctly both with and without /no_think mode.
Commit 4928717b92 (parent 46bf210e04), authored by SK Heavy Industries on 2026-02-16 17:42:03 -05:00, committed by Peter Steinberger.

View File

@@ -54,6 +54,7 @@ interface OllamaChatResponse {
message: {
role: "assistant";
content: string;
reasoning?: string;
tool_calls?: OllamaToolCall[];
};
done: boolean;
@@ -191,8 +192,12 @@ export function buildAssistantMessage(
): AssistantMessage {
const content: (TextContent | ToolCall)[] = [];
if (response.message.content) {
content.push({ type: "text", text: response.message.content });
// Qwen 3 (and potentially other reasoning models) may return their final
// answer in a `reasoning` field with an empty `content`. Fall back to
// `reasoning` so the response isn't silently dropped.
const text = response.message.content || response.message.reasoning || "";
if (text) {
content.push({ type: "text", text });
}
const toolCalls = response.message.tool_calls;
@@ -347,6 +352,9 @@ export function createOllamaStreamFn(baseUrl: string): StreamFn {
for await (const chunk of parseNdjsonStream(reader)) {
if (chunk.message?.content) {
accumulatedContent += chunk.message.content;
} else if (chunk.message?.reasoning) {
// Qwen 3 reasoning mode: content may be empty, output in reasoning
accumulatedContent += chunk.message.reasoning;
}
// Ollama sends tool_calls in intermediate (done:false) chunks,