test: replace broad openai transport stream matchers with exact field assertions

This commit is contained in:
Peter Steinberger
2026-05-10 06:55:25 +01:00
parent 07df423557
commit 555cfed534

View File

@@ -66,6 +66,15 @@ async function* streamChunks(chunks: readonly unknown[]): AsyncGenerator<never>
}
}
/**
 * Asserts that `record` is defined and that every entry in `expected`
 * deep-equals the value stored under the same key on `record`.
 *
 * Unlike `toMatchObject`, this checks each named field with an exact
 * `toEqual`, so a broad structural match cannot mask a wrong value.
 *
 * @param record - the value under test; narrowed to a plain record after the defined-check
 * @param expected - map of field name to the exact value that field must hold
 * @returns the input, typed as `Record<string, unknown>`, for follow-up assertions
 */
function expectRecordFields(record: unknown, expected: Record<string, unknown>) {
  expect(record).toBeDefined();
  // Cast once; `toBeDefined` has already rejected `undefined` inputs.
  const fields = record as Record<string, unknown>;
  for (const key of Object.keys(expected)) {
    expect(fields[key]).toEqual(expected[key]);
  }
  return fields;
}
describe("openai transport stream", () => {
it("adds OpenClaw attribution to native OpenAI transport headers and protects it from pi", () => {
vi.stubEnv("OPENCLAW_VERSION", "2026.3.22");
@@ -95,7 +104,7 @@ describe("openai transport stream", () => {
},
);
expect(headers).toMatchObject({
expectRecordFields(headers, {
originator: "openclaw",
version: "2026.3.22",
"User-Agent": "openclaw/2026.3.22",
@@ -126,7 +135,7 @@ describe("openai transport stream", () => {
{ systemPrompt: "", messages: [] } as never,
);
expect(headers).toMatchObject({
expectRecordFields(headers, {
originator: "openclaw",
version: "2026.3.22",
"User-Agent": "openclaw/2026.3.22",
@@ -294,7 +303,7 @@ describe("openai transport stream", () => {
const prepared = prepareTransportAwareSimpleModel(model);
expect(resolveTransportAwareSimpleApi(model.api)).toBe("openclaw-openai-responses-transport");
expect(prepared).toMatchObject({
expectRecordFields(prepared, {
api: "openclaw-openai-responses-transport",
provider: "openai",
id: "gpt-5.4",
@@ -327,7 +336,7 @@ describe("openai transport stream", () => {
const prepared = prepareTransportAwareSimpleModel(model);
expect(resolveTransportAwareSimpleApi(model.api)).toBe("openclaw-openai-responses-transport");
expect(prepared).toMatchObject({
expectRecordFields(prepared, {
api: "openclaw-openai-responses-transport",
provider: "openai-codex",
id: "codex-mini-latest",
@@ -360,7 +369,7 @@ describe("openai transport stream", () => {
const prepared = prepareTransportAwareSimpleModel(model);
expect(resolveTransportAwareSimpleApi(model.api)).toBe("openclaw-anthropic-messages-transport");
expect(prepared).toMatchObject({
expectRecordFields(prepared, {
api: "openclaw-anthropic-messages-transport",
provider: "anthropic",
id: "claude-sonnet-4-6",
@@ -418,7 +427,7 @@ describe("openai transport stream", () => {
);
expect(resolveTransportAwareSimpleApi(model.api)).toBe("openclaw-openai-responses-transport");
expect(prepareTransportAwareSimpleModel(model)).toMatchObject({
expectRecordFields(prepareTransportAwareSimpleModel(model), {
api: "openclaw-openai-responses-transport",
provider: "github-copilot",
id: "gpt-5.4",
@@ -449,7 +458,7 @@ describe("openai transport stream", () => {
);
expect(resolveTransportAwareSimpleApi(model.api)).toBe("openclaw-anthropic-messages-transport");
expect(prepareTransportAwareSimpleModel(model)).toMatchObject({
expectRecordFields(prepareTransportAwareSimpleModel(model), {
api: "openclaw-anthropic-messages-transport",
provider: "github-copilot",
id: "claude-sonnet-4.6",
@@ -755,7 +764,7 @@ describe("openai transport stream", () => {
maxTokens: 8192,
} satisfies Model<"openai-completions">;
expect(
expectRecordFields(
parseTransportChunkUsage(
{
prompt_tokens: 10,
@@ -766,14 +775,15 @@ describe("openai transport stream", () => {
},
model,
),
).toMatchObject({
input: 7,
output: 20,
cacheRead: 3,
totalTokens: 30,
});
{
input: 7,
output: 20,
cacheRead: 3,
totalTokens: 30,
},
);
expect(
expectRecordFields(
parseTransportChunkUsage(
{
prompt_tokens: 2,
@@ -783,12 +793,13 @@ describe("openai transport stream", () => {
},
model,
),
).toMatchObject({
input: 0,
output: 5,
cacheRead: 4,
totalTokens: 9,
});
{
input: 0,
output: 5,
cacheRead: 4,
totalTokens: 9,
},
);
});
it("records usage from OpenAI-compatible streaming usage chunks", async () => {
@@ -854,7 +865,7 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.usage).toMatchObject({
expectRecordFields(output.usage, {
input: 8,
output: 10,
cacheRead: 0,
@@ -1197,11 +1208,7 @@ describe("openai transport stream", () => {
} as never,
);
expect(params).toMatchObject({
reasoning: {
effort: "high",
},
});
expect(params.reasoning).toEqual({ effort: "high" });
});
it("keeps OpenRouter thinking format for native OpenRouter hosts behind custom provider ids", () => {
@@ -1236,11 +1243,7 @@ describe("openai transport stream", () => {
} as never,
);
expect(params).toMatchObject({
reasoning: {
effort: "high",
},
});
expect(params.reasoning).toEqual({ effort: "high" });
});
it("does not build OpenRouter reasoning params for Hunter Alpha when reasoning is disabled", () => {
@@ -1293,7 +1296,7 @@ describe("openai transport stream", () => {
undefined,
) as { input?: Array<{ role?: string }> };
expect(params.input?.[0]).toMatchObject({ role: "system" });
expect(params.input?.[0]?.role).toBe("system");
});
it("keeps developer role for native OpenAI reasoning responses models", () => {
@@ -1318,7 +1321,7 @@ describe("openai transport stream", () => {
undefined,
) as { input?: Array<{ role?: string }> };
expect(params.input?.[0]).toMatchObject({ role: "developer" });
expect(params.input?.[0]?.role).toBe("developer");
});
it("uses model maxTokens for Responses params when runtime maxTokens is omitted", () => {
@@ -1590,7 +1593,7 @@ describe("openai transport stream", () => {
};
const reasoningItem = params.input?.find((item) => item.type === "reasoning");
expect(reasoningItem).toMatchObject({
expectRecordFields(reasoningItem, {
type: "reasoning",
encrypted_content: "ciphertext",
});
@@ -1598,14 +1601,14 @@ describe("openai transport stream", () => {
const assistantMessage = params.input?.find(
(item) => item.type === "message" && item.role === "assistant",
);
expect(assistantMessage).toMatchObject({
expectRecordFields(assistantMessage, {
type: "message",
role: "assistant",
phase: "commentary",
});
expect(assistantMessage?.id).toBeUndefined();
const functionCall = params.input?.find((item) => item.type === "function_call");
expect(functionCall).toMatchObject({
expectRecordFields(functionCall, {
type: "function_call",
call_id: "call_abc",
});
@@ -1691,14 +1694,14 @@ describe("openai transport stream", () => {
const assistantMessage = params.input?.find(
(item) => item.type === "message" && item.role === "assistant",
);
expect(assistantMessage).toMatchObject({
expectRecordFields(assistantMessage, {
type: "message",
role: "assistant",
id: "msg_prior",
phase: "commentary",
});
const functionCall = params.input?.find((item) => item.type === "function_call");
expect(functionCall).toMatchObject({
expectRecordFields(functionCall, {
type: "function_call",
id: "fc_prior",
call_id: "call_abc",
@@ -2073,7 +2076,7 @@ describe("openai transport stream", () => {
};
const assistantItem = params.input?.find((item) => item.role === "assistant");
expect(assistantItem).toMatchObject({
expectRecordFields(assistantItem, {
role: "assistant",
phase: "commentary",
});
@@ -2138,7 +2141,7 @@ describe("openai transport stream", () => {
) as { tools?: Array<{ strict?: boolean }> };
expect(params.tools?.[0]?.strict).toBe(true);
expect(params.tools?.[0]).toMatchObject({
expectRecordFields(params.tools?.[0], {
parameters: {
type: "object",
properties: {},
@@ -2244,7 +2247,7 @@ describe("openai transport stream", () => {
) as { tools?: Array<{ strict?: boolean; parameters?: Record<string, unknown> }> };
expect(params.tools?.[0]).not.toHaveProperty("strict");
expect(params.tools?.[0]?.parameters).toMatchObject({
expectRecordFields(params.tools?.[0]?.parameters, {
type: "object",
properties: {},
});
@@ -2282,7 +2285,7 @@ describe("openai transport stream", () => {
) as { tools?: Array<{ strict?: boolean; parameters?: Record<string, unknown> }> };
expect(params.tools?.[0]?.strict).toBe(false);
expect(params.tools?.[0]?.parameters).toMatchObject({
expectRecordFields(params.tools?.[0]?.parameters, {
type: "object",
properties: { path: { type: "string" } },
required: [],
@@ -2317,7 +2320,7 @@ describe("openai transport stream", () => {
},
) as { metadata?: Record<string, string> };
expect(params.metadata).toMatchObject({
expectRecordFields(params.metadata, {
openclaw_session_id: "session-123",
openclaw_turn_id: "turn-123",
openclaw_turn_attempt: "1",
@@ -2449,7 +2452,7 @@ describe("openai transport stream", () => {
undefined,
) as { input?: Array<{ role?: string }> };
expect(params.input?.[0]).toMatchObject({ role: "system" });
expect(params.input?.[0]?.role).toBe("system");
});
it("uses system role for Moonshot default-route completions providers", () => {
@@ -2474,7 +2477,7 @@ describe("openai transport stream", () => {
undefined,
) as { messages?: Array<{ role?: string }> };
expect(params.messages?.[0]).toMatchObject({ role: "system" });
expect(params.messages?.[0]?.role).toBe("system");
});
it("strips the internal cache boundary from OpenAI completions system prompts", () => {
@@ -2611,14 +2614,10 @@ describe("openai transport stream", () => {
} as never,
) as { reasoning_effort?: unknown; tools?: unknown };
expect(params.tools).toEqual([
expect.objectContaining({
type: "function",
function: expect.objectContaining({
name: "lookup_weather",
}),
}),
]);
expect(params.tools).toHaveLength(1);
const tool = (params.tools as Array<Record<string, unknown>>)[0];
expectRecordFields(tool, { type: "function" });
expectRecordFields(tool.function, { name: "lookup_weather" });
expect(params).not.toHaveProperty("reasoning_effort");
});
@@ -2812,8 +2811,8 @@ describe("openai transport stream", () => {
stream_options?: { include_usage?: boolean };
};
expect(params.messages?.[0]).toMatchObject({ role: "system" });
expect(params.stream_options).toMatchObject({ include_usage: true });
expect(params.messages?.[0]?.role).toBe("system");
expect(params.stream_options?.include_usage).toBe(true);
});
it("enables streaming usage compat for generic providers on native DashScope endpoints", () => {
@@ -2840,7 +2839,7 @@ describe("openai transport stream", () => {
stream_options?: { include_usage?: boolean };
};
expect(params.stream_options).toMatchObject({ include_usage: true });
expect(params.stream_options?.include_usage).toBe(true);
});
it("honors explicit streaming usage compat for configured custom providers", () => {
@@ -2868,7 +2867,7 @@ describe("openai transport stream", () => {
stream_options?: { include_usage?: boolean };
};
expect(params.stream_options).toMatchObject({ include_usage: true });
expect(params.stream_options?.include_usage).toBe(true);
});
it("always includes stream_options.include_usage for known local backends like llama-cpp", () => {
@@ -2995,9 +2994,9 @@ describe("openai transport stream", () => {
tools?: Array<{ function?: { strict?: boolean } }>;
};
expect(params.messages?.[0]).toMatchObject({ role: "system" });
expect(params.messages?.[0]?.role).toBe("system");
expect(params).not.toHaveProperty("reasoning_effort");
expect(params.stream_options).toMatchObject({ include_usage: true });
expect(params.stream_options).toEqual({ include_usage: true });
expect(params).not.toHaveProperty("store");
expect(params.tools?.[0]?.function).not.toHaveProperty("strict");
});
@@ -3033,8 +3032,8 @@ describe("openai transport stream", () => {
undefined,
) as { messages?: Array<{ role?: string; content?: unknown }> };
expect(params.messages?.[0]).toMatchObject({ role: "system", content: "system" });
expect(params.messages?.[1]).toMatchObject({ role: "user", content: "What is 2 + 2?" });
expect(params.messages?.[0]).toEqual({ role: "system", content: "system" });
expect(params.messages?.[1]).toEqual({ role: "user", content: "What is 2 + 2?" });
});
it("uses max_tokens for Chutes default-route completions providers without relying on baseUrl host sniffing", () => {
@@ -3342,7 +3341,7 @@ describe("openai transport stream", () => {
push() {},
});
expect(output.content[0]).toMatchObject({
expectRecordFields(output.content[0], {
type: "toolCall",
id: "call_abc",
name: "echo_value",
@@ -3555,9 +3554,7 @@ describe("openai transport stream", () => {
} as never,
);
expect(params).toMatchObject({
max_tokens: 2048,
});
expect(params.max_tokens).toBe(2048);
expect(params).not.toHaveProperty("max_completion_tokens");
expect(params).not.toHaveProperty("store");
expect(params).not.toHaveProperty("reasoning_effort");
@@ -3588,9 +3585,7 @@ describe("openai transport stream", () => {
} as never,
);
expect(params).toMatchObject({
max_tokens: 2048,
});
expect(params.max_tokens).toBe(2048);
expect(params).not.toHaveProperty("max_completion_tokens");
expect(params).not.toHaveProperty("store");
expect(params).not.toHaveProperty("reasoning_effort");
@@ -3646,7 +3641,7 @@ describe("openai transport stream", () => {
};
const functionCall = params.input?.find((item) => item.type === "function_call");
expect(functionCall).toMatchObject({
expectRecordFields(functionCall, {
type: "function_call",
arguments: "not valid json",
});
@@ -4101,10 +4096,18 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.stopReason).toBe("toolUse");
expect(output.content).toMatchObject([
{ type: "thinking", thinking: "Need a tool.", thinkingSignature: "reasoning_details" },
{ type: "toolCall", id: "call_1", name: "lookup", arguments: { query: "qwen3" } },
]);
expect(output.content).toHaveLength(2);
expectRecordFields(output.content[0], {
type: "thinking",
thinking: "Need a tool.",
thinkingSignature: "reasoning_details",
});
expectRecordFields(output.content[1], {
type: "toolCall",
id: "call_1",
name: "lookup",
arguments: { query: "qwen3" },
});
});
it("treats singular tool_call finish_reason as tool use", async () => {
@@ -4189,9 +4192,10 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.stopReason).toBe("toolUse");
expect(output.content).toContainEqual(
expect.objectContaining({ type: "toolCall", id: "call_1", name: "lookup" }),
const toolCall = (output.content as Array<{ type?: string }>).find(
(item) => item.type === "toolCall",
);
expectRecordFields(toolCall, { type: "toolCall", id: "call_1", name: "lookup" });
});
it("keeps streamed tool call arguments intact when reasoning_details repeats", async () => {
@@ -4294,11 +4298,19 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.stopReason).toBe("toolUse");
expect(output.content).toMatchObject([
{ type: "thinking", thinking: "Need a tool." },
{ type: "toolCall", id: "call_1", name: "lookup", arguments: { query: "qwen3" } },
{ type: "thinking", thinking: " Still thinking.", thinkingSignature: "reasoning_details" },
]);
expect(output.content).toHaveLength(3);
expectRecordFields(output.content[0], { type: "thinking", thinking: "Need a tool." });
expectRecordFields(output.content[1], {
type: "toolCall",
id: "call_1",
name: "lookup",
arguments: { query: "qwen3" },
});
expectRecordFields(output.content[2], {
type: "thinking",
thinking: " Still thinking.",
thinkingSignature: "reasoning_details",
});
});
it("surfaces visible OpenRouter response text from reasoning_details without dropping tools", async () => {
@@ -4383,15 +4395,19 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.stopReason).toBe("toolUse");
expect(output.content).toMatchObject([
{
type: "thinking",
thinking: "Need to look something up.",
thinkingSignature: "reasoning_details",
},
{ type: "text", text: "Working on it." },
{ type: "toolCall", id: "call_1", name: "lookup", arguments: { query: "weather" } },
]);
expect(output.content).toHaveLength(3);
expectRecordFields(output.content[0], {
type: "thinking",
thinking: "Need to look something up.",
thinkingSignature: "reasoning_details",
});
expectRecordFields(output.content[1], { type: "text", text: "Working on it." });
expectRecordFields(output.content[2], {
type: "toolCall",
id: "call_1",
name: "lookup",
arguments: { query: "weather" },
});
});
it("does not surface ambiguous reasoning_details text without explicit compat opt-in", async () => {
@@ -4468,13 +4484,12 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.content).toMatchObject([
{
type: "thinking",
thinking: "Internal thought.",
thinkingSignature: "reasoning_details",
},
]);
expect(output.content).toHaveLength(1);
expectRecordFields(output.content[0], {
type: "thinking",
thinking: "Internal thought.",
thinkingSignature: "reasoning_details",
});
});
it("preserves reasoning_details item order when visible text and thinking are interleaved", async () => {
@@ -4540,15 +4555,14 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.content).toMatchObject([
{ type: "text", text: "Visible first." },
{
type: "thinking",
thinking: " Hidden second.",
thinkingSignature: "reasoning_details",
},
{ type: "text", text: " Visible third." },
]);
expect(output.content).toHaveLength(3);
expectRecordFields(output.content[0], { type: "text", text: "Visible first." });
expectRecordFields(output.content[1], {
type: "thinking",
thinking: " Hidden second.",
thinkingSignature: "reasoning_details",
});
expectRecordFields(output.content[2], { type: "text", text: " Visible third." });
});
it("does not duplicate fallback reasoning fields when reasoning_details already provided thinking", async () => {
@@ -4611,13 +4625,12 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.content).toMatchObject([
{
type: "thinking",
thinking: "Primary reasoning.",
thinkingSignature: "reasoning_details",
},
]);
expect(output.content).toHaveLength(1);
expectRecordFields(output.content[0], {
type: "thinking",
thinking: "Primary reasoning.",
thinkingSignature: "reasoning_details",
});
});
it("keeps fallback thinking when reasoning_details only carries visible text", async () => {
@@ -4680,14 +4693,13 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.content).toMatchObject([
{ type: "text", text: "Visible answer." },
{
type: "thinking",
thinking: "Hidden fallback reasoning.",
thinkingSignature: "reasoning",
},
]);
expect(output.content).toHaveLength(2);
expectRecordFields(output.content[0], { type: "text", text: "Visible answer." });
expectRecordFields(output.content[1], {
type: "thinking",
thinking: "Hidden fallback reasoning.",
thinkingSignature: "reasoning",
});
});
it("keeps a streaming tool call intact when visible reasoning text arrives mid-call", async () => {
@@ -4789,10 +4801,14 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.stopReason).toBe("toolUse");
expect(output.content).toMatchObject([
{ type: "toolCall", id: "call_1", name: "lookup", arguments: { query: "weather" } },
{ type: "text", text: "Working on it." },
]);
expect(output.content).toHaveLength(2);
expectRecordFields(output.content[0], {
type: "toolCall",
id: "call_1",
name: "lookup",
arguments: { query: "weather" },
});
expectRecordFields(output.content[1], { type: "text", text: "Working on it." });
});
it("keeps a streaming tool call intact when visible reasoning text arrives between chunks", async () => {
@@ -4907,10 +4923,14 @@ describe("openai transport stream", () => {
await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
expect(output.stopReason).toBe("toolUse");
expect(output.content).toMatchObject([
{ type: "toolCall", id: "call_1", name: "lookup", arguments: { query: "weather" } },
{ type: "text", text: "Working on it." },
]);
expect(output.content).toHaveLength(2);
expectRecordFields(output.content[0], {
type: "toolCall",
id: "call_1",
name: "lookup",
arguments: { query: "weather" },
});
expectRecordFields(output.content[1], { type: "text", text: "Working on it." });
});
it("fails fast when post-tool-call buffering grows beyond the safety cap", async () => {