Optimize UI

ropzislaw
2026-03-08 20:43:19 +08:00
committed by GitHub
13 changed files with 1005 additions and 114 deletions

View File

@@ -2,7 +2,7 @@ import type { ChatStatus } from "ai";
import { ClockIcon } from "lucide-react";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useTranslation } from "../../../i18n/context";
import { fetchModelsForSelector } from "../../../lib/models";
import { fetchModelsForSelector, onModelListChange } from "../../../lib/models";
import { cn } from "../../../lib/utils";
import type { ContextItem, InputAreaProps } from "../../../types";
import {
@@ -91,8 +91,19 @@ export function DefaultInputArea({
setIsLoadingModels(false);
}
});
// Subscribe to background model list updates (e.g. server returned newer data)
const unsubscribe = onModelListChange((updatedModels) => {
if (!cancelled) {
setFetchedModels(
updatedModels.map((m) => ({ name: m.name, value: m.id })),
);
}
});
return () => {
cancelled = true;
unsubscribe();
};
}, []);

View File

@@ -285,7 +285,7 @@ export function useChat(
return;
}
// Remove last assistant message
// Remove last assistant message from UI
const removed = adapter.removeLastAssistantMessage();
if (!removed) return;
@@ -302,6 +302,9 @@ export function useChat(
const text = textPart?.type === "text" ? textPart.text : "";
if (sessionId && text) {
// Roll back the session so the agent doesn't see the old assistant turn
await agent.rollbackLastAssistantTurn(sessionId);
adapter.setStatus("submitted");
const events = agent.chat(text, { sessionId });
await processAgentEvents(events);

View File

@@ -70,8 +70,10 @@ const FALLBACK_MODELS: ModelInfo[] = [
];
const MODELS_API_URL = "https://www.claudechrome.com/api/models";
const STORAGE_KEY = "cachedModelList";
const STORAGE_TIMESTAMP_KEY = "cachedModelListTimestamp";
const MAX_MODELS = 200;
// Convert API pricing to price level
function getPriceLevel(
pricing: ApiModelPricing,
): "cheap" | "normal" | "expensive" {
@@ -81,7 +83,6 @@ function getPriceLevel(
return "expensive";
}
// Convert API model to internal ModelInfo
function convertApiModel(apiModel: ApiModel): ModelInfo {
return {
id: apiModel.id,
@@ -97,7 +98,6 @@ function convertApiModel(apiModel: ApiModel): ModelInfo {
};
}
// Validate that the API response matches the expected schema
function isValidApiResponse(data: unknown): data is ApiResponse {
if (typeof data !== "object" || data === null) return false;
const obj = data as Record<string, unknown>;
@@ -105,7 +105,6 @@ function isValidApiResponse(data: unknown): data is ApiResponse {
if (typeof obj.data !== "object" || obj.data === null) return false;
const d = obj.data as Record<string, unknown>;
if (!Array.isArray(d.models)) return false;
// Validate first model shape if present
if (d.models.length > 0) {
const first = d.models[0] as Record<string, unknown>;
if (typeof first.id !== "string" || typeof first.name !== "string") {
@@ -115,53 +114,164 @@ function isValidApiResponse(data: unknown): data is ApiResponse {
return true;
}
// Cache for models
// --- Persistent storage helpers ---
async function loadFromStorage(): Promise<ModelInfo[] | null> {
try {
if (typeof chrome !== "undefined" && chrome.storage?.local) {
const result = await chrome.storage.local.get([STORAGE_KEY]);
const models = result[STORAGE_KEY];
if (Array.isArray(models) && models.length > 0) {
return models as ModelInfo[];
}
}
} catch {
// Storage not available (e.g. in tests)
}
return null;
}
async function saveToStorage(
models: ModelInfo[],
serverTimestamp: number,
): Promise<void> {
try {
if (typeof chrome !== "undefined" && chrome.storage?.local) {
await chrome.storage.local.set({
[STORAGE_KEY]: models,
[STORAGE_TIMESTAMP_KEY]: serverTimestamp,
});
}
} catch {
// Ignore storage errors
}
}
async function getStoredTimestamp(): Promise<number> {
try {
if (typeof chrome !== "undefined" && chrome.storage?.local) {
const result = await chrome.storage.local.get([STORAGE_TIMESTAMP_KEY]);
const ts = result[STORAGE_TIMESTAMP_KEY];
if (typeof ts === "number") return ts;
}
} catch {
// Ignore
}
return 0;
}
// --- In-memory cache (fast path) ---
let cachedModels: ModelInfo[] | null = null;
let lastFetchTime = 0;
const CACHE_DURATION = 5 * 60 * 1000; // 5 minutes
const MAX_MODELS = 200; // Safety cap on number of models
let cachedServerTimestamp = 0;
let storageLoaded = false;
/**
* Fetch models from the server API with caching and fallback.
* Returns cached result if still valid (5 min TTL).
* Falls back to FALLBACK_MODELS on any error.
* Fetch models with a two-tier cache:
* 1. In-memory cache (instant)
* 2. chrome.storage.local (survives service worker restarts)
*
* On the first call, returns storage-cached models immediately.
* A background fetch updates both caches when the server reports new data.
*/
export async function fetchModels(): Promise<ModelInfo[]> {
// Return cached models if still valid
if (cachedModels && Date.now() - lastFetchTime < CACHE_DURATION) {
// 1. Fast path: in-memory cache
if (cachedModels) {
// Trigger background refresh (fire-and-forget)
void refreshFromServer();
return cachedModels;
}
// 2. Try loading from persistent storage
if (!storageLoaded) {
storageLoaded = true;
const stored = await loadFromStorage();
if (stored) {
cachedModels = stored;
cachedServerTimestamp = await getStoredTimestamp();
// Trigger background refresh
void refreshFromServer();
return cachedModels;
}
}
  // 3. Nothing cached: fetch in the foreground and wait for the result
return await fetchFromServer();
}
let refreshInFlight = false;
async function refreshFromServer(): Promise<void> {
if (refreshInFlight) return;
refreshInFlight = true;
try {
await fetchFromServer();
} finally {
refreshInFlight = false;
}
}
async function fetchFromServer(): Promise<ModelInfo[]> {
try {
const response = await fetch(MODELS_API_URL);
console.log("response", response);
if (!response.ok) {
throw new Error(`API request failed: ${response.status}`);
}
const data: unknown = await response.json();
console.log("data", data);
if (!isValidApiResponse(data)) {
throw new Error("Invalid API response structure");
}
if (data.success && data.data.models.length > 0) {
// Apply safety cap
const models = data.data.models.slice(0, MAX_MODELS).map(convertApiModel);
cachedModels = models;
lastFetchTime = Date.now();
return cachedModels;
const serverTimestamp = data.data.cache?.lastUpdate ?? Date.now();
// Only update if the server data is newer
if (serverTimestamp > cachedServerTimestamp) {
const models = data.data.models
.slice(0, MAX_MODELS)
.map(convertApiModel);
cachedModels = models;
cachedServerTimestamp = serverTimestamp;
await saveToStorage(models, serverTimestamp);
// Notify listeners that models changed
notifyModelChange(models);
}
return cachedModels ?? FALLBACK_MODELS;
}
throw new Error("Empty model list from API");
} catch (_error) {
// Return fallback - do not log sensitive details
return FALLBACK_MODELS;
} catch {
return cachedModels ?? FALLBACK_MODELS;
}
}
// --- Change notification for components ---
type ModelChangeListener = (models: ModelInfo[]) => void;
const modelChangeListeners = new Set<ModelChangeListener>();
function notifyModelChange(models: ModelInfo[]): void {
for (const listener of modelChangeListeners) {
try {
listener(models);
} catch {
// Don't let listener errors break the loop
}
}
}
/**
* Subscribe to model list updates (triggered when server returns new data).
* Returns an unsubscribe function.
*/
export function onModelListChange(listener: ModelChangeListener): () => void {
modelChangeListeners.add(listener);
return () => modelChangeListeners.delete(listener);
}
/**
* Fetch models and convert to the {name, value} format used by the model selector.
*/
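For context, a minimal consumer-side sketch of the new cache and listener API (editorial illustration, not part of this diff; the module path and the render helper are hypothetical, the exports are the ones defined above):

import { fetchModels, onModelListChange } from "./models";

async function initModelList(render: (models: unknown[]) => void) {
  // First call resolves from memory, then chrome.storage.local, then network;
  // later calls return instantly and refresh in the background.
  render(await fetchModels());
  // Re-render whenever a background refresh finds newer server data.
  const unsubscribe = onModelListChange((updated) => render(updated));
  return unsubscribe; // call on teardown to avoid leaked listeners
}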

View File

@@ -30,7 +30,7 @@
"dependencies": {
"@ai-sdk/anthropic": "^3.0.44",
"@ai-sdk/google": "^3.0.22",
"@ai-sdk/openai": "^3.0.25",
"@ai-sdk/openai": "^3.0.41",
"@ai-sdk/openai-compatible": "^2.0.18",
"@aipexstudio/aipex-core": "workspace:*",
"@aipexstudio/aipex-react": "workspace:*",

View File

@@ -1,5 +1,5 @@
import { describe, expect, it, vi } from "vitest";
import { createAIProvider } from "./ai-provider";
import { createAIProvider, createEmptyToolArgsFinalizer } from "./ai-provider";
// Provide minimal mock for import.meta.env
vi.stubGlobal("import", { meta: { env: { PROD: false } } });
@@ -111,3 +111,356 @@ describe("createAIProvider", () => {
});
});
});
// --- SSE stream transform tests ---
function sseLinesToStream(lines: string[]): ReadableStream<Uint8Array> {
const encoder = new TextEncoder();
const text = `${lines.join("\n")}\n`;
return new ReadableStream<Uint8Array>({
start(controller) {
controller.enqueue(encoder.encode(text));
controller.close();
},
});
}
async function readStreamLines(
stream: ReadableStream<Uint8Array>,
): Promise<string[]> {
const decoder = new TextDecoder();
const reader = stream.getReader();
let result = "";
while (true) {
const { done, value } = await reader.read();
if (done) break;
result += decoder.decode(value, { stream: true });
}
return result.split("\n").filter((l) => l.length > 0);
}
describe("createEmptyToolArgsFinalizer", () => {
it("should inject {} for parameterless tools when finish_reason is tool_calls", async () => {
const sseLines = [
`data: {"id":"gen-1","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"id":"call_1","type":"function","function":{"name":"get_current_tab","arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-1","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-1","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-1","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":"","role":"assistant"},"finish_reason":"tool_calls"}]}`,
`data: [DONE]`,
];
const input = sseLinesToStream(sseLines);
const output = createEmptyToolArgsFinalizer(input);
const outputLines = await readStreamLines(output);
const dataLines = outputLines.filter(
(l) => l.startsWith("data: ") && l !== "data: [DONE]",
);
// Should have 5 data lines: 3 original + 1 synthetic + 1 finish
expect(dataLines.length).toBe(5);
// The synthetic line should be right before the finish line
const syntheticLine = dataLines[3]!;
const syntheticData = JSON.parse(syntheticLine.slice(6));
expect(syntheticData.choices[0].delta.tool_calls[0].index).toBe(0);
expect(
syntheticData.choices[0].delta.tool_calls[0].function.arguments,
).toBe("{}");
// The finish line should still be present and unchanged
const finishLine = dataLines[4]!;
const finishData = JSON.parse(finishLine.slice(6));
expect(finishData.choices[0].finish_reason).toBe("tool_calls");
});
it("should NOT inject {} for tools that have real arguments", async () => {
const sseLines = [
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"id":"call_1","type":"function","function":{"name":"search_elements","arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":"{\\"tabId\\": "}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":"127183"}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":"9286"}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":", \\"query\\""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":": \\"button*\\""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":", \\"contextLevels\\": 1}"}}]},"finish_reason":null}]}`,
`data: {"id":"gen-2","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":"","role":"assistant"},"finish_reason":"tool_calls"}]}`,
`data: [DONE]`,
];
const input = sseLinesToStream(sseLines);
const output = createEmptyToolArgsFinalizer(input);
const outputLines = await readStreamLines(output);
const dataLines = outputLines.filter(
(l) => l.startsWith("data: ") && l !== "data: [DONE]",
);
// No synthetic line should be injected -- same count as input data lines
expect(dataLines.length).toBe(9);
// All lines should pass through unchanged
for (let i = 0; i < sseLines.length - 1; i++) {
// Skip "data: [DONE]" comparison
if (sseLines[i] === "data: [DONE]") continue;
expect(outputLines[i]).toBe(sseLines[i]);
}
});
it("should handle multiple parameterless tools in a single response", async () => {
const sseLines = [
`data: {"id":"gen-3","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"id":"call_1","type":"function","function":{"name":"get_current_tab","arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-3","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-3","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-3","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":1,"id":"call_2","type":"function","function":{"name":"get_all_tabs","arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-3","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":1,"function":{"arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-3","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":1,"function":{"arguments":""}}]},"finish_reason":null}]}`,
`data: {"id":"gen-3","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":"","role":"assistant"},"finish_reason":"tool_calls"}]}`,
`data: [DONE]`,
];
const input = sseLinesToStream(sseLines);
const output = createEmptyToolArgsFinalizer(input);
const outputLines = await readStreamLines(output);
const dataLines = outputLines.filter(
(l) => l.startsWith("data: ") && l !== "data: [DONE]",
);
// 7 original data lines + 2 synthetic (one per tool) = 9
expect(dataLines.length).toBe(9);
// The two synthetic lines should be injected before the finish chunk
// Find synthetic lines (they have function.arguments === "{}")
const syntheticLines = dataLines.filter((line) => {
const data = JSON.parse(line.slice(6));
const tc = data.choices?.[0]?.delta?.tool_calls?.[0];
return tc?.function?.arguments === "{}";
});
expect(syntheticLines.length).toBe(2);
const syntheticIndices = syntheticLines.map((line) => {
const data = JSON.parse(line.slice(6));
return data.choices[0].delta.tool_calls[0].index;
});
expect(syntheticIndices).toContain(0);
expect(syntheticIndices).toContain(1);
// Finish line should be the very last data line
const lastDataLine = dataLines[dataLines.length - 1]!;
const lastData = JSON.parse(lastDataLine.slice(6));
expect(lastData.choices[0].finish_reason).toBe("tool_calls");
});
it("should pass through non-tool-call streams unchanged", async () => {
const sseLines = [
`data: {"id":"gen-4","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":"Hello","role":"assistant"},"finish_reason":null}]}`,
`data: {"id":"gen-4","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":" world"},"finish_reason":"stop"}]}`,
`data: [DONE]`,
];
const input = sseLinesToStream(sseLines);
const output = createEmptyToolArgsFinalizer(input);
const outputLines = await readStreamLines(output);
expect(outputLines).toEqual(sseLines);
});
it("should work with exact real-world SSE data (double-newline separators, get_all_tabs)", async () => {
// Exact SSE data from the user's bug report, using real \n\n SSE separators
const rawSSE =
`data: {"id":"gen-1772969079-EhEx5DeV7JqM43lpl47Y","object":"chat.completion.chunk","created":1772969079,"model":"anthropic/claude-4.5-haiku-20251001","provider":"Amazon Bedrock","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"id":"toolu_bdrk_01AMXFNuQYF6fxS1hryPtu9K","type":"function","function":{"name":"get_all_tabs","arguments":""}}]},"finish_reason":null,"native_finish_reason":null}]}\n` +
`\n` +
`data: {"id":"gen-1772969079-EhEx5DeV7JqM43lpl47Y","object":"chat.completion.chunk","created":1772969079,"model":"anthropic/claude-4.5-haiku-20251001","provider":"Amazon Bedrock","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null,"native_finish_reason":null}]}\n` +
`\n` +
`data: {"id":"gen-1772969079-EhEx5DeV7JqM43lpl47Y","object":"chat.completion.chunk","created":1772969079,"model":"anthropic/claude-4.5-haiku-20251001","provider":"Amazon Bedrock","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null,"native_finish_reason":null}]}\n` +
`\n` +
`data: {"id":"gen-1772969079-EhEx5DeV7JqM43lpl47Y","object":"chat.completion.chunk","created":1772969079,"model":"anthropic/claude-4.5-haiku-20251001","provider":"Amazon Bedrock","choices":[{"index":0,"delta":{"content":"","role":"assistant"},"finish_reason":"tool_calls","native_finish_reason":"tool_calls"}]}\n` +
`\n` +
`data: {"id":"gen-1772969079-EhEx5DeV7JqM43lpl47Y","object":"chat.completion.chunk","created":1772969079,"model":"anthropic/claude-4.5-haiku-20251001","provider":"Amazon Bedrock","choices":[],"usage":{"prompt_tokens":25105,"completion_tokens":55,"total_tokens":25160}}\n` +
`\n` +
`data: [DONE]\n`;
const encoder = new TextEncoder();
const input = new ReadableStream<Uint8Array>({
start(controller) {
controller.enqueue(encoder.encode(rawSSE));
controller.close();
},
});
const output = createEmptyToolArgsFinalizer(input);
const outputLines = await readStreamLines(output);
const dataLines = outputLines.filter(
(l) => l.startsWith("data: ") && l !== "data: [DONE]",
);
// Find the synthetic line with arguments "{}"
const syntheticLines = dataLines.filter((line) => {
try {
const data = JSON.parse(line.slice(6));
const tc = data.choices?.[0]?.delta?.tool_calls?.[0];
return tc?.function?.arguments === "{}";
} catch {
return false;
}
});
expect(syntheticLines.length).toBe(1);
const syntheticData = JSON.parse(syntheticLines[0]!.slice(6));
expect(syntheticData.choices[0].delta.tool_calls[0].index).toBe(0);
expect(
syntheticData.choices[0].delta.tool_calls[0].function.arguments,
).toBe("{}");
// The finish line should still be present
const finishLines = dataLines.filter((line) => {
try {
const data = JSON.parse(line.slice(6));
return data.choices?.[0]?.finish_reason === "tool_calls";
} catch {
return false;
}
});
expect(finishLines.length).toBe(1);
});
it("should produce valid individually-parseable SSE events (no event boundary merging)", async () => {
// Regression test: synthetic chunks must be terminated by a blank line so that
// EventSourceParserStream treats them as separate events from the finish chunk.
// Without the fix, synthetic + finish were emitted as consecutive data: lines in one
// event, producing "{synthetic_json}\n{finish_json}" which is not valid JSON.
const rawSSE =
`data: {"id":"gen-1","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"id":"call_1","type":"function","function":{"name":"get_all_tabs","arguments":""}}]},"finish_reason":null}]}\n` +
`\n` +
`data: {"id":"gen-1","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null}]}\n` +
`\n` +
`data: {"id":"gen-1","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":"","role":"assistant"},"finish_reason":"tool_calls"}]}\n` +
`\n` +
`data: [DONE]\n`;
const encoder = new TextEncoder();
const input = new ReadableStream<Uint8Array>({
start(controller) {
controller.enqueue(encoder.encode(rawSSE));
controller.close();
},
});
const output = createEmptyToolArgsFinalizer(input);
// Read entire output as text and split into SSE events by double-newline,
// matching how EventSourceParserStream works.
const decoder = new TextDecoder();
const reader = output.getReader();
let text = "";
while (true) {
const { done, value } = await reader.read();
if (done) break;
text += decoder.decode(value, { stream: true });
}
// Split by blank lines to get raw SSE events (same as EventSourceParserStream does)
const rawEvents = text.split(/\n\n+/).filter((e) => e.trim().length > 0);
const dataEvents = rawEvents
.map((raw) => {
const dataLines = raw
.split("\n")
.filter((l) => l.startsWith("data: "))
.map((l) => l.slice(6));
return dataLines.join("\n");
})
.filter((d) => d.length > 0 && d !== "[DONE]");
// Should have: 2 original tool_call chunks + 1 synthetic + 1 finish = 4
expect(dataEvents.length).toBe(4);
// Every event must contain individually valid JSON (not two JSON objects merged)
for (const eventData of dataEvents) {
expect(
() => JSON.parse(eventData),
`Expected valid JSON but got: ${eventData}`,
).not.toThrow();
}
// The synthetic event must be its own event with arguments: "{}"
const syntheticIdx = dataEvents.findIndex((d) => {
const parsed = JSON.parse(d);
return (
parsed.choices?.[0]?.delta?.tool_calls?.[0]?.function?.arguments ===
"{}"
);
});
expect(syntheticIdx).toBeGreaterThanOrEqual(0);
expect(
JSON.parse(dataEvents[syntheticIdx]!).choices[0].finish_reason,
).toBeNull();
// The finish event must be a separate event after the synthetic one
const finishIdx = dataEvents.findIndex((d) => {
const parsed = JSON.parse(d);
return parsed.choices?.[0]?.finish_reason === "tool_calls";
});
expect(finishIdx).toBeGreaterThan(syntheticIdx);
});
it("should work with chunked delivery (data arriving in small pieces)", async () => {
const rawSSE =
`data: {"id":"gen-c","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"id":"call_c1","type":"function","function":{"name":"get_all_tabs","arguments":""}}]},"finish_reason":null}]}\n` +
`\n` +
`data: {"id":"gen-c","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"index":0,"function":{"arguments":""}}]},"finish_reason":null}]}\n` +
`\n` +
`data: {"id":"gen-c","object":"chat.completion.chunk","created":1,"model":"test","choices":[{"index":0,"delta":{"content":"","role":"assistant"},"finish_reason":"tool_calls"}]}\n` +
`\n` +
`data: [DONE]\n`;
const encoder = new TextEncoder();
const bytes = encoder.encode(rawSSE);
// Deliver in chunks of 37 bytes (deliberately awkward size to split mid-line)
const chunkSize = 37;
const input = new ReadableStream<Uint8Array>({
start(controller) {
for (let i = 0; i < bytes.length; i += chunkSize) {
controller.enqueue(
bytes.slice(i, Math.min(i + chunkSize, bytes.length)),
);
}
controller.close();
},
});
const output = createEmptyToolArgsFinalizer(input);
const outputLines = await readStreamLines(output);
const dataLines = outputLines.filter(
(l) => l.startsWith("data: ") && l !== "data: [DONE]",
);
// Find synthetic line
const syntheticLines = dataLines.filter((line) => {
try {
const data = JSON.parse(line.slice(6));
const tc = data.choices?.[0]?.delta?.tool_calls?.[0];
return tc?.function?.arguments === "{}";
} catch {
return false;
}
});
expect(syntheticLines.length).toBe(1);
// Finish line must still be present
const finishLines = dataLines.filter((line) => {
try {
const data = JSON.parse(line.slice(6));
return data.choices?.[0]?.finish_reason === "tool_calls";
} catch {
return false;
}
});
expect(finishLines.length).toBe(1);
});
});

View File

@@ -121,6 +121,146 @@ export function createAIProvider(settings: AppSettings) {
}
}
/**
* Stateful SSE stream transform that fixes parameterless tool calls from
* providers like Anthropic via OpenRouter/proxy.
*
* Some providers stream tool_calls with `"arguments":""` for every chunk when
* the tool has no parameters. The AI SDK uses `isParsableJson` to decide when
* a tool call is complete, and `""` never passes that check, so the tool call
* is silently dropped.
*
* A naive text-replacement of `""` → `"{}"` on every chunk would break tools
* that DO have arguments (the first empty chunk would be treated as complete
* `{}`, and all subsequent real-argument chunks would be discarded).
*
* This transform tracks tool call state across the stream:
* - Passes all SSE lines through **unchanged** during streaming
* - When `finish_reason: "tool_calls"` arrives, injects a synthetic SSE chunk
* with `"arguments":"{}"` for every tool call whose accumulated arguments
* are still empty — right before the finish chunk
*/
export function createEmptyToolArgsFinalizer(
original: ReadableStream<Uint8Array>,
): ReadableStream<Uint8Array> {
const decoder = new TextDecoder();
const encoder = new TextEncoder();
let buffer = "";
// Track accumulated arguments per tool call index
const toolCallArgs = new Map<
number,
{ id: string; name: string; args: string }
>();
// Capture the chunk id so synthetic events look like they belong to the same response
let streamId: string | undefined;
function processLine(
line: string,
controller: ReadableStreamDefaultController<Uint8Array>,
) {
if (!line.startsWith("data: ") || line === "data: [DONE]") {
controller.enqueue(encoder.encode(`${line}\n`));
return;
}
let parsed: any;
try {
parsed = JSON.parse(line.slice(6));
} catch {
controller.enqueue(encoder.encode(`${line}\n`));
return;
}
if (!streamId && parsed.id) {
streamId = parsed.id;
}
const choice = parsed.choices?.[0];
// Track tool call arguments
const toolCalls = choice?.delta?.tool_calls;
if (Array.isArray(toolCalls)) {
for (const tc of toolCalls) {
const idx = tc.index;
if (typeof idx !== "number") continue;
const existing = toolCallArgs.get(idx);
if (!existing) {
toolCallArgs.set(idx, {
id: tc.id ?? "",
name: tc.function?.name ?? "",
args: tc.function?.arguments ?? "",
});
} else {
if (tc.function?.arguments != null) {
existing.args += tc.function.arguments;
}
}
}
}
// When finish_reason is tool_calls, inject synthetic chunks for empty args
if (choice?.finish_reason === "tool_calls") {
for (const [idx, tc] of toolCallArgs) {
if (tc.args === "") {
const synthetic = {
id: streamId ?? parsed.id ?? "",
object: "chat.completion.chunk",
created: parsed.created ?? 0,
model: parsed.model ?? "",
choices: [
{
index: 0,
delta: {
tool_calls: [
{
index: idx,
function: { arguments: "{}" },
},
],
},
finish_reason: null,
},
],
};
controller.enqueue(
encoder.encode(`data: ${JSON.stringify(synthetic)}\n\n`),
);
}
}
}
controller.enqueue(encoder.encode(`${line}\n`));
}
return new ReadableStream<Uint8Array>({
async start(controller) {
const reader = original.getReader();
try {
while (true) {
const { done, value } = await reader.read();
if (done) {
if (buffer.length > 0) {
processLine(buffer, controller);
}
controller.close();
break;
}
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split("\n");
buffer = lines.pop()!;
for (const line of lines) {
processLine(line, controller);
}
}
} catch (err) {
controller.error(err);
}
},
});
}
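// Illustrative before/after (editorial note, not part of this diff; fields
// abbreviated). A parameterless tool call streams empty arguments throughout:
//   data: {"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"name":"get_all_tabs","arguments":""}}]},"finish_reason":null}]}
//   data: {"choices":[{"delta":{},"finish_reason":"tool_calls"}]}
// The output is identical except for one synthetic chunk injected before the
// finish chunk, which lets the AI SDK's isParsableJson check succeed:
//   data: {"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{}"}}]},"finish_reason":null}]}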
/**
* Create an AI SDK provider for proxy mode (non-BYOK).
*
@@ -128,26 +268,31 @@ export function createAIProvider(settings: AppSettings) {
* requests and authenticates via session cookies.
*/
export function createProxyProvider(): OpenAIProvider["chat"] {
// The proxy endpoint is OpenAI-compatible.
  // We pass a placeholder API key because auth is handled by cookies injected in
// a custom fetch wrapper.
const openai = createOpenAI({
apiKey: "proxy-no-key",
baseURL: PROXY_API_URL,
// Custom fetch that injects cookie headers for authentication
fetch: async (input, init) => {
const cookieHeader = await getProxyCookieHeader();
const headers = new Headers(init?.headers);
if (cookieHeader) {
headers.set("Cookie", cookieHeader);
}
      // Remove the Authorization header; the proxy uses cookies, not API keys
headers.delete("Authorization");
return globalThis.fetch(input, { ...init, headers });
const response = await globalThis.fetch(input, { ...init, headers });
const contentType = response.headers.get("content-type") ?? "";
if (contentType.includes("text/event-stream") && response.body) {
const patched = createEmptyToolArgsFinalizer(response.body);
return new Response(patched, {
status: response.status,
statusText: response.statusText,
headers: response.headers,
});
}
return response;
},
});
  // Return the chat sub-provider to force the Chat Completions API (/chat/completions)
// instead of the default Responses API (/responses) used by AI SDK v5+
return openai.chat;
}

View File

@@ -27,7 +27,7 @@
--accent: oklch(0.97 0 0);
--accent-foreground: oklch(0.205 0 0);
--destructive: oklch(0.577 0.245 27.325);
--destructive-foreground: oklch(0.577 0.245 27.325);
--destructive-foreground: oklch(0.985 0 0);
--border: oklch(0.922 0 0);
--input: oklch(0.922 0 0);
--ring: oklch(0.708 0 0);

View File

@@ -35,16 +35,17 @@ import {
getAllTabsTool,
getCurrentTabTool,
getTabInfoTool,
switchToTabTool,
ungroupTabsTool,
} from "./tab";
import { downloadChatImagesTool, downloadImageTool } from "./tools/downloads";
import { uploadFileToInputTool } from "./tools/upload-file";
/**
* All browser tools registered for AI use
* Total: 32 tools (28 core + 4 intervention tools)
* Total: 34 tools (30 core + 4 intervention tools)
*
* Disabled tools (per aipex):
* - switch_to_tab (causes context switching issues)
* - duplicate_tab (not in aipex)
* - wait (replaced by computer tool's wait action)
* - capture_screenshot_to_clipboard (not enabled in aipex default bundle)
@@ -61,22 +62,24 @@ type BrowserFunctionTool = FunctionTool<
>;
const browserFunctionTools: BrowserFunctionTool[] = [
// Browser/Tab Management (6 tools)
// Browser/Tab Management (7 tools)
// Note: organize_tabs temporarily disabled (stub/not shipped)
getAllTabsTool,
getCurrentTabTool,
switchToTabTool,
createNewTabTool,
getTabInfoTool,
closeTabTool,
ungroupTabsTool,
// UI Operations (7 tools) - computer tool replaces visual XY tools
// UI Operations (8 tools) - computer tool replaces visual XY tools
searchElementsTool,
clickTool,
fillElementByUidTool,
getEditorValueTool,
fillFormTool,
hoverElementByUidTool,
uploadFileToInputTool,
computerTool,
// Page Content (4 tools)

View File

@@ -0,0 +1,160 @@
import { tool } from "@aipexstudio/aipex-core";
import { z } from "zod";
import { CdpCommander } from "../../../automation/cdp-commander";
import { debuggerManager } from "../../../automation/debugger-manager";
/**
* Resolve the target file input element on the page.
*
* Strategy:
* 1. If uid provided: try UID-based lookup via [data-aipex-nodeid]
* 2. Fallback: CSS querySelectorAll('input[type=file]')[inputIndex]
* This finds ALL file inputs including display:none hidden ones.
*/
async function resolveFileInputNodeId(
_tabId: number,
cdp: CdpCommander,
rootNodeId: number,
uid: string | undefined,
inputIndex: number,
): Promise<{ nodeId: number; error?: string }> {
if (uid) {
const esc = uid.replace(/\\/g, "\\\\").replace(/"/g, '\\"');
const result = await cdp.sendCommand<{ nodeId: number }>(
"DOM.querySelector",
{
nodeId: rootNodeId,
selector: `[data-aipex-nodeid="${esc}"]`,
},
);
if (result.nodeId) {
return { nodeId: result.nodeId };
}
}
const result = await cdp.sendCommand<{ nodeIds: number[] }>(
"DOM.querySelectorAll",
{ nodeId: rootNodeId, selector: "input[type=file]" },
);
const nodeIds = result.nodeIds ?? [];
if (nodeIds.length === 0) {
return {
nodeId: 0,
error: 'No <input type="file"> found on this page',
};
}
if (inputIndex >= nodeIds.length) {
return {
nodeId: 0,
error: `input_index ${inputIndex} out of range — page has ${nodeIds.length} file input(s)`,
};
}
return { nodeId: nodeIds[inputIndex]! };
}
export const uploadFileToInputTool = tool({
name: "upload_file_to_input",
description: `Upload a file to a file input element (<input type="file">) on the page using a local file path.
Uses Chrome DevTools Protocol to set the file directly — no file content is read into memory.
WORKFLOW:
1. Provide the tabId and a local file_path
2. The tool automatically finds the file input (including hidden ones)
3. If the page has multiple file inputs, use input_index to select which one (0 = first)
4. Optionally provide uid from a snapshot if you know the exact element
NOTE: Most websites hide the actual <input type="file"> behind a styled button. This tool handles both visible and hidden file inputs automatically.
AFTER UPLOAD: take a screenshot to verify the file was accepted, then proceed to submit the form.`,
parameters: z.object({
tabId: z
.number()
.describe("The ID of the tab containing the file input element"),
file_path: z
.string()
.describe(
"Absolute local file path to upload (e.g. '/Users/me/resume.pdf'). " +
"Chrome reads the file natively via CDP — no file content is sent to the AI.",
),
uid: z
.string()
.optional()
.describe(
"UID of the <input type='file'> element from the page snapshot. " +
"OPTIONAL — if omitted, the tool automatically finds the file input.",
),
input_index: z
.number()
.optional()
.describe(
"0-based index to select which file input to target when the page has multiple. " +
"Defaults to 0. Only used when uid is not provided or not found.",
),
}),
execute: async ({ tabId, file_path, uid, input_index }) => {
const inputIndex = input_index ?? 0;
const attached = await debuggerManager.safeAttachDebugger(tabId);
if (!attached) {
return { success: false, message: "Failed to attach debugger to tab" };
}
const cdp = new CdpCommander(tabId);
try {
await cdp.sendCommand("DOM.enable", {});
const { root } = (await cdp.sendCommand("DOM.getDocument", {
depth: 0,
})) as { root: { nodeId: number } };
const resolved = await resolveFileInputNodeId(
tabId,
cdp,
root.nodeId,
uid,
inputIndex,
);
if (!resolved.nodeId) {
return {
success: false,
message: resolved.error ?? "File input element not found",
};
}
await cdp.sendCommand("DOM.setFileInputFiles", {
nodeId: resolved.nodeId,
files: [file_path],
});
const filename = file_path.split(/[\\/]/).pop() ?? file_path;
return {
success: true,
message: `File "${filename}" successfully uploaded to the file input element`,
filename,
};
} catch (err) {
const msg = err instanceof Error ? err.message : String(err);
if (msg.includes("No node") || msg.toLowerCase().includes("nodeid")) {
return {
success: false,
message:
"File input element not found. Use search_elements to verify the element exists.",
};
}
if (
msg.includes("File not found") ||
msg.includes("ENOENT") ||
msg.includes("no such file")
) {
return {
success: false,
message: `Local file not found: ${file_path}`,
};
}
return { success: false, message: `CDP error: ${msg}` };
}
},
});
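A hedged usage sketch (illustrative only, not part of this diff; it assumes the tool() wrapper from @aipexstudio/aipex-core exposes its handler as execute, and the tab id and file path are hypothetical):

const result = await uploadFileToInputTool.execute({
  tabId: 42,                     // hypothetical tab id
  file_path: "/tmp/example.pdf", // hypothetical local path
  input_index: 0,                // target the first file input on the page
});
// On success: { success: true, message: 'File "example.pdf" successfully uploaded ...', filename: "example.pdf" }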

View File

@@ -40,8 +40,8 @@
"license": "MIT",
"type": "module",
"dependencies": {
"@openai/agents": "^0.4.3",
"@openai/agents-extensions": "^0.4.10",
"@openai/agents": "^0.5.4",
"@openai/agents-extensions": "^0.5.4",
"lru-cache": "^11.2.4",
"zod": "^4.3.6"
},

View File

@@ -901,6 +901,49 @@ describe("AIPex", () => {
}
});
it("should default empty-string arguments to empty object", async () => {
vi.mocked(run).mockResolvedValue(
createMockRunResult({
finalOutput: "",
streamEvents: [
{
type: "run_item_stream_event",
name: "tool_called",
item: { rawItem: { name: "screenshot", arguments: "" } },
},
],
}),
);
const agent = AIPex.create({
instructions: "Tools",
model: mockModel,
});
const events: AgentEvent[] = [];
for await (const event of agent.chat("take screenshot")) {
events.push(event);
}
const toolStart = events.find(
(event) => event.type === "tool_call_start",
);
expect(toolStart).toBeDefined();
if (toolStart?.type === "tool_call_start") {
expect(toolStart.toolName).toBe("screenshot");
expect(toolStart.params).toEqual({});
}
const argsComplete = events.find(
(event) => event.type === "tool_call_args_streaming_complete",
);
expect(argsComplete).toBeDefined();
if (argsComplete?.type === "tool_call_args_streaming_complete") {
expect(argsComplete.toolName).toBe("screenshot");
expect(argsComplete.params).toEqual({});
}
});
it("should emit tool lifecycle events", async () => {
vi.mocked(run).mockResolvedValue(
createMockRunResult({

View File

@@ -151,6 +151,9 @@ export class AIPex {
});
let streamedOutput = "";
let toolCallsDetectedInRaw = 0;
let toolCallsEmittedByRunner = 0;
for await (const streamEvent of result) {
if (streamEvent.type === "raw_model_stream_event") {
// New response boundary: reset per-response tool args tracking.
@@ -162,6 +165,30 @@ export class AIPex {
continue;
}
// Log response_done events for debugging tool call assembly
if (
(streamEvent.data as unknown as { type?: string })?.type ===
"response_done"
) {
const response = (
streamEvent.data as unknown as {
response?: { output?: unknown[] };
}
)?.response;
const outputItems = response?.output;
if (Array.isArray(outputItems)) {
const functionCalls = outputItems.filter(
(item: any) => item?.type === "function_call",
);
if (functionCalls.length > 0) {
console.log(
`[AIPex] response_done contains ${functionCalls.length} function_call(s):`,
functionCalls.map((fc: any) => fc.name),
);
}
}
}
// Best-effort: detect tool call argument streaming from raw provider events.
// For OpenAI ChatCompletions streaming, the raw chunk is available under
// streamEvent.data.type === "model" with a shape like:
@@ -175,6 +202,7 @@ export class AIPex {
const delta = Array.isArray(choices) ? choices?.[0]?.delta : null;
const toolCalls = delta?.tool_calls;
if (Array.isArray(toolCalls)) {
toolCallsDetectedInRaw++;
for (const tcDelta of toolCalls) {
const index = tcDelta?.index;
if (typeof index !== "number") continue;
@@ -220,6 +248,7 @@ export class AIPex {
// Emit tool args "complete" right before the tool call starts, so UIs can
// show a "parameters ready" transition even if they couldn't observe args streaming.
if (streamEvent.name === "tool_called") {
toolCallsEmittedByRunner++;
const toolName = this.extractToolName(streamEvent.item);
const params = this.extractToolArguments(streamEvent.item);
const argsCompleteEvent: AgentEvent = {
@@ -239,6 +268,13 @@ export class AIPex {
}
}
if (toolCallsDetectedInRaw > 0 || toolCallsEmittedByRunner > 0) {
console.log(
`[AIPex] Stream complete: ${toolCallsDetectedInRaw} raw tool_call chunks, ` +
`${toolCallsEmittedByRunner} runner tool_called events`,
);
}
const finalOutput =
typeof result.finalOutput === "string" && result.finalOutput.length > 0
? result.finalOutput
@@ -395,6 +431,46 @@ export class AIPex {
yield* this.runExecution(finalInput, session);
}
/**
* Roll back the session to the state just after the last user message,
* removing any assistant/tool items that followed it.
* Used by regenerate to avoid duplicate history when re-running.
*/
async rollbackLastAssistantTurn(sessionId: string): Promise<boolean> {
if (!this.conversationManager) return false;
const session = await this.conversationManager.getSession(sessionId);
if (!session) return false;
const items = await session.getItems();
if (items.length === 0) return false;
let lastUserIndex = -1;
for (let i = items.length - 1; i >= 0; i--) {
const item = items[i] as Record<string, unknown>;
// AgentInputItem is a discriminated union; user messages have
// type === "message" (or undefined) and role === "user".
const isUserMessage =
(item.type === "message" || item.type === undefined) &&
item.role === "user";
if (isUserMessage) {
lastUserIndex = i;
break;
}
}
if (lastUserIndex === -1) return false;
if (lastUserIndex === items.length - 1) return false;
const itemsToRemove = items.length - 1 - lastUserIndex;
for (let i = 0; i < itemsToRemove; i++) {
await session.popItem();
}
await this.conversationManager.saveSession(session);
return true;
}
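// Worked example (editorial note, not part of this diff):
//   items = [user, assistant, tool_call, tool_result, assistant]
//   lastUserIndex = 0, items.length = 5
//   itemsToRemove = 5 - 1 - 0 = 4, so popItem() runs four times,
//   leaving only the user message before the turn is re-run.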
getConversationManager(): ConversationManager | undefined {
return this.conversationManager;
}
@@ -457,6 +533,7 @@ export class AIPex {
const raw = item as unknown as { rawItem?: { arguments?: unknown } };
const args = raw.rawItem?.arguments;
if (typeof args === "string") {
if (args === "") return {};
const parsed = safeJsonParse<unknown>(args);
if (parsed !== undefined) return parsed;
return args;

pnpm-lock.yaml (generated, 128 changed lines)
View File

@@ -181,8 +181,8 @@ importers:
specifier: ^3.0.22
version: 3.0.22(zod@4.3.6)
'@ai-sdk/openai':
specifier: ^3.0.25
version: 3.0.25(zod@4.3.6)
specifier: ^3.0.41
version: 3.0.41(zod@4.3.6)
'@ai-sdk/openai-compatible':
specifier: ^2.0.18
version: 2.0.21(zod@4.3.6)
@@ -408,11 +408,11 @@ importers:
specifier: ^3.0.0
version: 3.0.10(zod@4.3.6)
'@openai/agents':
specifier: ^0.4.3
version: 0.4.3(ws@8.19.0)(zod@4.3.6)
specifier: ^0.5.4
version: 0.5.4(ws@8.19.0)(zod@4.3.6)
'@openai/agents-extensions':
specifier: ^0.4.10
version: 0.4.10(@ai-sdk/provider@3.0.8)(@openai/agents@0.4.3(ws@8.19.0)(zod@4.3.6))(ai@6.0.105(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
specifier: ^0.5.4
version: 0.5.4(@ai-sdk/provider@3.0.8)(@openai/agents@0.5.4(ws@8.19.0)(zod@4.3.6))(ai@6.0.105(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@openrouter/ai-sdk-provider':
specifier: ^2.0.0
version: 2.1.1(ai@6.0.105(zod@4.3.6))(zod@4.3.6)
@@ -489,6 +489,12 @@ packages:
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/openai@3.0.41':
resolution: {integrity: sha512-IZ42A+FO+vuEQCVNqlnAPYQnnUpUfdJIwn1BEDOBywiEHa23fw7PahxVtlX9zm3/zMvTW4JKPzWyvAgDu+SQ2A==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider-utils@4.0.10':
resolution: {integrity: sha512-VeDAiCH+ZK8Xs4hb9Cw7pHlujWNL52RKe8TExOkrw6Ir1AmfajBZTb9XUdKOZO08RwQElIKA8+Ltm+Gqfo8djQ==}
engines: {node: '>=18'}
@@ -519,6 +525,12 @@ packages:
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider-utils@4.0.19':
resolution: {integrity: sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider-utils@4.0.4':
resolution: {integrity: sha512-VxhX0B/dWGbpNHxrKCWUAJKXIXV015J4e7qYjdIU9lLWeptk0KMLGcqkB4wFxff5Njqur8dt8wRi1MN9lZtDqg==}
engines: {node: '>=18'}
@@ -1181,24 +1193,16 @@ packages:
resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==}
engines: {node: '>= 8'}
'@openai/agents-core@0.4.10':
resolution: {integrity: sha512-U2uu22OZGFZ53Ogm5Qtzymg1Oc1FFNdkh+fg0QWDJ7mERQU5G4LzhbTiwS/jylVgKPj74e2uBb8oj/X5rHwxDQ==}
'@openai/agents-core@0.5.4':
resolution: {integrity: sha512-qAT9zGIIM7GT5/WGkLpp8Fuar7NL5qu30b5+o2jP3mE6aMfx9OZjdj0za/iYLeV5kzQ5pOcbvRXenfzHrhvd/A==}
peerDependencies:
zod: ^4.0.0
peerDependenciesMeta:
zod:
optional: true
'@openai/agents-core@0.4.3':
resolution: {integrity: sha512-2cz7ptgdHp2NzoN9uImBypRmR+e8ExvBXhxdDnNE6Cw0R8LQxd/KfdDupg7BB1inleOKLgDoasBXD7jRZ0iSQA==}
peerDependencies:
zod: ^4.0.0
peerDependenciesMeta:
zod:
optional: true
'@openai/agents-extensions@0.4.10':
resolution: {integrity: sha512-5pOeoP1tdTd1gTeqwFlW0QQqHE/DwkJ3Xd/axj4iw/bHQomWrevu+EqmFjywvKce65Rmb90HRckx7TpkvOdnbg==}
'@openai/agents-extensions@0.5.4':
resolution: {integrity: sha512-YqS/5VZX4LAtThjujvJcZU7ck2WeyH6/C+69ErbbVzLhPxyg13TAY1556RsEi9A/pmkS0lwWIyMZ7x660CHFzA==}
peerDependencies:
'@ai-sdk/provider': ^2.0.0 || ^3.0.0
'@openai/agents': '>=0.0.0'
@@ -1214,18 +1218,18 @@ packages:
ai:
optional: true
'@openai/agents-openai@0.4.3':
resolution: {integrity: sha512-dBQDr6IJR9+h/7+0Xn0CQhBbVWvn3PE3a56WuwbKr3cYHU9QxUbVrsRprx3ERboww2qy2D9Hk6Cik2q9MHDnZg==}
'@openai/agents-openai@0.5.4':
resolution: {integrity: sha512-1uDEu9iwM7oB3oWNxvT/yzkcr7WtjHe1ekbQOAsasEv9S0MKTT8uP2kknRVgxzgw+awTZBrhO2vfGhD1iKinuQ==}
peerDependencies:
zod: ^4.0.0
'@openai/agents-realtime@0.4.3':
resolution: {integrity: sha512-hfe69MfEM0KRN/auLzca7UlMl8mcNMhnIQfOb01R0SbFpU1yl332kQEZw3a2o+n9YRCVAmP6QKuJjejsZwqbvg==}
'@openai/agents-realtime@0.5.4':
resolution: {integrity: sha512-qlrhMWD3Xpzfrxplt/jvc1nlGtjNnRmyzgRAj6J5HX/bcnP0W4UdYHEJOreiIC8inj27kcVjQslyu0DAjVuXsA==}
peerDependencies:
zod: ^4.0.0
'@openai/agents@0.4.3':
resolution: {integrity: sha512-vj1vRkmhex4r+aQ3EjI9s9u4veFx/kT6NsZmj5qEvBTn8tk7HxPjZJR601dIVBISspH+dqh276I9e2thydPc3w==}
'@openai/agents@0.5.4':
resolution: {integrity: sha512-INstpf2vZ0rV6Zq9jcSzqq/oL2/D84YGGKCXnU2otAcQ0ji/VZm+zplDow/+oENnvKiXKdVtOrGMsXqNFL7W+Q==}
peerDependencies:
zod: ^4.0.0
@@ -4023,18 +4027,6 @@ packages:
onnxruntime-web@1.24.1:
resolution: {integrity: sha512-i2u395dv+ZEQBdH+aORvlu19Bzvlg5AXJ7wjxnL350hknOP9z0UeP3pVfjkpMEWMPy2T6nCQxetKTmNia6wSzg==}
openai@6.15.0:
resolution: {integrity: sha512-F1Lvs5BoVvmZtzkUEVyh8mDQPPFolq4F+xdsx/DO8Hee8YF3IGAlZqUIsF+DVGhqf4aU0a3bTghsxB6OIsRy1g==}
hasBin: true
peerDependencies:
ws: ^8.18.0
zod: ^3.25 || ^4.0
peerDependenciesMeta:
ws:
optional: true
zod:
optional: true
openai@6.22.0:
resolution: {integrity: sha512-7Yvy17F33Bi9RutWbsaYt5hJEEJ/krRPOrwan+f9aCPuMat1WVsb2VNSII5W1EksKT6fF69TG/xj4XzodK3JZw==}
hasBin: true
@@ -5170,6 +5162,12 @@ snapshots:
'@ai-sdk/provider-utils': 4.0.13(zod@4.3.6)
zod: 4.3.6
'@ai-sdk/openai@3.0.41(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.8
'@ai-sdk/provider-utils': 4.0.19(zod@4.3.6)
zod: 4.3.6
'@ai-sdk/provider-utils@4.0.10(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.5
@@ -5205,6 +5203,13 @@ snapshots:
eventsource-parser: 3.0.6
zod: 4.3.6
'@ai-sdk/provider-utils@4.0.19(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.8
'@standard-schema/spec': 1.1.0
eventsource-parser: 3.0.6
zod: 4.3.6
'@ai-sdk/provider-utils@4.0.4(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
@@ -5914,7 +5919,7 @@ snapshots:
'@nodelib/fs.scandir': 2.1.5
fastq: 1.19.1
'@openai/agents-core@0.4.10(ws@8.19.0)(zod@4.3.6)':
'@openai/agents-core@0.5.4(ws@8.19.0)(zod@4.3.6)':
dependencies:
debug: 4.4.3
openai: 6.22.0(ws@8.19.0)(zod@4.3.6)
@@ -5926,22 +5931,10 @@ snapshots:
- supports-color
- ws
'@openai/agents-core@0.4.3(ws@8.19.0)(zod@4.3.6)':
'@openai/agents-extensions@0.5.4(@ai-sdk/provider@3.0.8)(@openai/agents@0.5.4(ws@8.19.0)(zod@4.3.6))(ai@6.0.105(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
debug: 4.4.3
openai: 6.15.0(ws@8.19.0)(zod@4.3.6)
optionalDependencies:
'@modelcontextprotocol/sdk': 1.26.0(zod@4.3.6)
zod: 4.3.6
transitivePeerDependencies:
- '@cfworker/json-schema'
- supports-color
- ws
'@openai/agents-extensions@0.4.10(@ai-sdk/provider@3.0.8)(@openai/agents@0.4.3(ws@8.19.0)(zod@4.3.6))(ai@6.0.105(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@openai/agents': 0.4.3(ws@8.19.0)(zod@4.3.6)
'@openai/agents-core': 0.4.10(ws@8.19.0)(zod@4.3.6)
'@openai/agents': 0.5.4(ws@8.19.0)(zod@4.3.6)
'@openai/agents-core': 0.5.4(ws@8.19.0)(zod@4.3.6)
'@types/ws': 8.18.1
debug: 4.4.3
ws: 8.19.0
@@ -5953,20 +5946,20 @@ snapshots:
- '@cfworker/json-schema'
- supports-color
'@openai/agents-openai@0.4.3(ws@8.19.0)(zod@4.3.6)':
'@openai/agents-openai@0.5.4(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@openai/agents-core': 0.4.3(ws@8.19.0)(zod@4.3.6)
'@openai/agents-core': 0.5.4(ws@8.19.0)(zod@4.3.6)
debug: 4.4.3
openai: 6.15.0(ws@8.19.0)(zod@4.3.6)
openai: 6.22.0(ws@8.19.0)(zod@4.3.6)
zod: 4.3.6
transitivePeerDependencies:
- '@cfworker/json-schema'
- supports-color
- ws
'@openai/agents-realtime@0.4.3(zod@4.3.6)':
'@openai/agents-realtime@0.5.4(zod@4.3.6)':
dependencies:
'@openai/agents-core': 0.4.3(ws@8.19.0)(zod@4.3.6)
'@openai/agents-core': 0.5.4(ws@8.19.0)(zod@4.3.6)
'@types/ws': 8.18.1
debug: 4.4.3
ws: 8.19.0
@@ -5977,13 +5970,13 @@ snapshots:
- supports-color
- utf-8-validate
'@openai/agents@0.4.3(ws@8.19.0)(zod@4.3.6)':
'@openai/agents@0.5.4(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@openai/agents-core': 0.4.3(ws@8.19.0)(zod@4.3.6)
'@openai/agents-openai': 0.4.3(ws@8.19.0)(zod@4.3.6)
'@openai/agents-realtime': 0.4.3(zod@4.3.6)
'@openai/agents-core': 0.5.4(ws@8.19.0)(zod@4.3.6)
'@openai/agents-openai': 0.5.4(ws@8.19.0)(zod@4.3.6)
'@openai/agents-realtime': 0.5.4(zod@4.3.6)
debug: 4.4.3
openai: 6.15.0(ws@8.19.0)(zod@4.3.6)
openai: 6.22.0(ws@8.19.0)(zod@4.3.6)
zod: 4.3.6
transitivePeerDependencies:
- '@cfworker/json-schema'
@@ -6841,7 +6834,6 @@ snapshots:
'@types/node@25.3.0':
dependencies:
undici-types: 7.18.2
optional: true
'@types/parse-json@4.0.2': {}
@@ -6879,7 +6871,7 @@ snapshots:
'@types/ws@8.18.1':
dependencies:
'@types/node': 25.2.3
'@types/node': 25.3.0
'@types/yauzl@2.10.3':
dependencies:
@@ -8976,11 +8968,6 @@ snapshots:
platform: 1.3.6
protobufjs: 7.5.4
openai@6.15.0(ws@8.19.0)(zod@4.3.6):
optionalDependencies:
ws: 8.19.0
zod: 4.3.6
openai@6.22.0(ws@8.19.0)(zod@4.3.6):
optionalDependencies:
ws: 8.19.0
@@ -9945,8 +9932,7 @@ snapshots:
undici-types@7.16.0: {}
undici-types@7.18.2:
optional: true
undici-types@7.18.2: {}
undici@7.22.0: {}