fix(gateway): preserve batched client tool calls

This commit is contained in:
Peter Steinberger
2026-05-11 12:52:53 +01:00
parent 0e6aca34db
commit 1f49d34c5f
15 changed files with 402 additions and 24 deletions

View File

@@ -412,7 +412,7 @@ Docs: https://docs.openclaw.ai
- Channels/iMessage: honor `channels.imessage.groups.<chat_id>.systemPrompt` (and the `groups["*"]` wildcard) by forwarding it as `GroupSystemPrompt` on inbound group turns, mirroring the byte-identical resolver semantic from WhatsApp where defining the key as an empty string on a specific group suppresses the wildcard fallback. Brings iMessage to parity with the per-group `systemPrompt` pattern already supported by Discord, Telegram, IRC, Slack, GoogleChat, and the retired BlueBubbles channel. Fixes #78285. (#79383) Thanks @omarshahine.
- iMessage: add opt-in inbound catchup that replays messages received while the gateway was offline (crash, restart, mac sleep) on next startup. Enable with `channels.imessage.catchup.enabled: true`; tunables for `maxAgeMinutes`, `perRunLimit`, `firstRunLookbackMinutes`, and `maxFailureRetries`. Persists a per-account cursor under the OpenClaw state dir (`<openclawStateDir>/imessage/catchup/`), replays each row through the live dispatch path so allowlists/group policy/dedupe behave identically on replayed and live messages, and force-advances past wedged guids after `maxFailureRetries` to prevent stuck cursors. Extends the persisted echo-cache retention window so the agent's own outbound rows from before a gap are not re-fed as inbound on replay. Includes a regenerated `src/config/bundled-channel-config-metadata.generated.ts` so the runtime AJV schema accepts the new `channels.imessage.catchup` block. Fixes #78649. (#79387) Thanks @omarshahine.
- Channels/Yuanbao: bump the bundled `openclaw-plugin-yuanbao` npm spec from `2.11.0` to `2.13.0` in the official external channel catalog and refresh the pinned integrity hash, so fresh installs and catalog-driven reinstalls pick up the newer Yuanbao channel plugin release. (#79620) Thanks @loongfay.
- Gateway/OpenAI-compatible Chat Completions: support function `tools`, `tool_choice`, `tool_calls`, and `role: "tool"` follow-up turns while keeping tool-call stream finalization aligned with the command result and reporting client-tool name conflicts as invalid requests. (#66278) Thanks @Lellansin.
- Providers/Mistral: add `mistral-medium-3-5` to the bundled catalog with reasoning support. Thanks @sliekens.
- Docs/Mistral: document Medium 3.5 setup, local infer smoke usage, adjustable reasoning, and the Mistral HTTP 400 caveat for `reasoning_effort="high"` with `temperature: 0`.

View File

@@ -81,15 +81,15 @@ or Docker-facing stages need it.
The Docker release-path stage runs these chunks when `live_suite_filter` is
empty:
| Chunk | Coverage |
| --------------------------------------------------------------- | -------------------------------------------------------------------------------- |
| `core` | Core Docker release-path smoke lanes. |
| `package-update-openai` | OpenAI package install/update behavior, including Codex on-demand install. |
| `package-update-anthropic` | Anthropic package install and update behavior. |
| `package-update-core` | Provider-neutral package and update behavior. |
| `plugins-runtime-plugins` | Plugin runtime lanes that exercise plugin behavior. |
| `plugins-runtime-services` | Service-backed and live plugin runtime lanes; includes OpenWebUI when requested. |
| `plugins-runtime-install-a` through `plugins-runtime-install-h` | Plugin install/runtime batches split for parallel release validation. |
| Chunk | Coverage |
| --------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- |
| `core` | Core Docker release-path smoke lanes. |
| `package-update-openai` | OpenAI package install/update behavior, Codex on-demand install, and Chat Completions tool calls. |
| `package-update-anthropic` | Anthropic package install and update behavior. |
| `package-update-core` | Provider-neutral package and update behavior. |
| `plugins-runtime-plugins` | Plugin runtime lanes that exercise plugin behavior. |
| `plugins-runtime-services` | Service-backed and live plugin runtime lanes; includes OpenWebUI when requested. |
| `plugins-runtime-install-a` through `plugins-runtime-install-h` | Plugin install/runtime batches split for parallel release validation. |
Use targeted `docker_lanes=<lane[,lane]>` on the reusable live/E2E workflow when
only one Docker lane failed. The release artifacts include per-lane rerun

View File

@@ -1601,6 +1601,7 @@
"test:docker:npm-onboard-slack-channel-agent": "OPENCLAW_NPM_ONBOARD_CHANNEL=slack bash scripts/e2e/npm-onboard-channel-agent-docker.sh",
"test:docker:npm-telegram-live": "bash scripts/e2e/npm-telegram-live-docker.sh",
"test:docker:onboard": "bash scripts/e2e/onboard-docker.sh",
"test:docker:openai-chat-tools": "bash scripts/e2e/openai-chat-tools-docker.sh",
"test:docker:openai-image-auth": "bash scripts/e2e/openai-image-auth-docker.sh",
"test:docker:openai-web-search-minimal": "bash scripts/e2e/openai-web-search-minimal-docker.sh",
"test:docker:openwebui": "bash scripts/e2e/openwebui-docker.sh",

View File

@@ -14,7 +14,12 @@ const packageJson = JSON.parse(readText("package.json"));
const packageScripts = new Set(Object.keys(packageJson.scripts ?? {}));
// These lanes prove package-installed surfaces against live auth, so they
// intentionally need both live credentials and a package-backed image.
const livePackageBackedLanes = new Set(["live-codex-npm-plugin", "live-plugin-tool", "openwebui"]);
const livePackageBackedLanes = new Set([
"live-codex-npm-plugin",
"live-plugin-tool",
"openai-chat-tools",
"openwebui",
]);
function readText(relativePath) {
return fs.readFileSync(path.join(ROOT_DIR, relativePath), "utf8");

View File

@@ -0,0 +1,100 @@
// E2E client for the gateway's OpenAI-compatible Chat Completions endpoint.
// Sends a single user turn that should trigger exactly one get_weather
// function tool call, then validates finish_reason, the tool-call shape, and
// the JSON arguments. Any mismatch throws, failing the lane with a non-zero
// exit code.
const port = process.env.PORT;
const token = process.env.OPENCLAW_GATEWAY_TOKEN;
const backendModel = process.env.MODEL_REF || "openai/gpt-5.4-mini";
const parsedTimeoutSeconds = Number.parseInt(
  process.env.OPENCLAW_OPENAI_CHAT_TOOLS_TIMEOUT_SECONDS ?? "180",
  10,
);
// Guard against a garbage env override: a NaN or non-positive delay would
// make setTimeout fire (and abort the request) immediately.
const timeoutSeconds =
  Number.isFinite(parsedTimeoutSeconds) && parsedTimeoutSeconds > 0 ? parsedTimeoutSeconds : 180;
if (!port || !token) {
  throw new Error("missing PORT/OPENCLAW_GATEWAY_TOKEN");
}
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), timeoutSeconds * 1000);
const started = Date.now();
let response;
try {
  response = await fetch(`http://127.0.0.1:${port}/v1/chat/completions`, {
    method: "POST",
    headers: {
      authorization: `Bearer ${token}`,
      "content-type": "application/json",
      // Routes the request to the requested backend model regardless of the
      // client-facing "model" field below.
      "x-openclaw-model": backendModel,
    },
    body: JSON.stringify({
      model: "openclaw",
      stream: false,
      messages: [
        {
          role: "user",
          content:
            "Use the get_weather tool exactly once for Paris, France. Return the tool call only.",
        },
      ],
      tool_choice: "auto",
      tools: [
        {
          type: "function",
          function: {
            name: "get_weather",
            description: "Return weather for a city.",
            strict: true,
            parameters: {
              type: "object",
              additionalProperties: false,
              properties: {
                city: { type: "string", description: "City and country." },
              },
              required: ["city"],
            },
          },
        },
      ],
    }),
    signal: controller.signal,
  });
} finally {
  // Always clear the abort timer, even when fetch rejects or aborts;
  // otherwise a dangling timer can keep the process alive.
  clearTimeout(timeout);
}
const text = await response.text();
let body;
try {
  body = text ? JSON.parse(text) : {};
} catch {
  throw new Error(`non-JSON response ${response.status}: ${text}`);
}
if (!response.ok) {
  throw new Error(`chat completions request failed ${response.status}: ${JSON.stringify(body)}`);
}
const choice = body.choices?.[0];
const toolCalls = choice?.message?.tool_calls;
if (choice?.finish_reason !== "tool_calls") {
  throw new Error(`expected finish_reason tool_calls: ${JSON.stringify(body)}`);
}
if (!Array.isArray(toolCalls) || toolCalls.length !== 1) {
  throw new Error(`expected exactly one tool call: ${JSON.stringify(body)}`);
}
const [toolCall] = toolCalls;
if (toolCall?.type !== "function" || toolCall?.function?.name !== "get_weather") {
  throw new Error(`unexpected tool call: ${JSON.stringify(toolCall)}`);
}
let args = {};
try {
  args = JSON.parse(toolCall.function.arguments || "{}");
} catch {
  throw new Error(`tool arguments were not valid JSON: ${toolCall.function.arguments}`);
}
// Case-insensitive match so "Paris", "paris, France", etc. all pass.
if (typeof args.city !== "string" || !/paris/i.test(args.city)) {
  throw new Error(`expected Paris city argument: ${JSON.stringify(args)}`);
}
// Emit one machine-readable success line for the harness log.
console.log(
  JSON.stringify({
    ok: true,
    elapsedMs: Date.now() - started,
    finishReason: choice.finish_reason,
    toolName: toolCall.function.name,
    args,
  }),
);

View File

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# Docker E2E scenario: boot the OpenClaw gateway inside the container, wait
# for it to become healthy, then run the Chat Completions tool-call client
# against it. Fails fast (set -euo pipefail) and dumps logs via the ERR trap.
set -euo pipefail
source scripts/lib/openclaw-e2e-instance.sh
# Rehydrate harness-provided test state (config path, state/workspace dirs).
openclaw_e2e_eval_test_state_from_b64 "${OPENCLAW_TEST_STATE_SCRIPT_B64:?missing OPENCLAW_TEST_STATE_SCRIPT_B64}"
# Disable every subsystem this lane does not exercise so startup is fast and
# the tested surface is just the HTTP Chat Completions endpoint.
export OPENCLAW_SKIP_CHANNELS=1
export OPENCLAW_SKIP_GMAIL_WATCHER=1
export OPENCLAW_SKIP_CRON=1
export OPENCLAW_SKIP_CANVAS_HOST=1
export OPENCLAW_SKIP_BROWSER_CONTROL_SERVER=1
export OPENCLAW_SKIP_ACPX_RUNTIME=1
export OPENCLAW_SKIP_ACPX_RUNTIME_PROBE=1
export OPENCLAW_AGENT_HARNESS_FALLBACK=none
# Source the first readable profile so live credentials (OPENAI_API_KEY)
# mounted by the runner become visible. Profiles are not written for strict
# mode, so relax -e/-u around the source and restore them afterwards.
for profile_path in "$HOME/.profile" /home/appuser/.profile; do
if [ -f "$profile_path" ] && [ -r "$profile_path" ]; then
set +e +u
# shellcheck disable=SC1090
source "$profile_path"
set -euo pipefail
break
fi
done
if [ -z "${OPENAI_API_KEY:-}" ]; then
echo "ERROR: OPENAI_API_KEY was not available after sourcing ~/.profile." >&2
exit 1
fi
export OPENAI_API_KEY
# OPENAI_BASE_URL is optional (custom/proxy endpoints); export only if set.
if [ -n "${OPENAI_BASE_URL:-}" ]; then
export OPENAI_BASE_URL
fi
PORT="${PORT:?missing PORT}"
TOKEN="${OPENCLAW_GATEWAY_TOKEN:?missing OPENCLAW_GATEWAY_TOKEN}"
MODEL_REF="${OPENCLAW_OPENAI_CHAT_TOOLS_MODEL:?missing OPENCLAW_OPENAI_CHAT_TOOLS_MODEL}"
GATEWAY_LOG="/tmp/openclaw-openai-chat-tools-gateway.log"
CLIENT_LOG="/tmp/openclaw-openai-chat-tools-client.log"
gateway_pid=""
# Stop the gateway on every exit path, pass or fail.
cleanup() {
openclaw_e2e_stop_process "$gateway_pid"
}
trap cleanup EXIT
# On failure, surface the gateway/client logs plus the key config fields so
# CI output is self-contained for debugging. Secrets are not printed.
dump_debug_logs() {
local status="$1"
echo "OpenAI Chat Completions tools Docker E2E failed with exit code $status" >&2
openclaw_e2e_dump_logs "$GATEWAY_LOG" "$CLIENT_LOG"
if [ -f "$OPENCLAW_CONFIG_PATH" ]; then
echo "--- $OPENCLAW_CONFIG_PATH keys ---" >&2
node -e "const fs=require('fs'); const cfg=JSON.parse(fs.readFileSync(process.argv[1],'utf8')); console.error(JSON.stringify({model:cfg.agents?.defaults?.model, tools:cfg.tools, provider:cfg.models?.providers?.openai && {api:cfg.models.providers.openai.api, baseUrl:cfg.models.providers.openai.baseUrl, agentRuntime:cfg.models.providers.openai.agentRuntime}}, null, 2));" "$OPENCLAW_CONFIG_PATH" || true
fi
}
trap 'status=$?; dump_debug_logs "$status"; exit "$status"' ERR
entry="$(openclaw_e2e_resolve_entrypoint)"
mkdir -p "$OPENCLAW_STATE_DIR" "$OPENCLAW_TEST_WORKSPACE_DIR"
# Generate the gateway config for this lane, then start the gateway.
node scripts/e2e/lib/openai-chat-tools/write-config.mjs
gateway_pid="$(openclaw_e2e_start_gateway "$entry" "$PORT" "$GATEWAY_LOG")"
# Poll up to ~90s (360 * 0.25s) for the gateway to accept health checks,
# bailing out early if the gateway process died before listening.
for _ in $(seq 1 360); do
if ! kill -0 "$gateway_pid" 2>/dev/null; then
echo "gateway exited before listening" >&2
exit 1
fi
if node "$entry" gateway health \
--url "ws://127.0.0.1:$PORT" \
--token "$TOKEN" \
--timeout 120000 \
--json >/dev/null 2>&1; then
break
fi
sleep 0.25
done
# Final authoritative health check; a failure here aborts via the ERR trap.
node "$entry" gateway health \
--url "ws://127.0.0.1:$PORT" \
--token "$TOKEN" \
--timeout 120000 \
--json >/dev/null
# Run the tool-call client and surface its log in the CI output.
PORT="$PORT" OPENCLAW_GATEWAY_TOKEN="$TOKEN" MODEL_REF="$MODEL_REF" \
node scripts/e2e/lib/openai-chat-tools/client.mjs >"$CLIENT_LOG" 2>&1
cat "$CLIENT_LOG"
echo "OpenAI Chat Completions tools Docker E2E passed"

View File

@@ -0,0 +1,90 @@
import fs from "node:fs";
import path from "node:path";
// Resolves a required environment variable or fails fast with a clear error.
function requireEnv(name) {
  const value = process.env[name];
  if (!value) {
    throw new Error(`missing ${name}`);
  }
  return value;
}
const configPath = requireEnv("OPENCLAW_CONFIG_PATH");
const stateDir = requireEnv("OPENCLAW_STATE_DIR");
const workspaceDir = requireEnv("OPENCLAW_TEST_WORKSPACE_DIR");
const modelRef = requireEnv("OPENCLAW_OPENAI_CHAT_TOOLS_MODEL");
const token = requireEnv("OPENCLAW_GATEWAY_TOKEN");
const parsedTimeoutSeconds = Number.parseInt(
  process.env.OPENCLAW_OPENAI_CHAT_TOOLS_TIMEOUT_SECONDS ?? "180",
  10,
);
// Guard against a non-numeric or non-positive env override; a NaN here would
// poison every timeout written into the config below.
const timeoutSeconds =
  Number.isFinite(parsedTimeoutSeconds) && parsedTimeoutSeconds > 0 ? parsedTimeoutSeconds : 180;
// Split on the FIRST "/" only so model ids that themselves contain slashes
// survive intact (modelRef.split("/") would silently truncate them).
const separatorIndex = modelRef.indexOf("/");
const providerId = separatorIndex === -1 ? modelRef : modelRef.slice(0, separatorIndex);
const modelId = separatorIndex === -1 ? "" : modelRef.slice(separatorIndex + 1);
if (providerId !== "openai" || !modelId) {
  throw new Error(`OPENCLAW_OPENAI_CHAT_TOOLS_MODEL must be openai/*, got ${modelRef}`);
}
// Minimal gateway config for this lane: a loopback, token-authenticated
// gateway with only the OpenAI-compatible Chat Completions endpoint enabled.
const config = {
  gateway: {
    port: Number.parseInt(process.env.PORT ?? "18789", 10),
    bind: "loopback",
    auth: { mode: "token", token },
    controlUi: { enabled: false },
    http: {
      endpoints: {
        chatCompletions: { enabled: true },
      },
    },
  },
  models: {
    mode: "merge",
    providers: {
      openai: {
        api: "openai-responses",
        apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
        baseUrl: (process.env.OPENAI_BASE_URL || "https://api.openai.com/v1").trim(),
        agentRuntime: { id: "pi" },
        timeoutSeconds,
        models: [
          {
            id: modelId,
            name: modelId,
            api: "openai-responses",
            reasoning: false,
            input: ["text"],
            // Zero costs: this is a test catalog entry, not billing metadata.
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 128000,
            contextTokens: 64000,
            maxTokens: 512,
          },
        ],
      },
    },
  },
  agents: {
    defaults: {
      model: { primary: modelRef, fallbacks: [] },
      models: {
        [modelRef]: {
          agentRuntime: { id: "pi" },
          params: { transport: "sse", openaiWsWarmup: false },
        },
      },
      workspace: workspaceDir,
      skipBootstrap: true,
      timeoutSeconds,
      contextTokens: 64000,
    },
  },
  plugins: {
    enabled: true,
    allow: ["openai"],
    entries: { openai: { enabled: true } },
  },
  skills: { allowBundled: [] },
  // Only the client-delegated get_weather tool is allowed for this lane.
  tools: { allow: ["get_weather"] },
};
fs.mkdirSync(path.dirname(configPath), { recursive: true });
fs.mkdirSync(workspaceDir, { recursive: true });
// Trailing newline keeps the JSON file friendly to shell tooling.
fs.writeFileSync(configPath, `${JSON.stringify(config, null, 2)}\n`);
fs.mkdirSync(path.join(stateDir, "logs"), { recursive: true });

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Host-side wrapper: build (or reuse) the E2E Docker image, source a live
# credentials profile, and run the openai-chat-tools scenario inside the
# container with the profile mounted read-only.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
source "$ROOT_DIR/scripts/lib/docker-e2e-image.sh"
IMAGE_NAME="$(docker_e2e_resolve_image "openclaw-openai-chat-tools-e2e" OPENCLAW_OPENAI_CHAT_TOOLS_E2E_IMAGE)"
SKIP_BUILD="${OPENCLAW_OPENAI_CHAT_TOOLS_E2E_SKIP_BUILD:-0}"
PORT="${OPENCLAW_OPENAI_CHAT_TOOLS_PORT:-18789}"
# Per-run token ($$ = this shell's PID) so parallel runs don't collide.
TOKEN="openai-chat-tools-e2e-$$"
# Prefer the lane-specific profile, then the shared testbox profile, then the
# user's ~/.profile as a last resort.
PROFILE_FILE="${OPENCLAW_OPENAI_CHAT_TOOLS_PROFILE_FILE:-${OPENCLAW_TESTBOX_PROFILE_FILE:-$HOME/.openclaw-testbox-live.profile}}"
if [ ! -f "$PROFILE_FILE" ] && [ -f "$HOME/.profile" ]; then
PROFILE_FILE="$HOME/.profile"
fi
docker_e2e_build_or_reuse "$IMAGE_NAME" openai-chat-tools "$ROOT_DIR/scripts/e2e/Dockerfile" "$ROOT_DIR" "" "$SKIP_BUILD"
OPENCLAW_TEST_STATE_SCRIPT_B64="$(docker_e2e_test_state_shell_b64 openai-chat-tools empty)"
PROFILE_MOUNT=()
PROFILE_STATUS="none"
if [ -f "$PROFILE_FILE" ] && [ -r "$PROFILE_FILE" ]; then
# set -a exports everything the profile defines so the bare -e OPENAI_*
# flags below can forward those values into the container.
set -a
# shellcheck disable=SC1090
source "$PROFILE_FILE"
set +a
PROFILE_MOUNT=(-v "$PROFILE_FILE":/home/appuser/.profile:ro)
PROFILE_STATUS="$PROFILE_FILE"
fi
echo "Running OpenAI Chat Completions tools Docker E2E..."
echo "Profile file: $PROFILE_STATUS"
docker_e2e_run_logged_with_harness openai-chat-tools \
-e COREPACK_ENABLE_DOWNLOAD_PROMPT=0 \
-e OPENAI_API_KEY \
-e OPENAI_BASE_URL \
-e "OPENCLAW_GATEWAY_TOKEN=$TOKEN" \
-e "OPENCLAW_OPENAI_CHAT_TOOLS_MODEL=${OPENCLAW_OPENAI_CHAT_TOOLS_MODEL:-openai/gpt-5.4-mini}" \
-e "OPENCLAW_OPENAI_CHAT_TOOLS_TIMEOUT_SECONDS=${OPENCLAW_OPENAI_CHAT_TOOLS_TIMEOUT_SECONDS:-180}" \
-e "OPENCLAW_TEST_STATE_SCRIPT_B64=$OPENCLAW_TEST_STATE_SCRIPT_B64" \
-e "PORT=$PORT" \
"${PROFILE_MOUNT[@]}" \
"$IMAGE_NAME" \
bash scripts/e2e/lib/openai-chat-tools/scenario.sh

View File

@@ -333,6 +333,7 @@ function laneCredentialRequirements(poolLane) {
}
if (
poolLane.name === "openwebui" ||
poolLane.name === "openai-chat-tools" ||
poolLane.name === "openai-web-search-minimal" ||
poolLane.name === "live-codex-npm-plugin" ||
poolLane.name === "live-plugin-tool"

View File

@@ -141,6 +141,22 @@ function livePluginToolLane() {
);
}
// Builds the lane definition for the OpenAI Chat Completions tool-call
// Docker E2E. Runs against a functional (non-live) image but needs live
// OpenAI credentials, hence a liveLane entry.
function liveOpenAiChatToolsLane() {
  const laneName = "openai-chat-tools";
  const laneCommand = "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openai-chat-tools";
  const laneOptions = {
    e2eImageKind: "functional",
    needsLiveImage: false,
    provider: "openai",
    resources: ["service"],
    stateScenario: "empty",
    // 10-minute budget: gateway boot plus one live model round-trip.
    timeoutMs: 10 * 60 * 1000,
    weight: 2,
  };
  return liveLane(laneName, laneCommand, laneOptions);
}
export const mainLanes = [
liveLane("live-models", liveDockerScriptCommand("test-live-models-docker.sh"), {
providers: ["claude-cli", "codex-cli", "google-gemini-cli"],
@@ -539,6 +555,7 @@ const releasePathPackageInstallOpenAiLanes = [
weight: 3,
},
),
liveOpenAiChatToolsLane(),
npmLane("codex-on-demand", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:codex-on-demand", {
resources: ["service"],
stateScenario: "empty",

View File

@@ -1244,7 +1244,7 @@ async function main() {
if (buildEnabled) {
const buildEntries = [];
if (scheduledLanes.some((poolLane) => poolLane.live)) {
if (scheduledLanes.some((poolLane) => poolLane.needsLiveImage)) {
buildEntries.push({
command: liveDockerHarnessScriptCommand("test-live-build-docker.sh"),
label: "shared live-test image once",

View File

@@ -124,9 +124,10 @@ function makeClientTool(name: string): ClientToolDefinition {
};
}
async function executeClientTool(
params: unknown,
): Promise<{ calledWith: Record<string, unknown> | undefined }> {
async function executeClientTool(params: unknown): Promise<{
calledWith: Record<string, unknown> | undefined;
result: Awaited<ReturnType<ToolExecute>>;
}> {
let captured: Record<string, unknown> | undefined;
const [def] = toClientToolDefinitions([makeClientTool("search")], (_name, p) => {
captured = p;
@@ -134,14 +135,40 @@ async function executeClientTool(
if (!def) {
throw new Error("missing client tool definition");
}
await def.execute("call-c1", params, undefined, undefined, extensionContext);
return { calledWith: captured };
const result = await def.execute("call-c1", params, undefined, undefined, extensionContext);
return { calledWith: captured, result };
}
describe("toClientToolDefinitions param coercion", () => {
// Batched client tool calls: every delegated tool executed in the same turn
// must return a terminal result (so the agent loop stops and hands control
// back to the client), and the completion callback must observe each call's
// id/name/params exactly once.
it("returns terminal pending results for each client tool in a batch", async () => {
const completed: Array<{ id: string; name: string; params: Record<string, unknown> }> = [];
const defs = toClientToolDefinitions([makeClientTool("search"), makeClientTool("lookup")], {
complete: (id, name, params) => {
completed.push({ id, name, params });
},
});
const [search, lookup] = defs;
if (!search || !lookup) {
throw new Error("missing client tool definition");
}
// Execute both tools concurrently to mirror a batched tool_calls turn.
const [searchResult, lookupResult] = await Promise.all([
search.execute("call-search", { query: "first" }, undefined, undefined, extensionContext),
lookup.execute("call-lookup", { query: "second" }, undefined, undefined, extensionContext),
]);
expect(searchResult.terminate).toBe(true);
expect(lookupResult.terminate).toBe(true);
// Both completions recorded, in execution order, with coerced params.
expect(completed).toEqual([
{ id: "call-search", name: "search", params: { query: "first" } },
{ id: "call-lookup", name: "lookup", params: { query: "second" } },
]);
});
it("passes plain object params through unchanged", async () => {
const { calledWith } = await executeClientTool({ query: "hello" });
const { calledWith, result } = await executeClientTool({ query: "hello" });
expect(calledWith).toEqual({ query: "hello" });
expect(result.terminate).toBe(true);
});
it("parses a JSON string into an object (streaming delta accumulation)", async () => {

View File

@@ -377,12 +377,15 @@ export function toClientToolDefinitions(
}
throw err;
}
// Return a pending result - the client will execute this tool
return jsonResult({
status: "pending",
tool: func.name,
message: "Tool execution delegated to client",
});
// Return a terminal pending result; the client will execute the tool.
return {
...jsonResult({
status: "pending",
tool: func.name,
message: "Tool execution delegated to client",
}),
terminate: true,
};
},
} satisfies ToolDefinition;
});

View File

@@ -707,6 +707,7 @@ describe("OpenAI-compatible HTTP API (e2e)", () => {
expect(clientTools[0]?.type).toBe("function");
expect(clientTools[0]?.function?.name).toBe("get_time");
expect(clientTools[0]?.function?.strict).toBe(true);
expect(firstCall).not.toHaveProperty("toolsAllow");
await res.text();
}

View File

@@ -112,6 +112,7 @@ describe("scripts/lib/docker-e2e-plan", () => {
});
expect(plan.credentials).toEqual(["anthropic", "openai"]);
expect(plan.lanes.map((lane) => lane.name)).toContain("install-e2e-openai");
expect(plan.lanes.map((lane) => lane.name)).toContain("openai-chat-tools");
expect(plan.lanes.map((lane) => lane.name)).toContain("codex-on-demand");
expect(plan.lanes.map((lane) => lane.name)).toContain("install-e2e-anthropic");
expect(plan.lanes.map((lane) => lane.name)).toContain("mcp-channels");
@@ -155,6 +156,7 @@ describe("scripts/lib/docker-e2e-plan", () => {
const laneNames = plan.lanes.map((lane) => lane.name);
expect(plan.releaseProfile).toBe("beta");
expect(laneNames).toContain("install-e2e-openai");
expect(laneNames).toContain("openai-chat-tools");
expect(laneNames).toContain("install-e2e-anthropic");
expect(laneNames).toContain("update-channel-switch");
expect(laneNames).not.toContain("plugins");
@@ -243,6 +245,7 @@ describe("scripts/lib/docker-e2e-plan", () => {
expect(packageInstallOpenAi.lanes.map((lane) => lane.name)).toEqual([
"install-e2e-openai",
"openai-chat-tools",
"codex-on-demand",
]);
expect(packageInstallAnthropic.lanes.map((lane) => lane.name)).toEqual([
@@ -468,6 +471,7 @@ describe("scripts/lib/docker-e2e-plan", () => {
expect(packageUpdate.lanes.map((lane) => lane.name)).toEqual([
"install-e2e-openai",
"openai-chat-tools",
"codex-on-demand",
"install-e2e-anthropic",
"npm-onboard-channel-agent",