ci(release): make Open WebUI release smoke deterministic

This commit is contained in:
Peter Steinberger
2026-05-10 01:30:01 +01:00
parent 10caa76473
commit 5832839353
4 changed files with 15 additions and 3 deletions

View File

@@ -712,6 +712,9 @@ OpenClaw gateway container with the OpenAI-compatible HTTP endpoints enabled,
starts a pinned Open WebUI container against that gateway, signs in through
Open WebUI, verifies `/api/models` exposes `openclaw/default`, then sends a
real chat request through Open WebUI's `/api/chat/completions` proxy.
Set `OPENWEBUI_SMOKE_MODE=models` for release-path CI checks that should stop
after Open WebUI sign-in and model discovery, without waiting on a live model
completion.
The first run can be noticeably slower because Docker may need to pull the
Open WebUI image and Open WebUI may need to finish its own cold-start setup.
This lane expects a usable live model key, and `OPENCLAW_PROFILE_FILE`

View File

@@ -8,12 +8,17 @@ const prompt = process.env.OPENWEBUI_PROMPT ?? "";
// Retry/timeout knobs for polling Open WebUI's model list; each is overridable via env.
const modelAttempts = Number.parseInt(process.env.OPENWEBUI_MODEL_ATTEMPTS ?? "72", 10);
const modelRetryMs = Number.parseInt(process.env.OPENWEBUI_MODEL_RETRY_MS ?? "5000", 10);
const fetchTimeoutMs = Number.parseInt(process.env.OPENWEBUI_FETCH_TIMEOUT_MS ?? "720000", 10);
// Smoke depth: "chat" (default) drives a full chat completion; "models" stops after
// sign-in + model discovery. Both the plain and OPENCLAW_-prefixed spellings are accepted.
const smokeMode =
process.env.OPENWEBUI_SMOKE_MODE ?? process.env.OPENCLAW_OPENWEBUI_SMOKE_MODE ?? "chat";
// Raise the global dispatcher's body/header timeouts (presumably undici's — import not
// visible in this hunk) so slow cold starts don't abort in-flight fetches.
setGlobalDispatcher(new Agent({ bodyTimeout: fetchTimeoutMs, headersTimeout: fetchTimeoutMs }));
// Fail fast when any required env configuration is missing.
if (!baseUrl || !email || !password || !expectedNonce || !prompt) {
throw new Error("Missing required OPENWEBUI_* environment variables");
}
// Reject unknown smoke modes up front rather than silently doing nothing later.
if (smokeMode !== "models" && smokeMode !== "chat") {
throw new Error(`Unsupported OPENWEBUI_SMOKE_MODE: ${smokeMode}`);
}
function getCookieHeader(res) {
const raw = res.headers.get("set-cookie");
@@ -105,6 +110,10 @@ if (!targetModel) {
`openclaw model missing from Open WebUI model list after retry: ${JSON.stringify(modelIds)} (${lastModelsError})`,
);
}
// Models-only mode: sign-in and model discovery already succeeded at this point,
// so report success as pretty-printed JSON and exit before sending a live chat request.
if (smokeMode === "models") {
console.log(JSON.stringify({ ok: true, mode: smokeMode, model: targetModel }, null, 2));
process.exit(0);
}
const chatRes = await fetch(`${baseUrl}/api/chat/completions`, {
method: "POST",

View File

@@ -8,7 +8,7 @@ const LIVE_CLI_TIMEOUT_MS = 20 * 60 * 1000;
const LIVE_PROFILE_TIMEOUT_MS = 30 * 60 * 1000;
const OPENWEBUI_TIMEOUT_MS = 20 * 60 * 1000;
const RELEASE_OPENWEBUI_COMMAND =
"OPENCLAW_OPENWEBUI_MODEL=openai/gpt-5.4-mini OPENCLAW_OPENWEBUI_PROVIDER_TIMEOUT_SECONDS=300 OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openwebui";
"OPENCLAW_OPENWEBUI_MODEL=openai/gpt-5.4-mini OPENWEBUI_SMOKE_MODE=models OPENCLAW_OPENWEBUI_PROVIDER_TIMEOUT_SECONDS=300 OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openwebui";
export const BUNDLED_PLUGIN_INSTALL_UNINSTALL_SHARDS = 24;
const upgradeSurvivorCommand = "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:upgrade-survivor";
const updateRestartAuthCommand =

View File

@@ -331,7 +331,7 @@ describe("scripts/lib/docker-e2e-plan", () => {
},
{
command:
"OPENCLAW_OPENWEBUI_MODEL=openai/gpt-5.4-mini OPENCLAW_OPENWEBUI_PROVIDER_TIMEOUT_SECONDS=300 OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openwebui",
"OPENCLAW_OPENWEBUI_MODEL=openai/gpt-5.4-mini OPENWEBUI_SMOKE_MODE=models OPENCLAW_OPENWEBUI_PROVIDER_TIMEOUT_SECONDS=300 OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openwebui",
imageKind: "functional",
live: true,
name: "openwebui",
@@ -654,7 +654,7 @@ describe("scripts/lib/docker-e2e-plan", () => {
expect(plan.lanes.map(summarizeLane)).toEqual([
{
command:
"OPENCLAW_OPENWEBUI_MODEL=openai/gpt-5.4-mini OPENCLAW_OPENWEBUI_PROVIDER_TIMEOUT_SECONDS=300 OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openwebui",
"OPENCLAW_OPENWEBUI_MODEL=openai/gpt-5.4-mini OPENWEBUI_SMOKE_MODE=models OPENCLAW_OPENWEBUI_PROVIDER_TIMEOUT_SECONDS=300 OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openwebui",
imageKind: "functional",
live: true,
name: "openwebui",