mirror of https://github.com/moltbot/moltbot.git (synced 2026-05-13 15:47:28 +00:00)
ci: align pnpm pins and vitest config
.github/actions/setup-node-env/action.yml (2 changes, vendored)
@@ -14,7 +14,7 @@ inputs:
   pnpm-version:
     description: pnpm version for corepack.
     required: false
-    default: "10.23.0"
+    default: "10.32.1"
   install-bun:
     description: Whether to install Bun alongside Node.
     required: false
@@ -4,7 +4,7 @@ inputs:
   pnpm-version:
     description: pnpm version to activate via corepack.
     required: false
-    default: "10.23.0"
+    default: "10.32.1"
   cache-key-suffix:
     description: Suffix appended to the cache key.
     required: false
.github/workflows/ci.yml (2 changes, vendored)
@@ -950,7 +950,7 @@ jobs:
       - name: Setup pnpm + cache store
         uses: ./.github/actions/setup-pnpm-store-cache
         with:
-          pnpm-version: "10.23.0"
+          pnpm-version: "10.32.1"
           cache-key-suffix: "node24"
       # Sticky disk mount currently retries/fails on every shard and adds ~50s
       # before install while still yielding zero pnpm store reuse.
.github/workflows/macos-release.yml (2 changes, vendored)
@@ -20,7 +20,7 @@ concurrency:
 env:
   FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
   NODE_VERSION: "24.x"
-  PNPM_VERSION: "10.23.0"
+  PNPM_VERSION: "10.32.1"

 jobs:
   validate_macos_release_request:
.github/workflows/openclaw-npm-release.yml (2 changes, vendored)
@@ -37,7 +37,7 @@ concurrency:
 env:
   FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
   NODE_VERSION: "24.x"
-  PNPM_VERSION: "10.23.0"
+  PNPM_VERSION: "10.32.1"

 jobs:
   preflight_openclaw_npm:
.github/workflows/plugin-clawhub-release.yml (2 changes, vendored)
@@ -23,7 +23,7 @@ concurrency:
 env:
   FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
   NODE_VERSION: "24.x"
-  PNPM_VERSION: "10.23.0"
+  PNPM_VERSION: "10.32.1"
   CLAWHUB_REGISTRY: "https://clawhub.ai"
   CLAWHUB_REPOSITORY: "openclaw/clawhub"
   # Pinned to a reviewed ClawHub commit so release behavior stays reproducible.
.github/workflows/plugin-npm-release.yml (2 changes, vendored)
@@ -38,7 +38,7 @@ concurrency:
 env:
   FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
   NODE_VERSION: "24.x"
-  PNPM_VERSION: "10.23.0"
+  PNPM_VERSION: "10.32.1"

 jobs:
   preview_plugins_npm:
@@ -17,9 +17,8 @@ describe("resolveLocalVitestMaxWorkers", () => {
           loadAverage1m: 0,
           totalMemoryBytes: 64 * 1024 ** 3,
         },
-        "threads",
       ),
-    ).toBe(2);
+    ).toBe(3);
   });

   it("lets OPENCLAW_VITEST_MAX_WORKERS override the inferred cap", () => {
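The test hunks in this file all drop the trailing pool argument from resolveLocalVitestMaxWorkers calls (the matching signature change appears further down). A minimal sketch of the new call shape, with illustrative host values that are not taken from the tests:

// Sketch only: the host numbers below are made up to show the call shape.
const workers = resolveLocalVitestMaxWorkers(
  {}, // env: no OPENCLAW_VITEST_MAX_WORKERS / OPENCLAW_TEST_WORKERS override
  {
    cpuCount: 8,
    loadAverage1m: 0,
    totalMemoryBytes: 32 * 1024 ** 3, // 32 GiB
  },
  // The trailing "pool" argument is gone; the pool no longer affects the local cap.
);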
@@ -61,9 +60,8 @@ describe("resolveLocalVitestMaxWorkers", () => {
           loadAverage1m: 0,
           totalMemoryBytes: 16 * 1024 ** 3,
         },
-        "threads",
       ),
-    ).toBe(1);
+    ).toBe(2);
   });

   it("lets roomy hosts use more local parallelism", () => {
@@ -75,9 +73,8 @@ describe("resolveLocalVitestMaxWorkers", () => {
           loadAverage1m: 0,
           totalMemoryBytes: 128 * 1024 ** 3,
         },
-        "threads",
       ),
-    ).toBe(3);
+    ).toBe(4);
   });

   it("backs off further when the host is already busy", () => {
@@ -89,36 +86,34 @@ describe("resolveLocalVitestMaxWorkers", () => {
           loadAverage1m: 16,
           totalMemoryBytes: 128 * 1024 ** 3,
         },
-        "threads",
       ),
-    ).toBe(1);
+    ).toBe(2);
   });

-  it("keeps fork pools less conservative than thread pools on roomy hosts", () => {
+  it("caps very large hosts at six local workers", () => {
     expect(
       resolveLocalVitestMaxWorkers(
         {},
         {
-          cpuCount: 16,
+          cpuCount: 32,
           loadAverage1m: 0,
-          totalMemoryBytes: 128 * 1024 ** 3,
+          totalMemoryBytes: 256 * 1024 ** 3,
         },
-        "forks",
       ),
-    ).toBe(4);
+    ).toBe(6);
   });
 });

 describe("base vitest config", () => {
-  it("defaults the base pool to threads", () => {
-    expect(resolveDefaultVitestPool()).toBe("threads");
-    expect(baseConfig.test?.pool).toBe("threads");
+  it("defaults the base pool to forks", () => {
+    expect(resolveDefaultVitestPool()).toBe("forks");
+    expect(baseConfig.test?.pool).toBe("forks");
   });

-  it("lets OPENCLAW_VITEST_POOL force forks for local debugging", () => {
+  it("keeps forks even when non-fork pools are requested", () => {
     expect(
       resolveDefaultVitestPool({
-        OPENCLAW_VITEST_POOL: "forks",
+        OPENCLAW_VITEST_POOL: "threads",
       }),
     ).toBe("forks");
   });
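A minimal sketch, not taken from this commit, of a resolveDefaultVitestPool that would satisfy the updated expectations above (the real implementation is not shown in this diff):

type OpenClawVitestPool = "forks" | "threads";

// Assumption inferred from the tests: the base pool is always "forks", and a
// request for a non-fork pool via OPENCLAW_VITEST_POOL is deliberately ignored.
export function resolveDefaultVitestPool(
  _env: Record<string, string | undefined> = process.env,
): OpenClawVitestPool {
  return "forks";
}

Always resolving to forks lines up with removing the thread-specific worker back-off later in the diff: once only one pool is in play, the cap no longer needs a pool parameter at all.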
@@ -141,7 +136,7 @@ describe("test scripts", () => {
     };

     expect(pkg.scripts?.["test:serial"]).toBe(
-      "OPENCLAW_VITEST_MAX_WORKERS=1 vitest run --config vitest.config.ts",
+      "OPENCLAW_VITEST_MAX_WORKERS=1 node scripts/test-projects.mjs",
     );
     expect(pkg.scripts?.["test:single"]).toBeUndefined();
   });
@@ -44,7 +44,6 @@ function detectVitestHostInfo(): Required<VitestHostInfo> {
 export function resolveLocalVitestMaxWorkers(
   env: Record<string, string | undefined> = process.env,
   system: VitestHostInfo = detectVitestHostInfo(),
-  pool: OpenClawVitestPool = resolveDefaultVitestPool(env),
 ): number {
   const override = parsePositiveInt(env.OPENCLAW_VITEST_MAX_WORKERS ?? env.OPENCLAW_TEST_WORKERS);
   if (override !== null) {
@@ -77,19 +76,6 @@ export function resolveLocalVitestMaxWorkers(
     inferred = Math.max(1, inferred - 1);
   }

-  if (pool === "threads") {
-    // Thread workers are faster per slot on the steady state, but their startup
-    // compile pressure is much burstier. Keep headroom so a second local Vitest
-    // run can start without immediately saturating the host.
-    inferred = Math.min(inferred, 4);
-    if (cpuCount >= 8) {
-      inferred = Math.max(1, inferred - 1);
-    }
-    if (loadRatio >= 0.5) {
-      inferred = Math.max(1, inferred - 1);
-    }
-  }
-
   return clamp(inferred, 1, 16);
 }
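The hunks above use clamp and parsePositiveInt without showing them. Hypothetical implementations matching how they are called here, purely for orientation (the actual helpers live outside this diff):

// Assumed shape: clamp(value, min, max) bounds the inferred worker count.
function clamp(value: number, min: number, max: number): number {
  return Math.min(max, Math.max(min, value));
}

// Assumed shape: returns a positive integer or null, so an unset or invalid
// OPENCLAW_VITEST_MAX_WORKERS value falls through to the inferred cap.
function parsePositiveInt(raw: string | undefined): number | null {
  const parsed = Number.parseInt(raw ?? "", 10);
  return Number.isInteger(parsed) && parsed > 0 ? parsed : null;
}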
@@ -107,7 +93,7 @@ const repoRoot = path.dirname(fileURLToPath(import.meta.url));
 const isCI = process.env.CI === "true" || process.env.GITHUB_ACTIONS === "true";
 const isWindows = process.platform === "win32";
 const defaultPool = resolveDefaultVitestPool();
-const localWorkers = resolveLocalVitestMaxWorkers(process.env, detectVitestHostInfo(), defaultPool);
+const localWorkers = resolveLocalVitestMaxWorkers(process.env, detectVitestHostInfo());
 const ciWorkers = isWindows ? 2 : 3;

 export const sharedVitestConfig = {
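The sharedVitestConfig object is truncated by the hunk. As a sketch of how the resolved values above could be wired in (assumed shape, not the commit's actual object):

export const sharedVitestConfig = {
  test: {
    pool: defaultPool,                           // resolves to "forks" after this change
    maxWorkers: isCI ? ciWorkers : localWorkers, // 2 (Windows) or 3 in CI, host-derived locally
  },
};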