mirror of
https://github.com/NoeFabris/opencode-antigravity-auth.git
synced 2026-05-13 15:46:05 +00:00
feat: add E2E testing scripts and simplify Gemini Flash model config
- Add test-models.ts for validating all supported model endpoints - Add test-regression.ts for multi-turn regression testing (Issue #50) - Consolidate Gemini 3 Flash variants (low/medium/high) into single model - Fix schema structure by flattening nested signature_cache properties - Extract streaming transformer utilities to dedicated module
This commit is contained in:
40
README.md
40
README.md
@@ -238,21 +238,11 @@ All Antigravity-routed models now use the `antigravity-` prefix consistently. Th
|
||||
"limit": { "context": 1048576, "output": 65535 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-gemini-3-flash-low": {
|
||||
"name": "Gemini 3 Flash Low (Antigravity)",
|
||||
"limit": { "context": 1048576, "output": 65536 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-gemini-3-flash-medium": {
|
||||
"name": "Gemini 3 Flash Medium (Antigravity)",
|
||||
"limit": { "context": 1048576, "output": 65536 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-gemini-3-flash-high": {
|
||||
"name": "Gemini 3 Flash High (Antigravity)",
|
||||
"limit": { "context": 1048576, "output": 65536 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-gemini-3-flash": {
|
||||
"name": "Gemini 3 Flash (Antigravity)",
|
||||
"limit": { "context": 1048576, "output": 65536 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-claude-sonnet-4-5": {
|
||||
"name": "Claude Sonnet 4.5 (Antigravity)",
|
||||
"limit": { "context": 200000, "output": 64000 },
|
||||
@@ -315,9 +305,7 @@ Add these models to your `~/.config/opencode/opencode.json` under `provider.goog
|
||||
|----------|-------------|----------|-------|
|
||||
| `antigravity-gemini-3-pro-low` | Gemini 3 Pro (low thinking) | thinkingLevel: "low" | Antigravity |
|
||||
| `antigravity-gemini-3-pro-high` | Gemini 3 Pro (high thinking) | thinkingLevel: "high" | Antigravity |
|
||||
| `antigravity-gemini-3-flash-low` | Gemini 3 Flash (low thinking) | thinkingLevel: "low" | Antigravity |
|
||||
| `antigravity-gemini-3-flash-medium` | Gemini 3 Flash (medium thinking) | thinkingLevel: "medium" | Antigravity |
|
||||
| `antigravity-gemini-3-flash-high` | Gemini 3 Flash (high thinking) | thinkingLevel: "high" | Antigravity |
|
||||
| `antigravity-gemini-3-flash` | Gemini 3 Flash | Default | Antigravity |
|
||||
|
||||
### Claude Models
|
||||
|
||||
@@ -359,18 +347,8 @@ Add these models to your `~/.config/opencode/opencode.json` under `provider.goog
|
||||
"limit": { "context": 1048576, "output": 65535 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-gemini-3-flash-low": {
|
||||
"name": "Gemini 3 Flash Low (Antigravity)",
|
||||
"limit": { "context": 1048576, "output": 65536 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-gemini-3-flash-medium": {
|
||||
"name": "Gemini 3 Flash Medium (Antigravity)",
|
||||
"limit": { "context": 1048576, "output": 65536 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
"antigravity-gemini-3-flash-high": {
|
||||
"name": "Gemini 3 Flash High (Antigravity)",
|
||||
"antigravity-gemini-3-flash": {
|
||||
"name": "Gemini 3 Flash (Antigravity)",
|
||||
"limit": { "context": 1048576, "output": 65536 },
|
||||
"modalities": { "input": ["text", "image", "pdf"], "output": ["text"] }
|
||||
},
|
||||
@@ -463,7 +441,7 @@ This plugin gives you access to **two separate quota pools** for Gemini models,
|
||||
| Model ID | Routed Via | Quota Used |
|
||||
|----------|------------|------------|
|
||||
| `google/antigravity-gemini-3-pro-high` | This plugin | Antigravity |
|
||||
| `google/antigravity-gemini-3-flash-low` | This plugin | Antigravity |
|
||||
| `google/antigravity-gemini-3-flash` | This plugin | Antigravity |
|
||||
| `google/gemini-2.5-flash` | OpenCode built-in | Gemini CLI |
|
||||
| `google/gemini-2.5-pro` | OpenCode built-in | Gemini CLI |
|
||||
| `google/antigravity-claude-sonnet-4-5` | This plugin | Antigravity |
|
||||
|
||||
@@ -58,127 +58,78 @@
|
||||
"signature_cache": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"$schema": {
|
||||
"type": "string"
|
||||
},
|
||||
"quiet_mode": {
|
||||
"type": "boolean",
|
||||
"default": false,
|
||||
"description": "Suppress most toast notifications (rate limit, account switching). Recovery toasts always shown. Env: OPENCODE_ANTIGRAVITY_QUIET=1"
|
||||
},
|
||||
"debug": {
|
||||
"type": "boolean",
|
||||
"default": false,
|
||||
"description": "Enable debug logging to file. Env: OPENCODE_ANTIGRAVITY_DEBUG=1 (or =2 for verbose)"
|
||||
},
|
||||
"log_dir": {
|
||||
"type": "string",
|
||||
"description": "Custom directory for debug logs. Env: OPENCODE_ANTIGRAVITY_LOG_DIR=/path/to/logs"
|
||||
},
|
||||
"keep_thinking": {
|
||||
"type": "boolean",
|
||||
"default": false,
|
||||
"description": "Preserve thinking blocks for Claude models using signature caching. May cause signature errors. Env: OPENCODE_ANTIGRAVITY_KEEP_THINKING=1"
|
||||
},
|
||||
"session_recovery": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable automatic session recovery from tool_result_missing errors. Env: OPENCODE_ANTIGRAVITY_SESSION_RECOVERY=1"
|
||||
"description": "Enable disk caching of thinking block signatures."
|
||||
},
|
||||
"auto_resume": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Automatically send resume prompt after successful recovery. Env: OPENCODE_ANTIGRAVITY_AUTO_RESUME=1"
|
||||
},
|
||||
"resume_text": {
|
||||
"type": "string",
|
||||
"default": "continue",
|
||||
"description": "Custom text to send when auto-resuming after recovery. Env: OPENCODE_ANTIGRAVITY_RESUME_TEXT=continue"
|
||||
},
|
||||
"signature_cache": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable disk caching of thinking block signatures."
|
||||
},
|
||||
"memory_ttl_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 60,
|
||||
"maximum": 86400,
|
||||
"default": 3600,
|
||||
"description": "In-memory TTL in seconds."
|
||||
},
|
||||
"disk_ttl_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 3600,
|
||||
"maximum": 604800,
|
||||
"default": 172800,
|
||||
"description": "Disk TTL in seconds."
|
||||
},
|
||||
"write_interval_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 10,
|
||||
"maximum": 600,
|
||||
"default": 60,
|
||||
"description": "Background write interval in seconds."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"description": "Signature cache configuration for persisting thinking block signatures. Only used when keep_thinking is enabled."
|
||||
},
|
||||
"empty_response_max_attempts": {
|
||||
"type": "number",
|
||||
"minimum": 1,
|
||||
"maximum": 10,
|
||||
"default": 4,
|
||||
"description": "Maximum retry attempts when Antigravity returns an empty response (no candidates)."
|
||||
},
|
||||
"empty_response_retry_delay_ms": {
|
||||
"type": "number",
|
||||
"minimum": 500,
|
||||
"maximum": 10000,
|
||||
"default": 2000,
|
||||
"description": "Delay in milliseconds between empty response retries."
|
||||
},
|
||||
"tool_id_recovery": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable tool ID orphan recovery. Matches mismatched tool responses by function name or creates placeholders."
|
||||
},
|
||||
"claude_tool_hardening": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable tool hallucination prevention for Claude models. Injects parameter signatures and strict usage rules."
|
||||
},
|
||||
"proactive_token_refresh": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable proactive background token refresh before expiry, ensuring requests never block."
|
||||
},
|
||||
"proactive_refresh_buffer_seconds": {
|
||||
"memory_ttl_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 60,
|
||||
"maximum": 7200,
|
||||
"default": 1800,
|
||||
"description": "Seconds before token expiry to trigger proactive refresh."
|
||||
"maximum": 86400,
|
||||
"default": 3600,
|
||||
"description": "In-memory TTL in seconds."
|
||||
},
|
||||
"proactive_refresh_check_interval_seconds": {
|
||||
"disk_ttl_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 30,
|
||||
"maximum": 1800,
|
||||
"default": 300,
|
||||
"description": "Interval between proactive refresh checks in seconds."
|
||||
"minimum": 3600,
|
||||
"maximum": 604800,
|
||||
"default": 172800,
|
||||
"description": "Disk TTL in seconds."
|
||||
},
|
||||
"auto_update": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable automatic plugin updates. Env: OPENCODE_ANTIGRAVITY_AUTO_UPDATE=1"
|
||||
"write_interval_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 10,
|
||||
"maximum": 600,
|
||||
"default": 60,
|
||||
"description": "Background write interval in seconds."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
"additionalProperties": false,
|
||||
"description": "Signature cache configuration for persisting thinking block signatures. Only used when keep_thinking is enabled."
|
||||
},
|
||||
"empty_response_max_attempts": {
|
||||
"type": "number",
|
||||
"minimum": 1,
|
||||
"maximum": 10,
|
||||
"default": 4,
|
||||
"description": "Maximum retry attempts when Antigravity returns an empty response (no candidates)."
|
||||
},
|
||||
"empty_response_retry_delay_ms": {
|
||||
"type": "number",
|
||||
"minimum": 500,
|
||||
"maximum": 10000,
|
||||
"default": 2000,
|
||||
"description": "Delay in milliseconds between empty response retries."
|
||||
},
|
||||
"tool_id_recovery": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable tool ID orphan recovery. Matches mismatched tool responses by function name or creates placeholders."
|
||||
},
|
||||
"claude_tool_hardening": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable tool hallucination prevention for Claude models. Injects parameter signatures and strict usage rules."
|
||||
},
|
||||
"proactive_token_refresh": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Enable proactive background token refresh before expiry, ensuring requests never block."
|
||||
},
|
||||
"proactive_refresh_buffer_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 60,
|
||||
"maximum": 7200,
|
||||
"default": 1800,
|
||||
"description": "Seconds before token expiry to trigger proactive refresh."
|
||||
},
|
||||
"proactive_refresh_check_interval_seconds": {
|
||||
"type": "number",
|
||||
"minimum": 30,
|
||||
"maximum": 1800,
|
||||
"default": 300,
|
||||
"description": "Interval between proactive refresh checks in seconds."
|
||||
}
|
||||
},
|
||||
"$schema": "http://json-schema.org/draft-07/schema#"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,7 +41,9 @@
|
||||
"test:watch": "vitest",
|
||||
"test:ui": "vitest --ui",
|
||||
"test:coverage": "vitest run --coverage",
|
||||
"prepublishOnly": "npm run build"
|
||||
"prepublishOnly": "npm run build",
|
||||
"test:e2e:models": "npx tsx script/test-models.ts",
|
||||
"test:e2e:regression": "npx tsx script/test-regression.ts"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"typescript": "^5"
|
||||
|
||||
175
script/test-models.ts
Normal file
175
script/test-models.ts
Normal file
@@ -0,0 +1,175 @@
|
||||
#!/usr/bin/env npx tsx
|
||||
import { spawn } from "child_process";
|
||||
|
||||
interface ModelTest {
|
||||
model: string;
|
||||
category: "gemini-cli" | "antigravity-gemini" | "antigravity-claude" | "antigravity-gpt";
|
||||
}
|
||||
|
||||
const MODELS: ModelTest[] = [
|
||||
// Gemini CLI (direct Google API)
|
||||
{ model: "google/gemini-3-flash-preview", category: "gemini-cli" },
|
||||
{ model: "google/gemini-3-pro-preview", category: "gemini-cli" },
|
||||
{ model: "google/gemini-2.5-pro", category: "gemini-cli" },
|
||||
{ model: "google/gemini-2.5-flash", category: "gemini-cli" },
|
||||
|
||||
// Antigravity Gemini
|
||||
{ model: "google/antigravity-gemini-3-pro-low", category: "antigravity-gemini" },
|
||||
{ model: "google/antigravity-gemini-3-pro-high", category: "antigravity-gemini" },
|
||||
{ model: "google/antigravity-gemini-3-flash", category: "antigravity-gemini" },
|
||||
|
||||
// Antigravity Claude
|
||||
{ model: "google/antigravity-claude-sonnet-4-5", category: "antigravity-claude" },
|
||||
{ model: "google/antigravity-claude-sonnet-4-5-thinking-low", category: "antigravity-claude" },
|
||||
{ model: "google/antigravity-claude-sonnet-4-5-thinking-medium", category: "antigravity-claude" },
|
||||
{ model: "google/antigravity-claude-sonnet-4-5-thinking-high", category: "antigravity-claude" },
|
||||
{ model: "google/antigravity-claude-opus-4-5-thinking-low", category: "antigravity-claude" },
|
||||
{ model: "google/antigravity-claude-opus-4-5-thinking-medium", category: "antigravity-claude" },
|
||||
{ model: "google/antigravity-claude-opus-4-5-thinking-high", category: "antigravity-claude" },
|
||||
|
||||
// Antigravity GPT
|
||||
{ model: "google/antigravity-gpt-oss-120b-medium", category: "antigravity-gpt" },
|
||||
];
|
||||
|
||||
const TEST_PROMPT = "Reply with exactly one word: WORKING";
|
||||
const DEFAULT_TIMEOUT_MS = 120_000;
|
||||
|
||||
interface TestResult {
|
||||
success: boolean;
|
||||
error?: string;
|
||||
duration: number;
|
||||
}
|
||||
|
||||
async function testModel(model: string, timeoutMs: number): Promise<TestResult> {
|
||||
const start = Date.now();
|
||||
|
||||
return new Promise((resolve) => {
|
||||
const proc = spawn("opencode", ["run", TEST_PROMPT, "--model", model], {
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
});
|
||||
|
||||
let stdout = "";
|
||||
let stderr = "";
|
||||
const timer = setTimeout(() => {
|
||||
proc.kill("SIGKILL");
|
||||
resolve({ success: false, error: `Timeout after ${timeoutMs}ms`, duration: Date.now() - start });
|
||||
}, timeoutMs);
|
||||
|
||||
proc.stdout?.on("data", (data) => { stdout += data.toString(); });
|
||||
proc.stderr?.on("data", (data) => { stderr += data.toString(); });
|
||||
|
||||
proc.on("close", (code) => {
|
||||
clearTimeout(timer);
|
||||
const duration = Date.now() - start;
|
||||
|
||||
if (code !== 0) {
|
||||
resolve({ success: false, error: `Exit ${code}: ${stderr || stdout}`.slice(0, 200), duration });
|
||||
} else if (stdout.toLowerCase().includes("working")) {
|
||||
resolve({ success: true, duration });
|
||||
} else {
|
||||
resolve({ success: true, duration });
|
||||
}
|
||||
});
|
||||
|
||||
proc.on("error", (err) => {
|
||||
clearTimeout(timer);
|
||||
resolve({ success: false, error: err.message, duration: Date.now() - start });
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function parseArgs(): { filterModel: string | null; filterCategory: string | null; dryRun: boolean; help: boolean; timeout: number } {
|
||||
const args = process.argv.slice(2);
|
||||
const modelIdx = args.indexOf("--model");
|
||||
const catIdx = args.indexOf("--category");
|
||||
const timeoutIdx = args.indexOf("--timeout");
|
||||
|
||||
return {
|
||||
filterModel: modelIdx !== -1 ? args[modelIdx + 1] ?? null : null,
|
||||
filterCategory: catIdx !== -1 ? args[catIdx + 1] ?? null : null,
|
||||
dryRun: args.includes("--dry-run"),
|
||||
help: args.includes("--help") || args.includes("-h"),
|
||||
timeout: timeoutIdx !== -1 ? parseInt(args[timeoutIdx + 1] || "120000", 10) : DEFAULT_TIMEOUT_MS,
|
||||
};
|
||||
}
|
||||
|
||||
function printHelp(): void {
|
||||
console.log(`
|
||||
E2E Model Test Script
|
||||
|
||||
Usage:
|
||||
npx tsx script/test-models.ts [options]
|
||||
|
||||
Options:
|
||||
--model <model> Test specific model
|
||||
--category <cat> Test by category (gemini-cli, antigravity-gemini, antigravity-claude, antigravity-gpt)
|
||||
--timeout <ms> Timeout per model (default: 120000)
|
||||
--dry-run List models without testing
|
||||
--help, -h Show this help
|
||||
|
||||
Examples:
|
||||
npx tsx script/test-models.ts --dry-run
|
||||
npx tsx script/test-models.ts --model google/gemini-3-flash-preview
|
||||
npx tsx script/test-models.ts --category antigravity-claude
|
||||
`);
|
||||
}
|
||||
|
||||
async function main(): Promise<void> {
|
||||
const { filterModel, filterCategory, dryRun, help, timeout } = parseArgs();
|
||||
|
||||
if (help) {
|
||||
printHelp();
|
||||
return;
|
||||
}
|
||||
|
||||
let tests = MODELS;
|
||||
if (filterModel) tests = tests.filter((t) => t.model === filterModel || t.model.endsWith(filterModel));
|
||||
if (filterCategory) tests = tests.filter((t) => t.category === filterCategory);
|
||||
|
||||
if (tests.length === 0) {
|
||||
console.log("No models match the filter.");
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`\n🧪 E2E Model Tests (${tests.length} models)\n${"=".repeat(50)}\n`);
|
||||
|
||||
if (dryRun) {
|
||||
for (const t of tests) {
|
||||
console.log(` ${t.model.padEnd(50)} [${t.category}]`);
|
||||
}
|
||||
console.log(`\n${tests.length} models would be tested.\n`);
|
||||
return;
|
||||
}
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
const failures: { model: string; error: string }[] = [];
|
||||
|
||||
for (const t of tests) {
|
||||
process.stdout.write(`Testing ${t.model.padEnd(50)} ... `);
|
||||
const result = await testModel(t.model, timeout);
|
||||
|
||||
if (result.success) {
|
||||
console.log(`✅ (${(result.duration / 1000).toFixed(1)}s)`);
|
||||
passed++;
|
||||
} else {
|
||||
console.log(`❌ FAIL`);
|
||||
console.log(` ${result.error}`);
|
||||
failures.push({ model: t.model, error: result.error || "Unknown" });
|
||||
failed++;
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`\n${"=".repeat(50)}`);
|
||||
console.log(`Summary: ${passed} passed, ${failed} failed\n`);
|
||||
|
||||
if (failures.length > 0) {
|
||||
console.log("Failed models:");
|
||||
for (const f of failures) {
|
||||
console.log(` - ${f.model}`);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
334
script/test-regression.ts
Normal file
334
script/test-regression.ts
Normal file
@@ -0,0 +1,334 @@
|
||||
#!/usr/bin/env npx tsx
|
||||
import { spawn } from "child_process";
|
||||
|
||||
type Category = "thinking-order" | "tool-pairing" | "multi-tool";
|
||||
|
||||
interface MultiTurnTest {
|
||||
name: string;
|
||||
model: string;
|
||||
category: Category;
|
||||
turns: string[];
|
||||
errorPatterns: string[];
|
||||
timeout: number;
|
||||
}
|
||||
|
||||
interface TestResult {
|
||||
success: boolean;
|
||||
error?: string;
|
||||
duration: number;
|
||||
turnsCompleted: number;
|
||||
}
|
||||
|
||||
const ERROR_PATTERNS = [
|
||||
"thinking block order",
|
||||
"Expected thinking or redacted_thinking",
|
||||
"tool_use ids were found without tool_result",
|
||||
"tool_result_missing",
|
||||
"thinking_disabled_violation",
|
||||
"orphaned tool_use",
|
||||
"must start with thinking block",
|
||||
"error: tool_use without matching tool_result",
|
||||
"cannot be modified",
|
||||
"must remain as they were",
|
||||
];
|
||||
|
||||
const TESTS: MultiTurnTest[] = [
|
||||
// Issue #50: Thinking block order bug - simple single-turn tool use
|
||||
{
|
||||
name: "thinking-tool-use",
|
||||
model: "google/antigravity-claude-sonnet-4-5-thinking-low",
|
||||
category: "thinking-order",
|
||||
turns: [
|
||||
"Read package.json and tell me the package name",
|
||||
],
|
||||
errorPatterns: ERROR_PATTERNS,
|
||||
timeout: 90000,
|
||||
},
|
||||
{
|
||||
name: "thinking-bash-tool",
|
||||
model: "google/antigravity-claude-sonnet-4-5-thinking-low",
|
||||
category: "thinking-order",
|
||||
turns: [
|
||||
"Run: echo 'hello' and tell me the output",
|
||||
],
|
||||
errorPatterns: ERROR_PATTERNS,
|
||||
timeout: 90000,
|
||||
},
|
||||
|
||||
// Tool pairing - simple two-turn
|
||||
{
|
||||
name: "tool-pairing-sequential",
|
||||
model: "google/antigravity-claude-sonnet-4-5-thinking-low",
|
||||
category: "tool-pairing",
|
||||
turns: [
|
||||
"Run: echo 'first'",
|
||||
"Run: echo 'second'",
|
||||
],
|
||||
errorPatterns: ERROR_PATTERNS,
|
||||
timeout: 120000,
|
||||
},
|
||||
|
||||
// Opus model basic test
|
||||
{
|
||||
name: "opus-thinking-basic",
|
||||
model: "google/antigravity-claude-opus-4-5-thinking-low",
|
||||
category: "thinking-order",
|
||||
turns: [
|
||||
"What is 7 * 8? Use bash to verify: echo $((7*8))",
|
||||
],
|
||||
errorPatterns: ERROR_PATTERNS,
|
||||
timeout: 120000,
|
||||
},
|
||||
|
||||
// Bug: "thinking blocks in latest assistant message cannot be modified"
|
||||
// Tests multi-turn with thinking blocks to verify they're preserved unchanged
|
||||
{
|
||||
name: "thinking-modification-continue",
|
||||
model: "google/antigravity-claude-sonnet-4-5-thinking-low",
|
||||
category: "thinking-order",
|
||||
turns: [
|
||||
"Read package.json and tell me the version",
|
||||
"Now read tsconfig.json and tell me the target",
|
||||
"Compare the two files briefly",
|
||||
],
|
||||
errorPatterns: ERROR_PATTERNS,
|
||||
timeout: 120000,
|
||||
},
|
||||
];
|
||||
|
||||
async function runTurn(
|
||||
prompt: string,
|
||||
model: string,
|
||||
sessionId: string | null,
|
||||
sessionTitle: string,
|
||||
timeout: number
|
||||
): Promise<{ output: string; stderr: string; code: number; sessionId: string | null }> {
|
||||
return new Promise((resolve) => {
|
||||
const args = sessionId
|
||||
? ["run", prompt, "--session", sessionId, "--model", model]
|
||||
: ["run", prompt, "--model", model, "--title", sessionTitle];
|
||||
|
||||
const proc = spawn("opencode", args, {
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
timeout,
|
||||
cwd: process.cwd(),
|
||||
});
|
||||
|
||||
let stdout = "";
|
||||
let stderr = "";
|
||||
|
||||
proc.stdout?.on("data", (data) => {
|
||||
stdout += data.toString();
|
||||
});
|
||||
|
||||
proc.stderr?.on("data", (data) => {
|
||||
stderr += data.toString();
|
||||
});
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
proc.kill("SIGTERM");
|
||||
}, timeout);
|
||||
|
||||
proc.on("close", (code) => {
|
||||
clearTimeout(timeoutId);
|
||||
|
||||
let extractedSessionId = sessionId;
|
||||
if (!extractedSessionId) {
|
||||
const match = stdout.match(/session[:\s]+([a-zA-Z0-9_-]+)/i) ||
|
||||
stderr.match(/session[:\s]+([a-zA-Z0-9_-]+)/i);
|
||||
if (match) {
|
||||
extractedSessionId = match[1] ?? null;
|
||||
}
|
||||
}
|
||||
|
||||
resolve({
|
||||
output: stdout,
|
||||
stderr: stderr,
|
||||
code: code ?? 1,
|
||||
sessionId: extractedSessionId,
|
||||
});
|
||||
});
|
||||
|
||||
proc.on("error", (err) => {
|
||||
clearTimeout(timeoutId);
|
||||
resolve({
|
||||
output: "",
|
||||
stderr: err.message,
|
||||
code: 1,
|
||||
sessionId: null,
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async function runMultiTurnTest(test: MultiTurnTest): Promise<TestResult> {
|
||||
const start = Date.now();
|
||||
let sessionId: string | null = null;
|
||||
let turnsCompleted = 0;
|
||||
|
||||
for (let index = 0; index < test.turns.length; index++) {
|
||||
const prompt = test.turns[index]!;
|
||||
const turnStart = Date.now();
|
||||
const result = await runTurn(
|
||||
prompt,
|
||||
test.model,
|
||||
sessionId ?? null,
|
||||
`regression-${test.name}`,
|
||||
test.timeout
|
||||
);
|
||||
|
||||
const combined = result.output + result.stderr;
|
||||
|
||||
for (const pattern of test.errorPatterns) {
|
||||
if (combined.toLowerCase().includes(pattern.toLowerCase())) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Turn ${index + 1}: Found error pattern "${pattern}"`,
|
||||
duration: Date.now() - start,
|
||||
turnsCompleted,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if (result.code !== 0 && result.code !== null) {
|
||||
const isTimeout = Date.now() - turnStart >= test.timeout - 1000;
|
||||
if (isTimeout) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Turn ${index + 1}: Timeout after ${test.timeout}ms`,
|
||||
duration: Date.now() - start,
|
||||
turnsCompleted,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
sessionId = result.sessionId;
|
||||
turnsCompleted++;
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
duration: Date.now() - start,
|
||||
turnsCompleted,
|
||||
};
|
||||
}
|
||||
|
||||
function parseArgs(): {
|
||||
filterName: string | null;
|
||||
filterCategory: Category | null;
|
||||
dryRun: boolean;
|
||||
help: boolean;
|
||||
} {
|
||||
const args = process.argv.slice(2);
|
||||
const getArg = (flag: string): string | null => {
|
||||
const idx = args.indexOf(flag);
|
||||
return idx !== -1 && args[idx + 1] !== undefined ? args[idx + 1]! : null;
|
||||
};
|
||||
return {
|
||||
filterName: getArg("--test") ?? getArg("--name"),
|
||||
filterCategory: getArg("--category") as Category | null,
|
||||
dryRun: args.includes("--dry-run"),
|
||||
help: args.includes("--help") || args.includes("-h"),
|
||||
};
|
||||
}
|
||||
|
||||
function showHelp(): void {
|
||||
console.log(`
|
||||
Multi-Turn Regression Test Suite for Antigravity Plugin
|
||||
|
||||
Tests for known bugs:
|
||||
- Issue #50: Thinking block order errors
|
||||
- Tool pairing: tool_use without tool_result
|
||||
- Multi-tool: Complex tool chains
|
||||
|
||||
Usage:
|
||||
npx tsx script/test-regression.ts [options]
|
||||
|
||||
Options:
|
||||
--test <name> Run specific test by name
|
||||
--category <cat> Run tests by category (thinking-order|tool-pairing|multi-tool)
|
||||
--dry-run List tests without running
|
||||
--help, -h Show this help
|
||||
|
||||
Examples:
|
||||
npx tsx script/test-regression.ts --dry-run
|
||||
npx tsx script/test-regression.ts --category thinking-order
|
||||
npx tsx script/test-regression.ts --test thinking-tool-use-basic
|
||||
`);
|
||||
}
|
||||
|
||||
async function main(): Promise<void> {
|
||||
const { filterName, filterCategory, dryRun, help } = parseArgs();
|
||||
|
||||
if (help) {
|
||||
showHelp();
|
||||
return;
|
||||
}
|
||||
|
||||
let tests = TESTS;
|
||||
if (filterName) {
|
||||
tests = tests.filter((t) => t.name === filterName);
|
||||
}
|
||||
if (filterCategory) {
|
||||
tests = tests.filter((t) => t.category === filterCategory);
|
||||
}
|
||||
|
||||
if (tests.length === 0) {
|
||||
console.error("No tests match the specified filters");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log(`\n🧪 Multi-Turn Regression Tests (${tests.length} tests)\n${"=".repeat(55)}\n`);
|
||||
|
||||
if (dryRun) {
|
||||
console.log("Tests to run:\n");
|
||||
for (const test of tests) {
|
||||
console.log(` ${test.name}`);
|
||||
console.log(` Model: ${test.model}`);
|
||||
console.log(` Category: ${test.category}`);
|
||||
console.log(` Turns: ${test.turns.length}`);
|
||||
console.log();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
const results: { test: MultiTurnTest; result: TestResult }[] = [];
|
||||
|
||||
for (const test of tests) {
|
||||
console.log(`Testing: ${test.name}`);
|
||||
console.log(` Model: ${test.model}`);
|
||||
console.log(` Turns: ${test.turns.length}`);
|
||||
process.stdout.write(" Status: ");
|
||||
|
||||
const result = await runMultiTurnTest(test);
|
||||
results.push({ test, result });
|
||||
|
||||
if (result.success) {
|
||||
console.log(`✅ PASS (${result.turnsCompleted}/${test.turns.length} turns, ${(result.duration / 1000).toFixed(1)}s)`);
|
||||
} else {
|
||||
console.log(`❌ FAIL`);
|
||||
console.log(` Error: ${result.error}`);
|
||||
console.log(` Completed: ${result.turnsCompleted}/${test.turns.length} turns`);
|
||||
}
|
||||
console.log();
|
||||
}
|
||||
|
||||
const passed = results.filter((r) => r.result.success).length;
|
||||
const failed = results.filter((r) => !r.result.success).length;
|
||||
|
||||
console.log("=".repeat(55));
|
||||
console.log(`\nSummary: ${passed} passed, ${failed} failed\n`);
|
||||
|
||||
if (failed > 0) {
|
||||
console.log("Failed tests:");
|
||||
for (const r of results.filter((r) => !r.result.success)) {
|
||||
console.log(` ❌ ${r.test.name}: ${r.result.error}`);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error("Fatal error:", err);
|
||||
process.exit(1);
|
||||
});
|
||||
2
src/plugin/core/streaming/index.ts
Normal file
2
src/plugin/core/streaming/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export * from './types';
|
||||
export * from './transformer';
|
||||
191
src/plugin/core/streaming/transformer.ts
Normal file
191
src/plugin/core/streaming/transformer.ts
Normal file
@@ -0,0 +1,191 @@
|
||||
import type {
|
||||
SignatureStore,
|
||||
StreamingCallbacks,
|
||||
StreamingOptions,
|
||||
ThoughtBuffer,
|
||||
} from './types';
|
||||
|
||||
export function createThoughtBuffer(): ThoughtBuffer {
|
||||
const buffer = new Map<number, string>();
|
||||
return {
|
||||
get: (index: number) => buffer.get(index),
|
||||
set: (index: number, text: string) => buffer.set(index, text),
|
||||
clear: () => buffer.clear(),
|
||||
};
|
||||
}
|
||||
|
||||
export function transformStreamingPayload(
|
||||
payload: string,
|
||||
transformThinkingParts?: (response: unknown) => unknown,
|
||||
): string {
|
||||
return payload
|
||||
.split('\n')
|
||||
.map((line) => {
|
||||
if (!line.startsWith('data:')) {
|
||||
return line;
|
||||
}
|
||||
const json = line.slice(5).trim();
|
||||
if (!json) {
|
||||
return line;
|
||||
}
|
||||
try {
|
||||
const parsed = JSON.parse(json) as { response?: unknown };
|
||||
if (parsed.response !== undefined) {
|
||||
const transformed = transformThinkingParts
|
||||
? transformThinkingParts(parsed.response)
|
||||
: parsed.response;
|
||||
return `data: ${JSON.stringify(transformed)}`;
|
||||
}
|
||||
} catch (_) {}
|
||||
return line;
|
||||
})
|
||||
.join('\n');
|
||||
}
|
||||
|
||||
export function transformSseLine(
|
||||
line: string,
|
||||
signatureStore: SignatureStore,
|
||||
thoughtBuffer: ThoughtBuffer,
|
||||
callbacks: StreamingCallbacks,
|
||||
options: StreamingOptions,
|
||||
debugState: { injected: boolean },
|
||||
): string {
|
||||
if (!line.startsWith('data:')) {
|
||||
return line;
|
||||
}
|
||||
const json = line.slice(5).trim();
|
||||
if (!json) {
|
||||
return line;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(json) as { response?: unknown };
|
||||
if (parsed.response !== undefined) {
|
||||
if (options.cacheSignatures && options.signatureSessionKey) {
|
||||
cacheThinkingSignaturesFromResponse(
|
||||
parsed.response,
|
||||
options.signatureSessionKey,
|
||||
signatureStore,
|
||||
thoughtBuffer,
|
||||
callbacks.onCacheSignature,
|
||||
);
|
||||
}
|
||||
|
||||
let response: unknown = parsed.response;
|
||||
if (options.debugText && callbacks.onInjectDebug && !debugState.injected) {
|
||||
response = callbacks.onInjectDebug(response, options.debugText);
|
||||
debugState.injected = true;
|
||||
}
|
||||
|
||||
const transformed = callbacks.transformThinkingParts
|
||||
? callbacks.transformThinkingParts(response)
|
||||
: response;
|
||||
return `data: ${JSON.stringify(transformed)}`;
|
||||
}
|
||||
} catch (_) {}
|
||||
return line;
|
||||
}
|
||||
|
||||
export function cacheThinkingSignaturesFromResponse(
|
||||
response: unknown,
|
||||
signatureSessionKey: string,
|
||||
signatureStore: SignatureStore,
|
||||
thoughtBuffer: ThoughtBuffer,
|
||||
onCacheSignature?: (sessionKey: string, text: string, signature: string) => void,
|
||||
): void {
|
||||
if (!response || typeof response !== 'object') return;
|
||||
|
||||
const resp = response as Record<string, unknown>;
|
||||
|
||||
if (Array.isArray(resp.candidates)) {
|
||||
resp.candidates.forEach((candidate: unknown, index: number) => {
|
||||
const cand = candidate as Record<string, unknown> | null;
|
||||
if (!cand?.content) return;
|
||||
const content = cand.content as Record<string, unknown>;
|
||||
if (!Array.isArray(content.parts)) return;
|
||||
|
||||
content.parts.forEach((part: unknown) => {
|
||||
const p = part as Record<string, unknown>;
|
||||
if (p.thought === true || p.type === 'thinking') {
|
||||
const text = (p.text || p.thinking || '') as string;
|
||||
if (text) {
|
||||
const current = thoughtBuffer.get(index) ?? '';
|
||||
thoughtBuffer.set(index, current + text);
|
||||
}
|
||||
}
|
||||
|
||||
if (p.thoughtSignature) {
|
||||
const fullText = thoughtBuffer.get(index) ?? '';
|
||||
if (fullText) {
|
||||
const signature = p.thoughtSignature as string;
|
||||
onCacheSignature?.(signatureSessionKey, fullText, signature);
|
||||
signatureStore.set(signatureSessionKey, { text: fullText, signature });
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
if (Array.isArray(resp.content)) {
|
||||
let thinkingText = '';
|
||||
resp.content.forEach((block: unknown) => {
|
||||
const b = block as Record<string, unknown> | null;
|
||||
if (b?.type === 'thinking') {
|
||||
thinkingText += (b.thinking || b.text || '') as string;
|
||||
}
|
||||
if (b?.signature && thinkingText) {
|
||||
const signature = b.signature as string;
|
||||
onCacheSignature?.(signatureSessionKey, thinkingText, signature);
|
||||
signatureStore.set(signatureSessionKey, { text: thinkingText, signature });
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
export function createStreamingTransformer(
|
||||
signatureStore: SignatureStore,
|
||||
callbacks: StreamingCallbacks,
|
||||
options: StreamingOptions = {},
|
||||
): TransformStream<Uint8Array, Uint8Array> {
|
||||
const decoder = new TextDecoder();
|
||||
const encoder = new TextEncoder();
|
||||
let buffer = '';
|
||||
const thoughtBuffer = createThoughtBuffer();
|
||||
const debugState = { injected: false };
|
||||
|
||||
return new TransformStream({
|
||||
transform(chunk, controller) {
|
||||
buffer += decoder.decode(chunk, { stream: true });
|
||||
|
||||
const lines = buffer.split('\n');
|
||||
buffer = lines.pop() || '';
|
||||
|
||||
for (const line of lines) {
|
||||
const transformedLine = transformSseLine(
|
||||
line,
|
||||
signatureStore,
|
||||
thoughtBuffer,
|
||||
callbacks,
|
||||
options,
|
||||
debugState,
|
||||
);
|
||||
controller.enqueue(encoder.encode(transformedLine + '\n'));
|
||||
}
|
||||
},
|
||||
flush(controller) {
|
||||
buffer += decoder.decode();
|
||||
|
||||
if (buffer) {
|
||||
const transformedLine = transformSseLine(
|
||||
buffer,
|
||||
signatureStore,
|
||||
thoughtBuffer,
|
||||
callbacks,
|
||||
options,
|
||||
debugState,
|
||||
);
|
||||
controller.enqueue(encoder.encode(transformedLine));
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
29
src/plugin/core/streaming/types.ts
Normal file
29
src/plugin/core/streaming/types.ts
Normal file
@@ -0,0 +1,29 @@
|
||||
export interface SignedThinking {
|
||||
text: string;
|
||||
signature: string;
|
||||
}
|
||||
|
||||
export interface SignatureStore {
|
||||
get(sessionKey: string): SignedThinking | undefined;
|
||||
set(sessionKey: string, value: SignedThinking): void;
|
||||
has(sessionKey: string): boolean;
|
||||
delete(sessionKey: string): void;
|
||||
}
|
||||
|
||||
export interface StreamingCallbacks {
|
||||
onCacheSignature?: (sessionKey: string, text: string, signature: string) => void;
|
||||
onInjectDebug?: (response: unknown, debugText: string) => unknown;
|
||||
transformThinkingParts?: (parts: unknown) => unknown;
|
||||
}
|
||||
|
||||
export interface StreamingOptions {
|
||||
signatureSessionKey?: string;
|
||||
debugText?: string;
|
||||
cacheSignatures?: boolean;
|
||||
}
|
||||
|
||||
export interface ThoughtBuffer {
|
||||
get(index: number): string | undefined;
|
||||
set(index: number, text: string): void;
|
||||
clear(): void;
|
||||
}
|
||||
@@ -45,6 +45,7 @@ describe("sanitizeThinkingPart (covered via filtering)", () => {
|
||||
},
|
||||
],
|
||||
},
|
||||
{ role: "model", parts: [{ text: "trailing" }] },
|
||||
];
|
||||
|
||||
const result = filterUnsignedThinkingBlocks(contents, "session-1", getCachedSignatureFn) as any;
|
||||
@@ -82,6 +83,7 @@ describe("sanitizeThinkingPart (covered via filtering)", () => {
|
||||
},
|
||||
],
|
||||
},
|
||||
{ role: "model", parts: [{ text: "trailing" }] },
|
||||
];
|
||||
|
||||
const result = filterUnsignedThinkingBlocks(contents, "session-1", getCachedSignatureFn) as any;
|
||||
@@ -107,12 +109,13 @@ describe("sanitizeThinkingPart (covered via filtering)", () => {
|
||||
thinking: "restore me",
|
||||
cache_control: { type: "ephemeral" },
|
||||
},
|
||||
// no signature present (forces restore)
|
||||
providerOptions: { injected: true },
|
||||
},
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "user", content: [{ type: "text", text: "next" }] },
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterMessagesThinkingBlocks(messages, "session-1", getCachedSignatureFn) as any;
|
||||
@@ -142,6 +145,8 @@ describe("sanitizeThinkingPart (covered via filtering)", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "user", parts: [{ text: "next" }] },
|
||||
{ role: "model", parts: [{ text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterUnsignedThinkingBlocks(contents, "session-1", getCachedSignatureFn) as any;
|
||||
@@ -312,6 +317,8 @@ describe("filterUnsignedThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible text" },
|
||||
],
|
||||
},
|
||||
{ role: "user", parts: [{ text: "next" }] },
|
||||
{ role: "model", parts: [{ text: "last" }] },
|
||||
];
|
||||
const result = filterUnsignedThinkingBlocks(contents);
|
||||
expect(result[0].parts).toHaveLength(1);
|
||||
@@ -348,6 +355,8 @@ describe("filterUnsignedThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible text" },
|
||||
],
|
||||
},
|
||||
{ role: "user", parts: [{ text: "next" }] },
|
||||
{ role: "model", parts: [{ text: "last" }] },
|
||||
];
|
||||
const result = filterUnsignedThinkingBlocks(contents);
|
||||
expect(result[0].parts).toHaveLength(1);
|
||||
@@ -363,6 +372,8 @@ describe("filterUnsignedThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible text" },
|
||||
],
|
||||
},
|
||||
{ role: "user", parts: [{ text: "next" }] },
|
||||
{ role: "model", parts: [{ text: "last" }] },
|
||||
];
|
||||
const result = filterUnsignedThinkingBlocks(contents);
|
||||
expect(result[0].parts).toHaveLength(1);
|
||||
@@ -383,6 +394,8 @@ describe("filterUnsignedThinkingBlocks", () => {
|
||||
{ thought: true, text: thinkingText, thoughtSignature: validSignature },
|
||||
],
|
||||
},
|
||||
{ role: "user", parts: [{ text: "next" }] },
|
||||
{ role: "model", parts: [{ text: "last" }] },
|
||||
];
|
||||
const result = filterUnsignedThinkingBlocks(contents, "session-1", getCachedSignatureFn);
|
||||
expect(result[0].parts).toHaveLength(1);
|
||||
@@ -423,6 +436,8 @@ describe("filterUnsignedThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "user", parts: [{ text: "next" }] },
|
||||
{ role: "model", parts: [{ text: "last" }] },
|
||||
];
|
||||
const result = filterUnsignedThinkingBlocks(contents);
|
||||
expect(result[0].parts).toHaveLength(1);
|
||||
@@ -522,6 +537,7 @@ describe("deepFilterThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
],
|
||||
},
|
||||
};
|
||||
@@ -544,6 +560,7 @@ describe("filterMessagesThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterMessagesThinkingBlocks(messages) as any;
|
||||
@@ -571,6 +588,7 @@ describe("filterMessagesThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterMessagesThinkingBlocks(messages, "session-1", getCachedSignatureFn) as any;
|
||||
@@ -595,6 +613,7 @@ describe("filterMessagesThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterMessagesThinkingBlocks(messages) as any;
|
||||
@@ -611,6 +630,7 @@ describe("filterMessagesThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterMessagesThinkingBlocks(messages) as any;
|
||||
@@ -634,6 +654,7 @@ describe("filterMessagesThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterMessagesThinkingBlocks(messages, "session-1", getCachedSignatureFn) as any;
|
||||
@@ -663,6 +684,7 @@ describe("filterMessagesThinkingBlocks", () => {
|
||||
{ type: "text", text: "visible" },
|
||||
],
|
||||
},
|
||||
{ role: "assistant", content: [{ type: "text", text: "last" }] },
|
||||
];
|
||||
|
||||
const result = filterMessagesThinkingBlocks(messages, "session-1", getCachedSignatureFn) as any;
|
||||
|
||||
@@ -934,11 +934,22 @@ function sanitizeThinkingPart(part: Record<string, unknown>): Record<string, unk
|
||||
return stripCacheControlRecursively(part) as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function findLastAssistantIndex(contents: any[], roleValue: "model" | "assistant"): number {
|
||||
for (let i = contents.length - 1; i >= 0; i--) {
|
||||
const content = contents[i];
|
||||
if (content && typeof content === "object" && content.role === roleValue) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
function filterContentArray(
|
||||
contentArray: any[],
|
||||
sessionId?: string,
|
||||
getCachedSignatureFn?: (sessionId: string, text: string) => string | undefined,
|
||||
isClaudeModel?: boolean,
|
||||
isLastAssistantMessage?: boolean,
|
||||
): any[] {
|
||||
// For Claude models, strip thinking blocks by default for reliability
|
||||
// User can opt-in to keep thinking via OPENCODE_ANTIGRAVITY_KEEP_THINKING=1
|
||||
@@ -967,6 +978,14 @@ function filterContentArray(
|
||||
continue;
|
||||
}
|
||||
|
||||
// CRITICAL: For the LAST assistant message, thinking blocks MUST remain byte-for-byte
|
||||
// identical to what the API returned. Anthropic rejects any modification.
|
||||
// Pass through unchanged - do NOT sanitize or reconstruct.
|
||||
if (isLastAssistantMessage && (isThinking || hasSignature)) {
|
||||
filtered.push(item);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (isOurCachedSignature(item, sessionId, getCachedSignatureFn)) {
|
||||
filtered.push(sanitizeThinkingPart(item));
|
||||
continue;
|
||||
@@ -1007,17 +1026,22 @@ export function filterUnsignedThinkingBlocks(
|
||||
getCachedSignatureFn?: (sessionId: string, text: string) => string | undefined,
|
||||
isClaudeModel?: boolean,
|
||||
): any[] {
|
||||
return contents.map((content: any) => {
|
||||
const lastAssistantIdx = findLastAssistantIndex(contents, "model");
|
||||
|
||||
return contents.map((content: any, idx: number) => {
|
||||
if (!content || typeof content !== "object") {
|
||||
return content;
|
||||
}
|
||||
|
||||
const isLastAssistant = idx === lastAssistantIdx;
|
||||
|
||||
if (Array.isArray((content as any).parts)) {
|
||||
const filteredParts = filterContentArray(
|
||||
(content as any).parts,
|
||||
sessionId,
|
||||
getCachedSignatureFn,
|
||||
isClaudeModel,
|
||||
isLastAssistant,
|
||||
);
|
||||
|
||||
const trimmedParts = (content as any).role === "model" && !isClaudeModel
|
||||
@@ -1029,11 +1053,15 @@ export function filterUnsignedThinkingBlocks(
|
||||
|
||||
if (Array.isArray((content as any).content)) {
|
||||
const isAssistantRole = (content as any).role === "assistant";
|
||||
const isLastAssistantContent = idx === lastAssistantIdx ||
|
||||
(isAssistantRole && idx === findLastAssistantIndex(contents, "assistant"));
|
||||
|
||||
const filteredContent = filterContentArray(
|
||||
(content as any).content,
|
||||
sessionId,
|
||||
getCachedSignatureFn,
|
||||
isClaudeModel,
|
||||
isLastAssistantContent,
|
||||
);
|
||||
|
||||
const trimmedContent = isAssistantRole && !isClaudeModel
|
||||
@@ -1056,18 +1084,23 @@ export function filterMessagesThinkingBlocks(
|
||||
getCachedSignatureFn?: (sessionId: string, text: string) => string | undefined,
|
||||
isClaudeModel?: boolean,
|
||||
): any[] {
|
||||
return messages.map((message: any) => {
|
||||
const lastAssistantIdx = findLastAssistantIndex(messages, "assistant");
|
||||
|
||||
return messages.map((message: any, idx: number) => {
|
||||
if (!message || typeof message !== "object") {
|
||||
return message;
|
||||
}
|
||||
|
||||
if (Array.isArray((message as any).content)) {
|
||||
const isAssistantRole = (message as any).role === "assistant";
|
||||
const isLastAssistant = isAssistantRole && idx === lastAssistantIdx;
|
||||
|
||||
const filteredContent = filterContentArray(
|
||||
(message as any).content,
|
||||
sessionId,
|
||||
getCachedSignatureFn,
|
||||
isClaudeModel,
|
||||
isLastAssistant,
|
||||
);
|
||||
|
||||
const trimmedContent = isAssistantRole && !isClaudeModel
|
||||
|
||||
685
src/plugin/request.test.ts
Normal file
685
src/plugin/request.test.ts
Normal file
@@ -0,0 +1,685 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import {
|
||||
prepareAntigravityRequest,
|
||||
getPluginSessionId,
|
||||
isGenerativeLanguageRequest,
|
||||
__testExports,
|
||||
} from "./request";
|
||||
import type { SignatureStore, ThoughtBuffer, StreamingCallbacks, StreamingOptions } from "./core/streaming/types";
|
||||
|
||||
const {
|
||||
buildSignatureSessionKey,
|
||||
hashConversationSeed,
|
||||
extractTextFromContent,
|
||||
extractConversationSeedFromMessages,
|
||||
extractConversationSeedFromContents,
|
||||
resolveProjectKey,
|
||||
isGeminiToolUsePart,
|
||||
isGeminiThinkingPart,
|
||||
ensureThoughtSignature,
|
||||
hasSignedThinkingPart,
|
||||
hasToolUseInContents,
|
||||
hasSignedThinkingInContents,
|
||||
hasToolUseInMessages,
|
||||
hasSignedThinkingInMessages,
|
||||
generateSyntheticProjectId,
|
||||
MIN_SIGNATURE_LENGTH,
|
||||
transformStreamingPayload,
|
||||
createStreamingTransformer,
|
||||
transformSseLine,
|
||||
} = __testExports;
|
||||
|
||||
function createMockSignatureStore(): SignatureStore {
|
||||
const store = new Map<string, { text: string; signature: string }>();
|
||||
return {
|
||||
get: (key: string) => store.get(key),
|
||||
set: (key: string, value: { text: string; signature: string }) => store.set(key, value),
|
||||
has: (key: string) => store.has(key),
|
||||
delete: (key: string) => store.delete(key),
|
||||
};
|
||||
}
|
||||
|
||||
function createMockThoughtBuffer(): ThoughtBuffer {
|
||||
const buffer = new Map<number, string>();
|
||||
return {
|
||||
get: (idx: number) => buffer.get(idx),
|
||||
set: (idx: number, text: string) => buffer.set(idx, text),
|
||||
clear: () => buffer.clear(),
|
||||
};
|
||||
}
|
||||
|
||||
const defaultCallbacks: StreamingCallbacks = {};
|
||||
const defaultOptions: StreamingOptions = {};
|
||||
const defaultDebugState = { injected: false };
|
||||
|
||||
describe("request.ts", () => {
|
||||
describe("getPluginSessionId", () => {
|
||||
it("returns consistent session ID across calls", () => {
|
||||
const id1 = getPluginSessionId();
|
||||
const id2 = getPluginSessionId();
|
||||
expect(id1).toBe(id2);
|
||||
expect(id1).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("isGenerativeLanguageRequest", () => {
|
||||
it("returns true for generativelanguage.googleapis.com URLs", () => {
|
||||
expect(isGenerativeLanguageRequest("https://generativelanguage.googleapis.com/v1/models")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for other URLs", () => {
|
||||
expect(isGenerativeLanguageRequest("https://api.anthropic.com/v1/messages")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for non-string inputs", () => {
|
||||
expect(isGenerativeLanguageRequest({} as any)).toBe(false);
|
||||
expect(isGenerativeLanguageRequest(new Request("https://example.com"))).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("buildSignatureSessionKey", () => {
|
||||
it("builds key from sessionId, model, project, and conversation", () => {
|
||||
const key = buildSignatureSessionKey("session-1", "claude-3", "conv-456", "proj-123");
|
||||
expect(key).toBe("session-1:claude-3:proj-123:conv-456");
|
||||
});
|
||||
|
||||
it("uses defaults for missing optional params", () => {
|
||||
expect(buildSignatureSessionKey("s1", undefined, undefined, undefined)).toBe("s1:unknown:default:default");
|
||||
expect(buildSignatureSessionKey("s1", "model", undefined, undefined)).toBe("s1:model:default:default");
|
||||
});
|
||||
|
||||
it("handles empty strings as defaults", () => {
|
||||
expect(buildSignatureSessionKey("s1", "", "", "")).toBe("s1:unknown:default:default");
|
||||
});
|
||||
});
|
||||
|
||||
describe("hashConversationSeed", () => {
|
||||
it("returns consistent hash for same input", () => {
|
||||
const hash1 = hashConversationSeed("test-seed");
|
||||
const hash2 = hashConversationSeed("test-seed");
|
||||
expect(hash1).toBe(hash2);
|
||||
});
|
||||
|
||||
it("returns different hash for different inputs", () => {
|
||||
const hash1 = hashConversationSeed("seed-1");
|
||||
const hash2 = hashConversationSeed("seed-2");
|
||||
expect(hash1).not.toBe(hash2);
|
||||
});
|
||||
|
||||
it("handles empty string", () => {
|
||||
const hash = hashConversationSeed("");
|
||||
expect(hash).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractTextFromContent", () => {
|
||||
it("extracts text from string content", () => {
|
||||
expect(extractTextFromContent("hello world")).toBe("hello world");
|
||||
});
|
||||
|
||||
it("extracts first text from content array with text blocks", () => {
|
||||
const content = [
|
||||
{ type: "text", text: "hello" },
|
||||
{ type: "text", text: "world" },
|
||||
];
|
||||
expect(extractTextFromContent(content)).toBe("hello");
|
||||
});
|
||||
|
||||
it("returns empty string for non-text blocks", () => {
|
||||
const content = [{ type: "image", source: {} }];
|
||||
expect(extractTextFromContent(content)).toBe("");
|
||||
});
|
||||
|
||||
it("returns first text block only (not concatenated)", () => {
|
||||
const content = [
|
||||
{ type: "text", text: "before" },
|
||||
{ type: "image", source: {} },
|
||||
{ type: "text", text: "after" },
|
||||
];
|
||||
expect(extractTextFromContent(content)).toBe("before");
|
||||
});
|
||||
|
||||
it("returns empty string for null/undefined", () => {
|
||||
expect(extractTextFromContent(null)).toBe("");
|
||||
expect(extractTextFromContent(undefined)).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractConversationSeedFromMessages", () => {
|
||||
it("extracts seed from first user message", () => {
|
||||
const messages = [
|
||||
{ role: "user", content: "first message" },
|
||||
{ role: "assistant", content: "response" },
|
||||
];
|
||||
const seed = extractConversationSeedFromMessages(messages);
|
||||
expect(seed).toContain("first message");
|
||||
});
|
||||
|
||||
it("returns empty string when no user messages", () => {
|
||||
const messages = [{ role: "assistant", content: "response" }];
|
||||
expect(extractConversationSeedFromMessages(messages)).toBe("");
|
||||
});
|
||||
|
||||
it("handles empty messages array", () => {
|
||||
expect(extractConversationSeedFromMessages([])).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractConversationSeedFromContents", () => {
|
||||
it("extracts seed from first user content", () => {
|
||||
const contents = [
|
||||
{ role: "user", parts: [{ text: "hello" }] },
|
||||
{ role: "model", parts: [{ text: "hi" }] },
|
||||
];
|
||||
const seed = extractConversationSeedFromContents(contents);
|
||||
expect(seed).toContain("hello");
|
||||
});
|
||||
|
||||
it("returns empty string when no user content", () => {
|
||||
const contents = [{ role: "model", parts: [{ text: "hi" }] }];
|
||||
expect(extractConversationSeedFromContents(contents)).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveProjectKey", () => {
|
||||
it("returns candidate if it is a string", () => {
|
||||
expect(resolveProjectKey("my-project")).toBe("my-project");
|
||||
});
|
||||
|
||||
it("returns fallback if candidate is not a string", () => {
|
||||
expect(resolveProjectKey(null, "fallback")).toBe("fallback");
|
||||
expect(resolveProjectKey(undefined, "fallback")).toBe("fallback");
|
||||
expect(resolveProjectKey({}, "fallback")).toBe("fallback");
|
||||
});
|
||||
|
||||
it("returns undefined if no valid candidate or fallback", () => {
|
||||
expect(resolveProjectKey(null)).toBeUndefined();
|
||||
expect(resolveProjectKey(undefined)).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("isGeminiToolUsePart", () => {
|
||||
it("returns true for functionCall parts", () => {
|
||||
expect(isGeminiToolUsePart({ functionCall: { name: "test" } })).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for non-functionCall parts", () => {
|
||||
expect(isGeminiToolUsePart({ text: "hello" })).toBe(false);
|
||||
expect(isGeminiToolUsePart({ thought: true })).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for null/undefined", () => {
|
||||
expect(isGeminiToolUsePart(null)).toBe(false);
|
||||
expect(isGeminiToolUsePart(undefined)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isGeminiThinkingPart", () => {
|
||||
it("returns true for thought:true parts", () => {
|
||||
expect(isGeminiThinkingPart({ thought: true, text: "thinking..." })).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for thought:false parts", () => {
|
||||
expect(isGeminiThinkingPart({ thought: false, text: "not thinking" })).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for parts without thought property", () => {
|
||||
expect(isGeminiThinkingPart({ text: "hello" })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("ensureThoughtSignature", () => {
|
||||
it("returns part unchanged when no cached signature exists", () => {
|
||||
const part = { thought: true, text: "thinking..." };
|
||||
const result = ensureThoughtSignature(part, "no-cache-session");
|
||||
expect(result).toEqual(part);
|
||||
});
|
||||
|
||||
it("preserves existing thoughtSignature", () => {
|
||||
const existingSignature = "a".repeat(MIN_SIGNATURE_LENGTH + 10);
|
||||
const part = { thought: true, text: "thinking...", thoughtSignature: existingSignature };
|
||||
const result = ensureThoughtSignature(part, "session-key");
|
||||
expect(result.thoughtSignature).toBe(existingSignature);
|
||||
});
|
||||
|
||||
it("does not modify non-thinking parts", () => {
|
||||
const part = { text: "regular text" };
|
||||
const result = ensureThoughtSignature(part, "session-key");
|
||||
expect(result.thoughtSignature).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns null/undefined inputs unchanged", () => {
|
||||
expect(ensureThoughtSignature(null, "key")).toBeNull();
|
||||
expect(ensureThoughtSignature(undefined, "key")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns non-object inputs unchanged", () => {
|
||||
expect(ensureThoughtSignature("string", "key")).toBe("string");
|
||||
expect(ensureThoughtSignature(123, "key")).toBe(123);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasSignedThinkingPart", () => {
|
||||
it("returns true for part with valid thoughtSignature", () => {
|
||||
const part = { thought: true, thoughtSignature: "a".repeat(MIN_SIGNATURE_LENGTH) };
|
||||
expect(hasSignedThinkingPart(part)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for type:thinking with valid signature field", () => {
|
||||
const part = { type: "thinking", thinking: "...", signature: "a".repeat(MIN_SIGNATURE_LENGTH) };
|
||||
expect(hasSignedThinkingPart(part)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for type:reasoning with valid signature field", () => {
|
||||
const part = { type: "reasoning", signature: "a".repeat(MIN_SIGNATURE_LENGTH) };
|
||||
expect(hasSignedThinkingPart(part)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for part with short signature", () => {
|
||||
const part = { thought: true, thoughtSignature: "short" };
|
||||
expect(hasSignedThinkingPart(part)).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for part without signature", () => {
|
||||
const part = { thought: true, text: "no signature" };
|
||||
expect(hasSignedThinkingPart(part)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasToolUseInContents", () => {
|
||||
it("returns true when contents have functionCall", () => {
|
||||
const contents = [
|
||||
{ role: "model", parts: [{ functionCall: { name: "test" } }] },
|
||||
];
|
||||
expect(hasToolUseInContents(contents)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false when no functionCall present", () => {
|
||||
const contents = [
|
||||
{ role: "model", parts: [{ text: "hello" }] },
|
||||
];
|
||||
expect(hasToolUseInContents(contents)).toBe(false);
|
||||
});
|
||||
|
||||
it("handles empty contents", () => {
|
||||
expect(hasToolUseInContents([])).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasSignedThinkingInContents", () => {
|
||||
it("returns true when contents have signed thinking", () => {
|
||||
const contents = [
|
||||
{
|
||||
role: "model",
|
||||
parts: [{ thought: true, thoughtSignature: "a".repeat(MIN_SIGNATURE_LENGTH) }],
|
||||
},
|
||||
];
|
||||
expect(hasSignedThinkingInContents(contents)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false when no signed thinking present", () => {
|
||||
const contents = [
|
||||
{ role: "model", parts: [{ thought: true, text: "unsigned" }] },
|
||||
];
|
||||
expect(hasSignedThinkingInContents(contents)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasToolUseInMessages", () => {
|
||||
it("returns true when messages have tool_use blocks", () => {
|
||||
const messages = [
|
||||
{ role: "assistant", content: [{ type: "tool_use", id: "123", name: "test" }] },
|
||||
];
|
||||
expect(hasToolUseInMessages(messages)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false when no tool_use blocks", () => {
|
||||
const messages = [
|
||||
{ role: "assistant", content: [{ type: "text", text: "hello" }] },
|
||||
];
|
||||
expect(hasToolUseInMessages(messages)).toBe(false);
|
||||
});
|
||||
|
||||
it("handles string content", () => {
|
||||
const messages = [{ role: "assistant", content: "just text" }];
|
||||
expect(hasToolUseInMessages(messages)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasSignedThinkingInMessages", () => {
|
||||
it("returns true when messages have signed thinking blocks", () => {
|
||||
const messages = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [{ type: "thinking", thinking: "...", signature: "a".repeat(MIN_SIGNATURE_LENGTH) }],
|
||||
},
|
||||
];
|
||||
expect(hasSignedThinkingInMessages(messages)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false when thinking blocks are unsigned", () => {
|
||||
const messages = [
|
||||
{ role: "assistant", content: [{ type: "thinking", thinking: "no sig" }] },
|
||||
];
|
||||
expect(hasSignedThinkingInMessages(messages)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("generateSyntheticProjectId", () => {
|
||||
it("generates a string in expected format", () => {
|
||||
const id = generateSyntheticProjectId();
|
||||
expect(id).toMatch(/^[a-z]+-[a-z]+-[a-z0-9]{5}$/);
|
||||
});
|
||||
|
||||
it("generates unique IDs on each call", () => {
|
||||
const ids = new Set<string>();
|
||||
for (let i = 0; i < 10; i++) {
|
||||
ids.add(generateSyntheticProjectId());
|
||||
}
|
||||
expect(ids.size).toBe(10);
|
||||
});
|
||||
});
|
||||
|
||||
describe("MIN_SIGNATURE_LENGTH", () => {
|
||||
it("is 50", () => {
|
||||
expect(MIN_SIGNATURE_LENGTH).toBe(50);
|
||||
});
|
||||
});
|
||||
|
||||
describe("transformSseLine", () => {
|
||||
const callTransformSseLine = (line: string) => {
|
||||
const store = createMockSignatureStore();
|
||||
const buffer = createMockThoughtBuffer();
|
||||
return transformSseLine(line, store, buffer, defaultCallbacks, defaultOptions, { ...defaultDebugState });
|
||||
};
|
||||
|
||||
it("returns empty lines unchanged", () => {
|
||||
expect(callTransformSseLine("")).toBe("");
|
||||
expect(callTransformSseLine(" ")).toBe(" ");
|
||||
});
|
||||
|
||||
it("returns non-data lines unchanged", () => {
|
||||
expect(callTransformSseLine("event: message")).toBe("event: message");
|
||||
expect(callTransformSseLine(": heartbeat")).toBe(": heartbeat");
|
||||
});
|
||||
|
||||
it("handles data: [DONE] unchanged", () => {
|
||||
expect(callTransformSseLine("data: [DONE]")).toBe("data: [DONE]");
|
||||
});
|
||||
|
||||
it("handles invalid JSON gracefully", () => {
|
||||
expect(callTransformSseLine("data: not-json")).toBe("data: not-json");
|
||||
expect(callTransformSseLine("data: {invalid}")).toBe("data: {invalid}");
|
||||
});
|
||||
|
||||
it("passes through valid JSON without thinking parts", () => {
|
||||
const payload = { candidates: [{ content: { parts: [{ text: "hello" }] } }] };
|
||||
const line = `data: ${JSON.stringify(payload)}`;
|
||||
const result = callTransformSseLine(line);
|
||||
expect(result).toContain("data:");
|
||||
expect(result).toContain("hello");
|
||||
});
|
||||
|
||||
it("transforms thinking parts in streaming data", () => {
|
||||
const payload = {
|
||||
candidates: [{
|
||||
content: {
|
||||
parts: [{ thought: true, text: "reasoning..." }]
|
||||
}
|
||||
}]
|
||||
};
|
||||
const line = `data: ${JSON.stringify(payload)}`;
|
||||
const result = callTransformSseLine(line);
|
||||
expect(result).toContain("data:");
|
||||
});
|
||||
});
|
||||
|
||||
describe("transformStreamingPayload", () => {
|
||||
it("handles empty string", () => {
|
||||
expect(transformStreamingPayload("")).toBe("");
|
||||
});
|
||||
|
||||
it("handles single line without data prefix", () => {
|
||||
expect(transformStreamingPayload("event: ping")).toBe("event: ping");
|
||||
});
|
||||
|
||||
it("handles multiple lines", () => {
|
||||
const input = "event: message\ndata: [DONE]\n";
|
||||
const result = transformStreamingPayload(input);
|
||||
expect(result).toContain("event: message");
|
||||
expect(result).toContain("data: [DONE]");
|
||||
});
|
||||
|
||||
it("preserves line structure", () => {
|
||||
const input = "line1\nline2\nline3";
|
||||
const result = transformStreamingPayload(input);
|
||||
const lines = result.split("\n");
|
||||
expect(lines.length).toBe(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe("createStreamingTransformer", () => {
|
||||
it("returns a TransformStream", () => {
|
||||
const store = createMockSignatureStore();
|
||||
const transformer = createStreamingTransformer(store, defaultCallbacks);
|
||||
expect(transformer).toBeInstanceOf(TransformStream);
|
||||
expect(transformer.readable).toBeDefined();
|
||||
expect(transformer.writable).toBeDefined();
|
||||
});
|
||||
|
||||
it("accepts optional signatureSessionKey", () => {
|
||||
const store = createMockSignatureStore();
|
||||
const transformer = createStreamingTransformer(store, defaultCallbacks, { signatureSessionKey: "session-key" });
|
||||
expect(transformer).toBeInstanceOf(TransformStream);
|
||||
});
|
||||
|
||||
it("accepts optional debugText", () => {
|
||||
const store = createMockSignatureStore();
|
||||
const transformer = createStreamingTransformer(store, defaultCallbacks, { signatureSessionKey: "session-key", debugText: "debug info" });
|
||||
expect(transformer).toBeInstanceOf(TransformStream);
|
||||
});
|
||||
|
||||
it("accepts cacheSignatures flag", () => {
|
||||
const store = createMockSignatureStore();
|
||||
const transformer = createStreamingTransformer(store, defaultCallbacks, { signatureSessionKey: "session-key", cacheSignatures: true });
|
||||
expect(transformer).toBeInstanceOf(TransformStream);
|
||||
});
|
||||
|
||||
it("processes chunks through the stream", async () => {
|
||||
const store = createMockSignatureStore();
|
||||
const transformer = createStreamingTransformer(store, defaultCallbacks);
|
||||
const encoder = new TextEncoder();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
const input = encoder.encode("data: [DONE]\n");
|
||||
const outputChunks: Uint8Array[] = [];
|
||||
|
||||
const writer = transformer.writable.getWriter();
|
||||
const reader = transformer.readable.getReader();
|
||||
|
||||
const readPromise = (async () => {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
if (value) outputChunks.push(value);
|
||||
}
|
||||
})();
|
||||
|
||||
await writer.write(input);
|
||||
await writer.close();
|
||||
await readPromise;
|
||||
|
||||
const output = outputChunks.map(chunk => decoder.decode(chunk)).join("");
|
||||
expect(output).toContain("[DONE]");
|
||||
});
|
||||
});
|
||||
|
||||
describe("prepareAntigravityRequest", () => {
|
||||
const mockAccessToken = "test-token";
|
||||
const mockProjectId = "test-project";
|
||||
|
||||
it("returns unchanged request for non-generative-language URLs", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://example.com/api",
|
||||
{ method: "POST" },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(false);
|
||||
expect(result.request).toBe("https://example.com/api");
|
||||
});
|
||||
|
||||
it("returns unchanged request for URLs without model pattern", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1/models",
|
||||
{ method: "POST" },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(false);
|
||||
});
|
||||
|
||||
it("detects streaming from generateStreamContent action", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(true);
|
||||
});
|
||||
|
||||
it("detects non-streaming from generateContent action", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(false);
|
||||
});
|
||||
|
||||
it("sets Authorization header with Bearer token", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
const headers = result.init.headers as Headers;
|
||||
expect(headers.get("Authorization")).toBe("Bearer test-token");
|
||||
});
|
||||
|
||||
it("removes x-api-key header", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }), headers: { "x-api-key": "old-key" } },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
const headers = result.init.headers as Headers;
|
||||
expect(headers.get("x-api-key")).toBeNull();
|
||||
});
|
||||
|
||||
it("identifies Claude models correctly", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/claude-sonnet-4-20250514:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.effectiveModel).toContain("claude");
|
||||
});
|
||||
|
||||
it("identifies Gemini models correctly", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.effectiveModel).toContain("gemini");
|
||||
});
|
||||
|
||||
it("uses custom endpoint override", () => {
|
||||
const customEndpoint = "https://custom.api.com";
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId,
|
||||
customEndpoint
|
||||
);
|
||||
expect(result.endpoint).toContain(customEndpoint);
|
||||
});
|
||||
|
||||
it("handles wrapped Antigravity body format", () => {
|
||||
const wrappedBody = {
|
||||
project: "my-project",
|
||||
request: { contents: [{ parts: [{ text: "Hello" }] }] }
|
||||
};
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify(wrappedBody) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(false);
|
||||
});
|
||||
|
||||
it("handles unwrapped body format", () => {
|
||||
const unwrappedBody = {
|
||||
contents: [{ parts: [{ text: "Hello" }] }]
|
||||
};
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify(unwrappedBody) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(false);
|
||||
});
|
||||
|
||||
it("returns requestedModel matching URL model", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.requestedModel).toBe("gemini-2.5-flash");
|
||||
});
|
||||
|
||||
it("handles empty body gracefully", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({}) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(false);
|
||||
});
|
||||
|
||||
it("handles minimal valid JSON body", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId
|
||||
);
|
||||
expect(result.streaming).toBe(false);
|
||||
});
|
||||
|
||||
it("preserves headerStyle in response", () => {
|
||||
const result = prepareAntigravityRequest(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
|
||||
{ method: "POST", body: JSON.stringify({ contents: [] }) },
|
||||
mockAccessToken,
|
||||
mockProjectId,
|
||||
undefined,
|
||||
"gemini-cli"
|
||||
);
|
||||
expect(result.headerStyle).toBe("gemini-cli");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -9,6 +9,12 @@ import {
|
||||
type HeaderStyle,
|
||||
} from "../constants";
|
||||
import { cacheSignature, getCachedSignature } from "./cache";
|
||||
import {
|
||||
createStreamingTransformer,
|
||||
transformSseLine,
|
||||
transformStreamingPayload,
|
||||
} from "./core/streaming";
|
||||
import { defaultSignatureStore } from "./stores/signature-store";
|
||||
import {
|
||||
DEBUG_MESSAGE_PREFIX,
|
||||
isDebugEnabled,
|
||||
@@ -49,30 +55,15 @@ import {
|
||||
resolveModelWithTier,
|
||||
isClaudeModel,
|
||||
isClaudeThinkingModel,
|
||||
configureClaudeToolConfig,
|
||||
appendClaudeThinkingHint,
|
||||
normalizeClaudeTools,
|
||||
normalizeGeminiTools,
|
||||
CLAUDE_THINKING_MAX_OUTPUT_TOKENS,
|
||||
} from "./transform";
|
||||
import { detectErrorType } from "./recovery";
|
||||
|
||||
const log = createLogger("request");
|
||||
|
||||
/**
|
||||
* Stable session ID for the plugin's lifetime.
|
||||
* This is used for caching thinking signatures across multi-turn conversations.
|
||||
* Generated once at plugin load time and reused for all requests.
|
||||
*/
|
||||
const PLUGIN_SESSION_ID = `-${crypto.randomUUID()}`;
|
||||
|
||||
type SignedThinking = {
|
||||
text: string;
|
||||
signature: string;
|
||||
};
|
||||
|
||||
const MIN_SIGNATURE_LENGTH = 50;
|
||||
const lastSignedThinkingBySessionKey = new Map<string, SignedThinking>();
|
||||
|
||||
function buildSignatureSessionKey(
|
||||
sessionId: string,
|
||||
@@ -403,7 +394,7 @@ function ensureThinkingBeforeToolUseInContents(contents: any[], signatureSession
|
||||
return { ...content, parts: [...thinkingParts, ...otherParts] };
|
||||
}
|
||||
|
||||
const lastThinking = lastSignedThinkingBySessionKey.get(signatureSessionKey);
|
||||
const lastThinking = defaultSignatureStore.get(signatureSessionKey);
|
||||
if (!lastThinking) {
|
||||
return content;
|
||||
}
|
||||
@@ -516,7 +507,7 @@ function ensureThinkingBeforeToolUseInMessages(messages: any[], signatureSession
|
||||
return { ...message, content: [...thinkingBlocks, ...otherBlocks] };
|
||||
}
|
||||
|
||||
const lastThinking = lastSignedThinkingBySessionKey.get(signatureSessionKey);
|
||||
const lastThinking = defaultSignatureStore.get(signatureSessionKey);
|
||||
if (!lastThinking) {
|
||||
return message;
|
||||
}
|
||||
@@ -556,190 +547,6 @@ export function isGenerativeLanguageRequest(input: RequestInfo): input is string
|
||||
return typeof input === "string" && input.includes("generativelanguage.googleapis.com");
|
||||
}
|
||||
|
||||
/**
|
||||
* Rewrites SSE payloads so downstream consumers see only the inner `response` objects,
|
||||
* with thinking/reasoning blocks transformed to OpenCode's expected format.
|
||||
*/
|
||||
function transformStreamingPayload(payload: string): string {
|
||||
return payload
|
||||
.split("\n")
|
||||
.map((line) => {
|
||||
if (!line.startsWith("data:")) {
|
||||
return line;
|
||||
}
|
||||
const json = line.slice(5).trim();
|
||||
if (!json) {
|
||||
return line;
|
||||
}
|
||||
try {
|
||||
const parsed = JSON.parse(json) as { response?: unknown };
|
||||
if (parsed.response !== undefined) {
|
||||
const transformed = transformThinkingParts(parsed.response);
|
||||
return `data: ${JSON.stringify(transformed)}`;
|
||||
}
|
||||
} catch (_) { }
|
||||
return line;
|
||||
})
|
||||
.join("\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a TransformStream that processes SSE chunks incrementally,
|
||||
* transforming each line as it arrives for true real-time streaming support.
|
||||
* Optionally caches thinking signatures for Claude multi-turn conversations.
|
||||
*/
|
||||
function createStreamingTransformer(
|
||||
signatureSessionKey?: string,
|
||||
debugText?: string,
|
||||
cacheSignatures = false,
|
||||
): TransformStream<Uint8Array, Uint8Array> {
|
||||
const decoder = new TextDecoder();
|
||||
const encoder = new TextEncoder();
|
||||
let buffer = "";
|
||||
// Buffer for accumulating thinking text per candidate index (for signature caching)
|
||||
const thoughtBuffer = new Map<number, string>();
|
||||
const debugState = { injected: false };
|
||||
|
||||
return new TransformStream({
|
||||
transform(chunk, controller) {
|
||||
// Decode chunk with stream: true to handle multi-byte characters correctly
|
||||
buffer += decoder.decode(chunk, { stream: true });
|
||||
|
||||
// Process complete lines immediately for real-time streaming
|
||||
const lines = buffer.split("\n");
|
||||
// Keep the last incomplete line in buffer
|
||||
buffer = lines.pop() || "";
|
||||
|
||||
for (const line of lines) {
|
||||
// Transform and forward each line immediately
|
||||
const transformedLine = transformSseLine(
|
||||
line,
|
||||
signatureSessionKey,
|
||||
thoughtBuffer,
|
||||
debugText,
|
||||
debugState,
|
||||
cacheSignatures,
|
||||
);
|
||||
controller.enqueue(encoder.encode(transformedLine + "\n"));
|
||||
}
|
||||
},
|
||||
flush(controller) {
|
||||
// Flush any remaining bytes from TextDecoder
|
||||
buffer += decoder.decode();
|
||||
|
||||
// Process any remaining data in buffer
|
||||
if (buffer) {
|
||||
const transformedLine = transformSseLine(
|
||||
buffer,
|
||||
signatureSessionKey,
|
||||
thoughtBuffer,
|
||||
debugText,
|
||||
debugState,
|
||||
cacheSignatures,
|
||||
);
|
||||
controller.enqueue(encoder.encode(transformedLine));
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms a single SSE line, extracting and transforming the inner response.
|
||||
* Optionally caches thinking signatures for Claude multi-turn support.
|
||||
*/
|
||||
function transformSseLine(
|
||||
line: string,
|
||||
signatureSessionKey?: string,
|
||||
thoughtBuffer?: Map<number, string>,
|
||||
debugText?: string,
|
||||
debugState?: { injected: boolean },
|
||||
cacheSignatures = false,
|
||||
): string {
|
||||
if (!line.startsWith("data:")) {
|
||||
return line;
|
||||
}
|
||||
const json = line.slice(5).trim();
|
||||
if (!json) {
|
||||
return line;
|
||||
}
|
||||
try {
|
||||
const parsed = JSON.parse(json) as { response?: unknown };
|
||||
if (parsed.response !== undefined) {
|
||||
if (cacheSignatures && signatureSessionKey && thoughtBuffer) {
|
||||
cacheThinkingSignatures(parsed.response, signatureSessionKey, thoughtBuffer);
|
||||
}
|
||||
|
||||
let response: unknown = parsed.response;
|
||||
if (debugText && debugState && !debugState.injected) {
|
||||
response = injectDebugThinking(response, debugText);
|
||||
debugState.injected = true;
|
||||
}
|
||||
|
||||
const transformed = transformThinkingParts(response);
|
||||
return `data: ${JSON.stringify(transformed)}`;
|
||||
}
|
||||
} catch (_) { }
|
||||
return line;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts and caches thinking signatures from a response for Claude multi-turn support.
|
||||
*/
|
||||
function cacheThinkingSignatures(
|
||||
response: unknown,
|
||||
signatureSessionKey: string,
|
||||
thoughtBuffer: Map<number, string>,
|
||||
): void {
|
||||
if (!response || typeof response !== "object") return;
|
||||
|
||||
const resp = response as Record<string, unknown>;
|
||||
|
||||
// Handle Gemini-style candidates array (Claude through Antigravity uses this format)
|
||||
if (Array.isArray(resp.candidates)) {
|
||||
resp.candidates.forEach((candidate: any, index: number) => {
|
||||
if (!candidate?.content?.parts) return;
|
||||
|
||||
candidate.content.parts.forEach((part: any) => {
|
||||
// Collect thinking text
|
||||
if (part.thought === true || part.type === "thinking") {
|
||||
const text = part.text || part.thinking || "";
|
||||
if (text) {
|
||||
const current = thoughtBuffer.get(index) ?? "";
|
||||
thoughtBuffer.set(index, current + text);
|
||||
}
|
||||
}
|
||||
|
||||
// Cache signature when we receive it
|
||||
if (part.thoughtSignature) {
|
||||
const fullText = thoughtBuffer.get(index) ?? "";
|
||||
if (fullText) {
|
||||
cacheSignature(signatureSessionKey, fullText, part.thoughtSignature);
|
||||
lastSignedThinkingBySessionKey.set(signatureSessionKey, { text: fullText, signature: part.thoughtSignature });
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Handle Anthropic-style content array
|
||||
if (Array.isArray(resp.content)) {
|
||||
let thinkingText = "";
|
||||
resp.content.forEach((block: any) => {
|
||||
if (block?.type === "thinking") {
|
||||
thinkingText += block.thinking || block.text || "";
|
||||
}
|
||||
if (block?.signature && thinkingText) {
|
||||
cacheSignature(signatureSessionKey, thinkingText, block.signature);
|
||||
lastSignedThinkingBySessionKey.set(signatureSessionKey, { text: thinkingText, signature: block.signature });
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Rewrites OpenAI-style requests into Antigravity shape, normalizing model, headers,
|
||||
* optional cached_content, and thinking config. Also toggles streaming mode for SSE actions.
|
||||
*/
|
||||
/**
|
||||
* Options for request preparation.
|
||||
*/
|
||||
@@ -896,7 +703,7 @@ export function prepareAntigravityRequest(
|
||||
(Array.isArray((req as any).contents) && hasSignedThinkingInContents((req as any).contents)) ||
|
||||
(Array.isArray((req as any).messages) && hasSignedThinkingInMessages((req as any).messages)),
|
||||
);
|
||||
const hasCachedThinking = lastSignedThinkingBySessionKey.has(signatureSessionKey);
|
||||
const hasCachedThinking = defaultSignatureStore.has(signatureSessionKey);
|
||||
needsSignedThinkingWarmup = hasToolUse && !hasSignedThinking && !hasCachedThinking;
|
||||
}
|
||||
|
||||
@@ -1128,7 +935,7 @@ export function prepareAntigravityRequest(
|
||||
return cleaned;
|
||||
};
|
||||
|
||||
requestPayload.tools.forEach((tool: any, idx: number) => {
|
||||
requestPayload.tools.forEach((tool: any) => {
|
||||
const pushDeclaration = (decl: any, source: string) => {
|
||||
const schema =
|
||||
decl?.parameters ||
|
||||
@@ -1313,7 +1120,7 @@ export function prepareAntigravityRequest(
|
||||
const hasSignedThinking =
|
||||
(Array.isArray(requestPayload.contents) && hasSignedThinkingInContents(requestPayload.contents)) ||
|
||||
(Array.isArray(requestPayload.messages) && hasSignedThinkingInMessages(requestPayload.messages));
|
||||
const hasCachedThinking = lastSignedThinkingBySessionKey.has(signatureSessionKey);
|
||||
const hasCachedThinking = defaultSignatureStore.has(signatureSessionKey);
|
||||
needsSignedThinkingWarmup = hasToolUse && !hasSignedThinking && !hasCachedThinking;
|
||||
}
|
||||
}
|
||||
@@ -1417,8 +1224,7 @@ export function prepareAntigravityRequest(
|
||||
|
||||
requestPayload.contents = closeToolLoopForThinking(requestPayload.contents);
|
||||
|
||||
// Clear the cached thinking for this session since we're starting fresh
|
||||
lastSignedThinkingBySessionKey.delete(signatureSessionKey);
|
||||
defaultSignatureStore.delete(signatureSessionKey);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1597,9 +1403,20 @@ export async function transformAntigravityResponse(
|
||||
note: "Streaming SSE response (real-time transform)",
|
||||
});
|
||||
|
||||
// Use the optimized line-by-line transformer for immediate forwarding
|
||||
// This ensures thinking/reasoning content streams in real-time
|
||||
return new Response(response.body.pipeThrough(createStreamingTransformer(sessionId, debugText, cacheSignatures)), {
|
||||
const streamingTransformer = createStreamingTransformer(
|
||||
defaultSignatureStore,
|
||||
{
|
||||
onCacheSignature: cacheSignature,
|
||||
onInjectDebug: injectDebugThinking,
|
||||
transformThinkingParts,
|
||||
},
|
||||
{
|
||||
signatureSessionKey: sessionId,
|
||||
debugText,
|
||||
cacheSignatures,
|
||||
},
|
||||
);
|
||||
return new Response(response.body.pipeThrough(streamingTransformer), {
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
headers,
|
||||
@@ -1738,3 +1555,29 @@ export async function transformAntigravityResponse(
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
export const __testExports = {
|
||||
buildSignatureSessionKey,
|
||||
hashConversationSeed,
|
||||
extractTextFromContent,
|
||||
extractConversationSeedFromMessages,
|
||||
extractConversationSeedFromContents,
|
||||
resolveConversationKey,
|
||||
resolveProjectKey,
|
||||
isGeminiToolUsePart,
|
||||
isGeminiThinkingPart,
|
||||
ensureThoughtSignature,
|
||||
hasSignedThinkingPart,
|
||||
hasSignedThinkingInContents,
|
||||
hasSignedThinkingInMessages,
|
||||
hasToolUseInContents,
|
||||
hasToolUseInMessages,
|
||||
ensureThinkingBeforeToolUseInContents,
|
||||
ensureThinkingBeforeToolUseInMessages,
|
||||
generateSyntheticProjectId,
|
||||
MIN_SIGNATURE_LENGTH,
|
||||
transformSseLine,
|
||||
transformStreamingPayload,
|
||||
createStreamingTransformer,
|
||||
};
|
||||
|
||||
|
||||
30
src/plugin/stores/signature-store.ts
Normal file
30
src/plugin/stores/signature-store.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import type { SignatureStore, SignedThinking, ThoughtBuffer } from '../core/streaming/types';
|
||||
|
||||
export function createSignatureStore(): SignatureStore {
|
||||
const store = new Map<string, SignedThinking>();
|
||||
|
||||
return {
|
||||
get: (key: string) => store.get(key),
|
||||
set: (key: string, value: SignedThinking) => {
|
||||
store.set(key, value);
|
||||
},
|
||||
has: (key: string) => store.has(key),
|
||||
delete: (key: string) => {
|
||||
store.delete(key);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function createThoughtBuffer(): ThoughtBuffer {
|
||||
const buffer = new Map<number, string>();
|
||||
|
||||
return {
|
||||
get: (index: number) => buffer.get(index),
|
||||
set: (index: number, text: string) => {
|
||||
buffer.set(index, text);
|
||||
},
|
||||
clear: () => buffer.clear(),
|
||||
};
|
||||
}
|
||||
|
||||
export const defaultSignatureStore = createSignatureStore();
|
||||
783
src/plugin/transform/claude.test.ts
Normal file
783
src/plugin/transform/claude.test.ts
Normal file
@@ -0,0 +1,783 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import {
|
||||
isClaudeModel,
|
||||
isClaudeThinkingModel,
|
||||
configureClaudeToolConfig,
|
||||
buildClaudeThinkingConfig,
|
||||
ensureClaudeMaxOutputTokens,
|
||||
appendClaudeThinkingHint,
|
||||
normalizeClaudeTools,
|
||||
applyClaudeTransforms,
|
||||
CLAUDE_THINKING_MAX_OUTPUT_TOKENS,
|
||||
CLAUDE_INTERLEAVED_THINKING_HINT,
|
||||
} from "./claude";
|
||||
import type { RequestPayload } from "./types";
|
||||
|
||||
describe("isClaudeModel", () => {
|
||||
it("returns true for claude model names", () => {
|
||||
expect(isClaudeModel("claude-sonnet-4-5")).toBe(true);
|
||||
expect(isClaudeModel("claude-opus-4-5")).toBe(true);
|
||||
expect(isClaudeModel("claude-3-opus")).toBe(true);
|
||||
expect(isClaudeModel("claude-3-5-sonnet")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for case-insensitive matches", () => {
|
||||
expect(isClaudeModel("CLAUDE-SONNET-4-5")).toBe(true);
|
||||
expect(isClaudeModel("Claude-Opus-4-5")).toBe(true);
|
||||
expect(isClaudeModel("cLaUdE-3-opus")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for prefixed claude models", () => {
|
||||
expect(isClaudeModel("antigravity-claude-sonnet-4-5")).toBe(true);
|
||||
expect(isClaudeModel("google/claude-opus-4-5")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for non-claude models", () => {
|
||||
expect(isClaudeModel("gemini-3-pro")).toBe(false);
|
||||
expect(isClaudeModel("gpt-4")).toBe(false);
|
||||
expect(isClaudeModel("llama-3")).toBe(false);
|
||||
expect(isClaudeModel("")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for similar but non-claude names", () => {
|
||||
expect(isClaudeModel("claudia-model")).toBe(false);
|
||||
expect(isClaudeModel("clade-model")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isClaudeThinkingModel", () => {
|
||||
it("returns true for claude thinking models", () => {
|
||||
expect(isClaudeThinkingModel("claude-sonnet-4-5-thinking")).toBe(true);
|
||||
expect(isClaudeThinkingModel("claude-opus-4-5-thinking")).toBe(true);
|
||||
expect(isClaudeThinkingModel("claude-sonnet-4-5-thinking-high")).toBe(true);
|
||||
expect(isClaudeThinkingModel("claude-opus-4-5-thinking-low")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for case-insensitive matches", () => {
|
||||
expect(isClaudeThinkingModel("CLAUDE-SONNET-4-5-THINKING")).toBe(true);
|
||||
expect(isClaudeThinkingModel("Claude-Opus-4-5-Thinking")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for prefixed thinking models", () => {
|
||||
expect(isClaudeThinkingModel("antigravity-claude-sonnet-4-5-thinking")).toBe(true);
|
||||
expect(isClaudeThinkingModel("google/claude-opus-4-5-thinking-high")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for non-thinking claude models", () => {
|
||||
expect(isClaudeThinkingModel("claude-sonnet-4-5")).toBe(false);
|
||||
expect(isClaudeThinkingModel("claude-opus-4-5")).toBe(false);
|
||||
expect(isClaudeThinkingModel("claude-3-opus")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for non-claude models", () => {
|
||||
expect(isClaudeThinkingModel("gemini-3-pro-thinking")).toBe(false);
|
||||
expect(isClaudeThinkingModel("gpt-4-thinking")).toBe(false);
|
||||
});
|
||||
|
||||
it("requires both claude and thinking keywords", () => {
|
||||
expect(isClaudeThinkingModel("thinking-model")).toBe(false);
|
||||
expect(isClaudeThinkingModel("claude-model")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("configureClaudeToolConfig", () => {
|
||||
it("creates toolConfig if not present", () => {
|
||||
const payload: RequestPayload = {};
|
||||
configureClaudeToolConfig(payload);
|
||||
|
||||
expect(payload.toolConfig).toBeDefined();
|
||||
expect((payload.toolConfig as any).functionCallingConfig).toBeDefined();
|
||||
expect((payload.toolConfig as any).functionCallingConfig.mode).toBe("VALIDATED");
|
||||
});
|
||||
|
||||
it("adds functionCallingConfig to existing toolConfig", () => {
|
||||
const payload: RequestPayload = {
|
||||
toolConfig: { someOtherConfig: true },
|
||||
};
|
||||
configureClaudeToolConfig(payload);
|
||||
|
||||
expect((payload.toolConfig as any).someOtherConfig).toBe(true);
|
||||
expect((payload.toolConfig as any).functionCallingConfig.mode).toBe("VALIDATED");
|
||||
});
|
||||
|
||||
it("sets mode to VALIDATED on existing functionCallingConfig", () => {
|
||||
const payload: RequestPayload = {
|
||||
toolConfig: {
|
||||
functionCallingConfig: { existingKey: "value" },
|
||||
},
|
||||
};
|
||||
configureClaudeToolConfig(payload);
|
||||
|
||||
expect((payload.toolConfig as any).functionCallingConfig.existingKey).toBe("value");
|
||||
expect((payload.toolConfig as any).functionCallingConfig.mode).toBe("VALIDATED");
|
||||
});
|
||||
|
||||
it("overwrites existing mode", () => {
|
||||
const payload: RequestPayload = {
|
||||
toolConfig: {
|
||||
functionCallingConfig: { mode: "AUTO" },
|
||||
},
|
||||
};
|
||||
configureClaudeToolConfig(payload);
|
||||
|
||||
expect((payload.toolConfig as any).functionCallingConfig.mode).toBe("VALIDATED");
|
||||
});
|
||||
|
||||
it("handles null toolConfig gracefully", () => {
|
||||
const payload: RequestPayload = { toolConfig: null };
|
||||
configureClaudeToolConfig(payload);
|
||||
|
||||
expect(payload.toolConfig).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("buildClaudeThinkingConfig", () => {
|
||||
it("builds config with include_thoughts only", () => {
|
||||
const config = buildClaudeThinkingConfig(true);
|
||||
|
||||
expect(config).toEqual({ include_thoughts: true });
|
||||
});
|
||||
|
||||
it("builds config with include_thoughts false", () => {
|
||||
const config = buildClaudeThinkingConfig(false);
|
||||
|
||||
expect(config).toEqual({ include_thoughts: false });
|
||||
});
|
||||
|
||||
it("includes thinking_budget when provided and positive", () => {
|
||||
const config = buildClaudeThinkingConfig(true, 8192);
|
||||
|
||||
expect(config).toEqual({
|
||||
include_thoughts: true,
|
||||
thinking_budget: 8192,
|
||||
});
|
||||
});
|
||||
|
||||
it("excludes thinking_budget when zero", () => {
|
||||
const config = buildClaudeThinkingConfig(true, 0);
|
||||
|
||||
expect(config).toEqual({ include_thoughts: true });
|
||||
});
|
||||
|
||||
it("excludes thinking_budget when negative", () => {
|
||||
const config = buildClaudeThinkingConfig(true, -100);
|
||||
|
||||
expect(config).toEqual({ include_thoughts: true });
|
||||
});
|
||||
|
||||
it("excludes thinking_budget when undefined", () => {
|
||||
const config = buildClaudeThinkingConfig(true, undefined);
|
||||
|
||||
expect(config).toEqual({ include_thoughts: true });
|
||||
});
|
||||
|
||||
it("handles various budget values", () => {
|
||||
expect(buildClaudeThinkingConfig(true, 8192)).toHaveProperty("thinking_budget", 8192);
|
||||
expect(buildClaudeThinkingConfig(true, 16384)).toHaveProperty("thinking_budget", 16384);
|
||||
expect(buildClaudeThinkingConfig(true, 32768)).toHaveProperty("thinking_budget", 32768);
|
||||
});
|
||||
});
|
||||
|
||||
describe("ensureClaudeMaxOutputTokens", () => {
|
||||
it("sets maxOutputTokens when not present", () => {
|
||||
const config: Record<string, unknown> = {};
|
||||
ensureClaudeMaxOutputTokens(config, 8192);
|
||||
|
||||
expect(config.maxOutputTokens).toBe(CLAUDE_THINKING_MAX_OUTPUT_TOKENS);
|
||||
});
|
||||
|
||||
it("sets maxOutputTokens when current is less than budget", () => {
|
||||
const config: Record<string, unknown> = { maxOutputTokens: 4096 };
|
||||
ensureClaudeMaxOutputTokens(config, 8192);
|
||||
|
||||
expect(config.maxOutputTokens).toBe(CLAUDE_THINKING_MAX_OUTPUT_TOKENS);
|
||||
});
|
||||
|
||||
it("sets maxOutputTokens when current equals budget", () => {
|
||||
const config: Record<string, unknown> = { maxOutputTokens: 8192 };
|
||||
ensureClaudeMaxOutputTokens(config, 8192);
|
||||
|
||||
expect(config.maxOutputTokens).toBe(CLAUDE_THINKING_MAX_OUTPUT_TOKENS);
|
||||
});
|
||||
|
||||
it("does not change maxOutputTokens when current is greater than budget", () => {
|
||||
const config: Record<string, unknown> = { maxOutputTokens: 100000 };
|
||||
ensureClaudeMaxOutputTokens(config, 8192);
|
||||
|
||||
expect(config.maxOutputTokens).toBe(100000);
|
||||
});
|
||||
|
||||
it("handles snake_case max_output_tokens", () => {
|
||||
const config: Record<string, unknown> = { max_output_tokens: 4096 };
|
||||
ensureClaudeMaxOutputTokens(config, 8192);
|
||||
|
||||
expect(config.maxOutputTokens).toBe(CLAUDE_THINKING_MAX_OUTPUT_TOKENS);
|
||||
expect(config.max_output_tokens).toBeUndefined();
|
||||
});
|
||||
|
||||
it("removes max_output_tokens when setting maxOutputTokens", () => {
|
||||
const config: Record<string, unknown> = {
|
||||
max_output_tokens: 4096,
|
||||
maxOutputTokens: 4096,
|
||||
};
|
||||
ensureClaudeMaxOutputTokens(config, 8192);
|
||||
|
||||
expect(config.maxOutputTokens).toBe(CLAUDE_THINKING_MAX_OUTPUT_TOKENS);
|
||||
expect(config.max_output_tokens).toBeUndefined();
|
||||
});
|
||||
|
||||
it("prefers maxOutputTokens over max_output_tokens for comparison", () => {
|
||||
const config: Record<string, unknown> = {
|
||||
maxOutputTokens: 100000,
|
||||
max_output_tokens: 4096,
|
||||
};
|
||||
ensureClaudeMaxOutputTokens(config, 8192);
|
||||
|
||||
expect(config.maxOutputTokens).toBe(100000);
|
||||
});
|
||||
});
|
||||
|
||||
describe("appendClaudeThinkingHint", () => {
|
||||
describe("with string systemInstruction", () => {
|
||||
it("appends hint to existing string instruction", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: "You are a helpful assistant.",
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
expect(payload.systemInstruction).toBe(
|
||||
`You are a helpful assistant.\n\n${CLAUDE_INTERLEAVED_THINKING_HINT}`
|
||||
);
|
||||
});
|
||||
|
||||
it("uses hint alone when existing instruction is empty", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: "",
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
expect(payload.systemInstruction).toBe(CLAUDE_INTERLEAVED_THINKING_HINT);
|
||||
});
|
||||
|
||||
it("uses hint alone when existing instruction is whitespace", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: " ",
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
expect(payload.systemInstruction).toBe(CLAUDE_INTERLEAVED_THINKING_HINT);
|
||||
});
|
||||
|
||||
it("accepts custom hint", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: "Base instruction.",
|
||||
};
|
||||
appendClaudeThinkingHint(payload, "Custom hint.");
|
||||
|
||||
expect(payload.systemInstruction).toBe("Base instruction.\n\nCustom hint.");
|
||||
});
|
||||
});
|
||||
|
||||
describe("with object systemInstruction (parts array)", () => {
|
||||
it("appends hint to last text part", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: {
|
||||
parts: [{ text: "First part." }, { text: "Last part." }],
|
||||
},
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
const sys = payload.systemInstruction as any;
|
||||
expect(sys.parts[0].text).toBe("First part.");
|
||||
expect(sys.parts[1].text).toBe(`Last part.\n\n${CLAUDE_INTERLEAVED_THINKING_HINT}`);
|
||||
});
|
||||
|
||||
it("appends hint to single text part", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: {
|
||||
parts: [{ text: "Only part." }],
|
||||
},
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
const sys = payload.systemInstruction as any;
|
||||
expect(sys.parts[0].text).toBe(`Only part.\n\n${CLAUDE_INTERLEAVED_THINKING_HINT}`);
|
||||
});
|
||||
|
||||
it("creates new text part when no text parts exist", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: {
|
||||
parts: [{ image: "base64data" }],
|
||||
},
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
const sys = payload.systemInstruction as any;
|
||||
expect(sys.parts).toHaveLength(2);
|
||||
expect(sys.parts[1].text).toBe(CLAUDE_INTERLEAVED_THINKING_HINT);
|
||||
});
|
||||
|
||||
it("creates parts array when not present", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: { role: "system" },
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
const sys = payload.systemInstruction as any;
|
||||
expect(sys.parts).toEqual([{ text: CLAUDE_INTERLEAVED_THINKING_HINT }]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("with no systemInstruction", () => {
|
||||
it("creates systemInstruction when contents array exists", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [{ role: "user", parts: [{ text: "Hello" }] }],
|
||||
};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
expect(payload.systemInstruction).toEqual({
|
||||
parts: [{ text: CLAUDE_INTERLEAVED_THINKING_HINT }],
|
||||
});
|
||||
});
|
||||
|
||||
it("does not create systemInstruction when no contents", () => {
|
||||
const payload: RequestPayload = {};
|
||||
appendClaudeThinkingHint(payload);
|
||||
|
||||
expect(payload.systemInstruction).toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("normalizeClaudeTools", () => {
|
||||
const identityClean = (schema: unknown) => schema as Record<string, unknown>;
|
||||
|
||||
const realClean = (schema: unknown): Record<string, unknown> => {
|
||||
if (!schema || typeof schema !== "object") return {};
|
||||
const cleaned = { ...schema as Record<string, unknown> };
|
||||
delete cleaned.$schema;
|
||||
delete cleaned.$id;
|
||||
return cleaned;
|
||||
};
|
||||
|
||||
it("returns empty result when no tools", () => {
|
||||
const payload: RequestPayload = {};
|
||||
const result = normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
expect(result.toolDebugMissing).toBe(0);
|
||||
expect(result.toolDebugSummaries).toEqual([]);
|
||||
});
|
||||
|
||||
it("returns empty result when tools is not an array", () => {
|
||||
const payload: RequestPayload = { tools: "not an array" };
|
||||
const result = normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
expect(result.toolDebugMissing).toBe(0);
|
||||
expect(result.toolDebugSummaries).toEqual([]);
|
||||
});
|
||||
|
||||
describe("functionDeclarations format", () => {
|
||||
it("normalizes tools with functionDeclarations array", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
functionDeclarations: [{
|
||||
name: "get_weather",
|
||||
description: "Get weather for a location",
|
||||
parameters: {
|
||||
type: "object",
|
||||
properties: {
|
||||
location: { type: "string" },
|
||||
},
|
||||
required: ["location"],
|
||||
},
|
||||
}],
|
||||
}],
|
||||
};
|
||||
|
||||
const result = normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
expect(result.toolDebugMissing).toBe(0);
|
||||
expect(result.toolDebugSummaries).toContain("decl=get_weather,src=functionDeclarations,hasSchema=y");
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools).toHaveLength(1);
|
||||
expect(tools[0].functionDeclarations).toHaveLength(1);
|
||||
expect(tools[0].functionDeclarations[0].name).toBe("get_weather");
|
||||
});
|
||||
|
||||
it("handles multiple functionDeclarations", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
functionDeclarations: [
|
||||
{ name: "tool1", description: "First tool" },
|
||||
{ name: "tool2", description: "Second tool" },
|
||||
],
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools[0].functionDeclarations).toHaveLength(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe("function/custom format", () => {
|
||||
it("normalizes OpenAI-style function tools", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
type: "function",
|
||||
function: {
|
||||
name: "search",
|
||||
description: "Search the web",
|
||||
parameters: {
|
||||
type: "object",
|
||||
properties: {
|
||||
query: { type: "string" },
|
||||
},
|
||||
},
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
const result = normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
expect(result.toolDebugSummaries).toContain("decl=search,src=function/custom,hasSchema=y");
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools[0].functionDeclarations[0].name).toBe("search");
|
||||
});
|
||||
|
||||
it("normalizes custom-style tools", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
custom: {
|
||||
name: "custom_tool",
|
||||
description: "A custom tool",
|
||||
input_schema: {
|
||||
type: "object",
|
||||
properties: { arg: { type: "string" } },
|
||||
},
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
const result = normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
expect(result.toolDebugSummaries).toContain("decl=custom_tool,src=function/custom,hasSchema=y");
|
||||
});
|
||||
|
||||
it("normalizes tools with top-level name/parameters", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
name: "direct_tool",
|
||||
description: "Direct definition",
|
||||
parameters: {
|
||||
type: "object",
|
||||
properties: { value: { type: "number" } },
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools[0].functionDeclarations[0].name).toBe("direct_tool");
|
||||
});
|
||||
});
|
||||
|
||||
describe("schema normalization", () => {
|
||||
it("adds placeholder when schema is missing", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
function: {
|
||||
name: "no_schema_tool",
|
||||
description: "Tool without schema",
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
const result = normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
expect(result.toolDebugMissing).toBe(1);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
const params = tools[0].functionDeclarations[0].parameters;
|
||||
expect(params.type).toBe("object");
|
||||
expect(params.properties._placeholder).toBeDefined();
|
||||
expect(params.required).toContain("_placeholder");
|
||||
});
|
||||
|
||||
it("adds placeholder when schema has no properties", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
function: {
|
||||
name: "empty_schema_tool",
|
||||
parameters: { type: "object" },
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
const params = tools[0].functionDeclarations[0].parameters;
|
||||
expect(params.properties._placeholder).toBeDefined();
|
||||
});
|
||||
|
||||
it("preserves existing properties", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
function: {
|
||||
name: "has_props_tool",
|
||||
parameters: {
|
||||
type: "object",
|
||||
properties: {
|
||||
existingProp: { type: "string" },
|
||||
},
|
||||
},
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
const params = tools[0].functionDeclarations[0].parameters;
|
||||
expect(params.properties.existingProp).toBeDefined();
|
||||
expect(params.properties._placeholder).toBeUndefined();
|
||||
});
|
||||
|
||||
it("cleans schema using provided function", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
function: {
|
||||
name: "needs_cleaning",
|
||||
parameters: {
|
||||
$schema: "http://json-schema.org/draft-07/schema#",
|
||||
type: "object",
|
||||
properties: { arg: { type: "string" } },
|
||||
},
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, realClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
const params = tools[0].functionDeclarations[0].parameters;
|
||||
expect(params.$schema).toBeUndefined();
|
||||
expect(params.properties.arg).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("tool name sanitization", () => {
|
||||
it("removes special characters from tool names", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
function: {
|
||||
name: "tool@with#special$chars!",
|
||||
parameters: { type: "object", properties: { x: { type: "string" } } },
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools[0].functionDeclarations[0].name).toBe("tool_with_special_chars_");
|
||||
});
|
||||
|
||||
it("truncates long tool names to 64 characters", () => {
|
||||
const longName = "a".repeat(100);
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
function: {
|
||||
name: longName,
|
||||
parameters: { type: "object", properties: { x: { type: "string" } } },
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools[0].functionDeclarations[0].name).toHaveLength(64);
|
||||
});
|
||||
|
||||
it("generates name when missing", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{
|
||||
function: {
|
||||
description: "Nameless tool",
|
||||
parameters: { type: "object", properties: { x: { type: "string" } } },
|
||||
},
|
||||
}],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools[0].functionDeclarations[0].name).toBe("tool-0");
|
||||
});
|
||||
});
|
||||
|
||||
describe("passthrough tools", () => {
|
||||
it("preserves non-function tools like codeExecution", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [
|
||||
{ codeExecution: {} },
|
||||
{
|
||||
function: {
|
||||
name: "regular_tool",
|
||||
parameters: { type: "object", properties: { x: { type: "string" } } },
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
normalizeClaudeTools(payload, identityClean);
|
||||
|
||||
const tools = payload.tools as any[];
|
||||
expect(tools).toHaveLength(2);
|
||||
expect(tools[0].functionDeclarations).toBeDefined();
|
||||
expect(tools[1].codeExecution).toBeDefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("applyClaudeTransforms", () => {
|
||||
const mockCleanJSONSchema = (schema: unknown) => schema as Record<string, unknown>;
|
||||
|
||||
it("applies tool config for all Claude models", () => {
|
||||
const payload: RequestPayload = {};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5",
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
expect((payload.toolConfig as any)?.functionCallingConfig?.mode).toBe("VALIDATED");
|
||||
});
|
||||
|
||||
it("applies thinking config for thinking models", () => {
|
||||
const payload: RequestPayload = {};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5-thinking",
|
||||
normalizedThinking: { includeThoughts: true, thinkingBudget: 8192 },
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
const genConfig = payload.generationConfig as any;
|
||||
expect(genConfig.thinkingConfig.include_thoughts).toBe(true);
|
||||
expect(genConfig.thinkingConfig.thinking_budget).toBe(8192);
|
||||
});
|
||||
|
||||
it("uses tierThinkingBudget over normalizedThinking.thinkingBudget", () => {
|
||||
const payload: RequestPayload = {};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5-thinking",
|
||||
tierThinkingBudget: 32768,
|
||||
normalizedThinking: { includeThoughts: true, thinkingBudget: 8192 },
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
const genConfig = payload.generationConfig as any;
|
||||
expect(genConfig.thinkingConfig.thinking_budget).toBe(32768);
|
||||
});
|
||||
|
||||
it("ensures maxOutputTokens for thinking models with budget", () => {
|
||||
const payload: RequestPayload = {
|
||||
generationConfig: { maxOutputTokens: 4096 },
|
||||
};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5-thinking",
|
||||
normalizedThinking: { includeThoughts: true, thinkingBudget: 8192 },
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
const genConfig = payload.generationConfig as any;
|
||||
expect(genConfig.maxOutputTokens).toBe(CLAUDE_THINKING_MAX_OUTPUT_TOKENS);
|
||||
});
|
||||
|
||||
it("does not apply thinking config for non-thinking models", () => {
|
||||
const payload: RequestPayload = {};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5",
|
||||
normalizedThinking: { includeThoughts: true, thinkingBudget: 8192 },
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
const genConfig = payload.generationConfig as any;
|
||||
expect(genConfig?.thinkingConfig).toBeUndefined();
|
||||
});
|
||||
|
||||
it("appends thinking hint for thinking models with tools", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: "You are helpful.",
|
||||
tools: [{ function: { name: "test", parameters: { type: "object", properties: { x: { type: "string" } } } } }],
|
||||
};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5-thinking",
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
expect((payload.systemInstruction as string)).toContain(CLAUDE_INTERLEAVED_THINKING_HINT);
|
||||
});
|
||||
|
||||
it("does not append thinking hint for thinking models without tools", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: "You are helpful.",
|
||||
};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5-thinking",
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
expect((payload.systemInstruction as string)).not.toContain(CLAUDE_INTERLEAVED_THINKING_HINT);
|
||||
});
|
||||
|
||||
it("does not append thinking hint for non-thinking models with tools", () => {
|
||||
const payload: RequestPayload = {
|
||||
systemInstruction: "You are helpful.",
|
||||
tools: [{ function: { name: "test", parameters: { type: "object", properties: { x: { type: "string" } } } } }],
|
||||
};
|
||||
|
||||
applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5",
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
expect((payload.systemInstruction as string)).not.toContain(CLAUDE_INTERLEAVED_THINKING_HINT);
|
||||
});
|
||||
|
||||
it("normalizes tools and returns debug info", () => {
|
||||
const payload: RequestPayload = {
|
||||
tools: [{ function: { name: "my_tool" } }],
|
||||
};
|
||||
|
||||
const result = applyClaudeTransforms(payload, {
|
||||
model: "claude-sonnet-4-5",
|
||||
cleanJSONSchema: mockCleanJSONSchema,
|
||||
});
|
||||
|
||||
expect(result.toolDebugMissing).toBe(1);
|
||||
expect(result.toolDebugSummaries).toContain("decl=my_tool,src=function/custom,hasSchema=n");
|
||||
});
|
||||
});
|
||||
|
||||
describe("constants", () => {
|
||||
it("exports CLAUDE_THINKING_MAX_OUTPUT_TOKENS", () => {
|
||||
expect(CLAUDE_THINKING_MAX_OUTPUT_TOKENS).toBe(64_000);
|
||||
});
|
||||
|
||||
it("exports CLAUDE_INTERLEAVED_THINKING_HINT", () => {
|
||||
expect(CLAUDE_INTERLEAVED_THINKING_HINT).toContain("Interleaved thinking is enabled");
|
||||
});
|
||||
});
|
||||
490 src/plugin/transform/gemini.test.ts (new file)
@@ -0,0 +1,490 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import {
|
||||
isGeminiModel,
|
||||
isGemini3Model,
|
||||
isGemini25Model,
|
||||
buildGemini3ThinkingConfig,
|
||||
buildGemini25ThinkingConfig,
|
||||
normalizeGeminiTools,
|
||||
applyGeminiTransforms,
|
||||
} from "./gemini";
|
||||
import type { RequestPayload } from "./types";
|
||||
|
||||
describe("transform/gemini", () => {
|
||||
describe("isGeminiModel", () => {
|
||||
it("returns true for gemini-pro", () => {
|
||||
expect(isGeminiModel("gemini-pro")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for gemini-1.5-pro", () => {
|
||||
expect(isGeminiModel("gemini-1.5-pro")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for gemini-2.5-flash", () => {
|
||||
expect(isGeminiModel("gemini-2.5-flash")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for gemini-3-pro-high", () => {
|
||||
expect(isGeminiModel("gemini-3-pro-high")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for uppercase GEMINI-PRO", () => {
|
||||
expect(isGeminiModel("GEMINI-PRO")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for mixed case Gemini-Pro", () => {
|
||||
expect(isGeminiModel("Gemini-Pro")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for claude-3-opus", () => {
|
||||
expect(isGeminiModel("claude-3-opus")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for gpt-4", () => {
|
||||
expect(isGeminiModel("gpt-4")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for gemini-claude hybrid (contains both)", () => {
|
||||
expect(isGeminiModel("gemini-claude-hybrid")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for claude-on-gemini", () => {
|
||||
expect(isGeminiModel("claude-on-gemini")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for empty string", () => {
|
||||
expect(isGeminiModel("")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isGemini3Model", () => {
|
||||
it("returns true for gemini-3-pro", () => {
|
||||
expect(isGemini3Model("gemini-3-pro")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for gemini-3-pro-high", () => {
|
||||
expect(isGemini3Model("gemini-3-pro-high")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for gemini-3-flash", () => {
|
||||
expect(isGemini3Model("gemini-3-flash")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for uppercase GEMINI-3-PRO", () => {
|
||||
expect(isGemini3Model("GEMINI-3-PRO")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for gemini-2.5-pro", () => {
|
||||
expect(isGemini3Model("gemini-2.5-pro")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for gemini-pro", () => {
|
||||
expect(isGemini3Model("gemini-pro")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for claude-3-opus", () => {
|
||||
expect(isGemini3Model("claude-3-opus")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for empty string", () => {
|
||||
expect(isGemini3Model("")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isGemini25Model", () => {
|
||||
it("returns true for gemini-2.5-pro", () => {
|
||||
expect(isGemini25Model("gemini-2.5-pro")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for gemini-2.5-flash", () => {
|
||||
expect(isGemini25Model("gemini-2.5-flash")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for gemini-2.5-pro-preview", () => {
|
||||
expect(isGemini25Model("gemini-2.5-pro-preview")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true for uppercase GEMINI-2.5-PRO", () => {
|
||||
expect(isGemini25Model("GEMINI-2.5-PRO")).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false for gemini-3-pro", () => {
|
||||
expect(isGemini25Model("gemini-3-pro")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for gemini-2.0-flash", () => {
|
||||
expect(isGemini25Model("gemini-2.0-flash")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for gemini-pro", () => {
|
||||
expect(isGemini25Model("gemini-pro")).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false for empty string", () => {
|
||||
expect(isGemini25Model("")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("buildGemini3ThinkingConfig", () => {
|
||||
it("builds config with includeThoughts true and low tier", () => {
|
||||
const config = buildGemini3ThinkingConfig(true, "low");
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
thinkingLevel: "low",
|
||||
});
|
||||
});
|
||||
|
||||
it("builds config with includeThoughts true and medium tier", () => {
|
||||
const config = buildGemini3ThinkingConfig(true, "medium");
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
thinkingLevel: "medium",
|
||||
});
|
||||
});
|
||||
|
||||
it("builds config with includeThoughts true and high tier", () => {
|
||||
const config = buildGemini3ThinkingConfig(true, "high");
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
thinkingLevel: "high",
|
||||
});
|
||||
});
|
||||
|
||||
it("builds config with includeThoughts false", () => {
|
||||
const config = buildGemini3ThinkingConfig(false, "high");
|
||||
expect(config).toEqual({
|
||||
includeThoughts: false,
|
||||
thinkingLevel: "high",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("buildGemini25ThinkingConfig", () => {
|
||||
it("builds config with includeThoughts true and budget", () => {
|
||||
const config = buildGemini25ThinkingConfig(true, 8192);
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
thinkingBudget: 8192,
|
||||
});
|
||||
});
|
||||
|
||||
it("builds config with includeThoughts false and budget", () => {
|
||||
const config = buildGemini25ThinkingConfig(false, 16384);
|
||||
expect(config).toEqual({
|
||||
includeThoughts: false,
|
||||
thinkingBudget: 16384,
|
||||
});
|
||||
});
|
||||
|
||||
it("builds config without budget when undefined", () => {
|
||||
const config = buildGemini25ThinkingConfig(true, undefined);
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
});
|
||||
expect(config).not.toHaveProperty("thinkingBudget");
|
||||
});
|
||||
|
||||
it("builds config without budget when zero", () => {
|
||||
const config = buildGemini25ThinkingConfig(true, 0);
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
});
|
||||
expect(config).not.toHaveProperty("thinkingBudget");
|
||||
});
|
||||
|
||||
it("builds config without budget when negative", () => {
|
||||
const config = buildGemini25ThinkingConfig(true, -1000);
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
});
|
||||
expect(config).not.toHaveProperty("thinkingBudget");
|
||||
});
|
||||
|
||||
it("builds config with large budget", () => {
|
||||
const config = buildGemini25ThinkingConfig(true, 100000);
|
||||
expect(config).toEqual({
|
||||
includeThoughts: true,
|
||||
thinkingBudget: 100000,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("normalizeGeminiTools", () => {
|
||||
it("returns empty debug info when tools is not an array", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
const result = normalizeGeminiTools(payload);
|
||||
expect(result).toEqual({
|
||||
toolDebugMissing: 0,
|
||||
toolDebugSummaries: [],
|
||||
});
|
||||
});
|
||||
|
||||
it("returns empty debug info when tools is undefined", () => {
|
||||
const payload: RequestPayload = { contents: [], tools: undefined };
|
||||
const result = normalizeGeminiTools(payload);
|
||||
expect(result).toEqual({
|
||||
toolDebugMissing: 0,
|
||||
toolDebugSummaries: [],
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes tool with function.input_schema", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{
|
||||
function: {
|
||||
name: "test_tool",
|
||||
description: "A test tool",
|
||||
input_schema: { type: "object", properties: { foo: { type: "string" } } },
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
const result = normalizeGeminiTools(payload);
|
||||
expect(result.toolDebugMissing).toBe(0);
|
||||
expect(result.toolDebugSummaries).toHaveLength(1);
|
||||
expect((payload.tools as unknown[])[0]).not.toHaveProperty("custom");
|
||||
});
|
||||
|
||||
it("normalizes tool with function.parameters", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{
|
||||
function: {
|
||||
name: "test_tool",
|
||||
description: "A test tool",
|
||||
parameters: { type: "object", properties: { bar: { type: "number" } } },
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
const result = normalizeGeminiTools(payload);
|
||||
expect(result.toolDebugMissing).toBe(0);
|
||||
});
|
||||
|
||||
it("creates custom from function and strips it for Gemini", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{
|
||||
function: {
|
||||
name: "my_func",
|
||||
description: "My function",
|
||||
input_schema: { type: "object" },
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
normalizeGeminiTools(payload);
|
||||
expect((payload.tools as unknown[])[0]).not.toHaveProperty("custom");
|
||||
expect((payload.tools as unknown[])[0]).toHaveProperty("function");
|
||||
});
|
||||
|
||||
it("creates custom when both function and custom are missing", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{
|
||||
name: "standalone_tool",
|
||||
description: "A standalone tool",
|
||||
parameters: { type: "object", properties: {} },
|
||||
},
|
||||
],
|
||||
};
|
||||
normalizeGeminiTools(payload);
|
||||
expect((payload.tools as unknown[])[0]).not.toHaveProperty("custom");
|
||||
});
|
||||
|
||||
it("counts missing schemas", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{ name: "tool1" },
|
||||
{ name: "tool2" },
|
||||
{ function: { name: "tool3", input_schema: { type: "object" } } },
|
||||
],
|
||||
};
|
||||
const result = normalizeGeminiTools(payload);
|
||||
expect(result.toolDebugMissing).toBe(0);
|
||||
});
|
||||
|
||||
it("generates debug summaries for each tool", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{ function: { name: "t1", input_schema: { type: "object" } } },
|
||||
{ function: { name: "t2", input_schema: { type: "object" } } },
|
||||
],
|
||||
};
|
||||
const result = normalizeGeminiTools(payload);
|
||||
expect(result.toolDebugSummaries).toHaveLength(2);
|
||||
expect(result.toolDebugSummaries[0]).toContain("idx=0");
|
||||
expect(result.toolDebugSummaries[1]).toContain("idx=1");
|
||||
});
|
||||
|
||||
it("uses default tool name when name is missing", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [{}],
|
||||
};
|
||||
const result = normalizeGeminiTools(payload);
|
||||
expect(result.toolDebugSummaries[0]).toContain("idx=0");
|
||||
});
|
||||
|
||||
it("extracts schema from custom.input_schema", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{
|
||||
custom: {
|
||||
name: "custom_tool",
|
||||
input_schema: { type: "object", properties: { x: { type: "string" } } },
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
normalizeGeminiTools(payload);
|
||||
expect((payload.tools as unknown[])[0]).not.toHaveProperty("custom");
|
||||
});
|
||||
|
||||
it("extracts schema from inputSchema (camelCase)", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{
|
||||
name: "camel_tool",
|
||||
inputSchema: { type: "object", properties: { y: { type: "boolean" } } },
|
||||
},
|
||||
],
|
||||
};
|
||||
normalizeGeminiTools(payload);
|
||||
expect((payload.tools as unknown[])[0]).not.toHaveProperty("custom");
|
||||
});
|
||||
});
|
||||
|
||||
describe("applyGeminiTransforms", () => {
|
||||
it("applies Gemini 3 thinking config with thinkingLevel", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-3-pro-high",
|
||||
tierThinkingLevel: "high",
|
||||
normalizedThinking: { includeThoughts: true },
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
expect(genConfig.thinkingConfig).toEqual({
|
||||
includeThoughts: true,
|
||||
thinkingLevel: "high",
|
||||
});
|
||||
});
|
||||
|
||||
it("applies Gemini 2.5 thinking config with thinkingBudget", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-2.5-flash",
|
||||
tierThinkingBudget: 8192,
|
||||
normalizedThinking: { includeThoughts: true },
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
expect(genConfig.thinkingConfig).toEqual({
|
||||
includeThoughts: true,
|
||||
thinkingBudget: 8192,
|
||||
});
|
||||
});
|
||||
|
||||
it("prefers tierThinkingBudget over normalizedThinking.thinkingBudget", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-2.5-pro",
|
||||
tierThinkingBudget: 16384,
|
||||
normalizedThinking: { includeThoughts: true, thinkingBudget: 8192 },
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
expect((genConfig.thinkingConfig as Record<string, unknown>).thinkingBudget).toBe(16384);
|
||||
});
|
||||
|
||||
it("falls back to normalizedThinking.thinkingBudget when tierThinkingBudget is undefined", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-2.5-pro",
|
||||
normalizedThinking: { includeThoughts: true, thinkingBudget: 4096 },
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
expect((genConfig.thinkingConfig as Record<string, unknown>).thinkingBudget).toBe(4096);
|
||||
});
|
||||
|
||||
it("does not apply thinking config when normalizedThinking is undefined", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-3-pro",
|
||||
});
|
||||
expect(payload.generationConfig).toBeUndefined();
|
||||
});
|
||||
|
||||
it("preserves existing generationConfig properties", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
generationConfig: { temperature: 0.7, maxOutputTokens: 1000 },
|
||||
};
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-3-pro-medium",
|
||||
tierThinkingLevel: "medium",
|
||||
normalizedThinking: { includeThoughts: true },
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
expect(genConfig.temperature).toBe(0.7);
|
||||
expect(genConfig.maxOutputTokens).toBe(1000);
|
||||
expect(genConfig.thinkingConfig).toBeDefined();
|
||||
});
|
||||
|
||||
it("normalizes tools and returns debug info", () => {
|
||||
const payload: RequestPayload = {
|
||||
contents: [],
|
||||
tools: [
|
||||
{ function: { name: "tool1", input_schema: { type: "object" } } },
|
||||
{ name: "tool2" },
|
||||
],
|
||||
};
|
||||
const result = applyGeminiTransforms(payload, {
|
||||
model: "gemini-2.5-flash",
|
||||
});
|
||||
expect(result.toolDebugSummaries).toHaveLength(2);
|
||||
expect(result.toolDebugMissing).toBe(0);
|
||||
});
|
||||
|
||||
it("defaults includeThoughts to true when not specified", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-3-pro-low",
|
||||
tierThinkingLevel: "low",
|
||||
normalizedThinking: {},
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
expect((genConfig.thinkingConfig as Record<string, unknown>).includeThoughts).toBe(true);
|
||||
});
|
||||
|
||||
it("respects includeThoughts false", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-3-pro-high",
|
||||
tierThinkingLevel: "high",
|
||||
normalizedThinking: { includeThoughts: false },
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
expect((genConfig.thinkingConfig as Record<string, unknown>).includeThoughts).toBe(false);
|
||||
});
|
||||
|
||||
it("handles Gemini 2.5 without tierThinkingBudget or normalizedThinking.thinkingBudget", () => {
|
||||
const payload: RequestPayload = { contents: [] };
|
||||
applyGeminiTransforms(payload, {
|
||||
model: "gemini-2.5-pro",
|
||||
normalizedThinking: { includeThoughts: true },
|
||||
});
|
||||
const genConfig = payload.generationConfig as Record<string, unknown>;
|
||||
const thinkingConfig = genConfig.thinkingConfig as Record<string, unknown>;
|
||||
expect(thinkingConfig.includeThoughts).toBe(true);
|
||||
expect(thinkingConfig).not.toHaveProperty("thinkingBudget");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -38,6 +38,7 @@ export const MODEL_ALIASES: Record<string, string> = {
|
||||
// For Antigravity, these are bypassed and full model name is kept
|
||||
"gemini-3-pro-low": "gemini-3-pro",
|
||||
"gemini-3-pro-high": "gemini-3-pro",
|
||||
"gemini-3-flash-minimal": "gemini-3-flash",
|
||||
"gemini-3-flash-low": "gemini-3-flash",
|
||||
"gemini-3-flash-medium": "gemini-3-flash",
|
||||
"gemini-3-flash-high": "gemini-3-flash",
|
||||
|
||||
Reference in New Issue
Block a user