mirror of
https://github.com/moltbot/moltbot.git
synced 2026-05-13 15:47:28 +00:00
fix: normalize gemini 3 preview provider config
This commit is contained in:
@@ -128,6 +128,7 @@ Docs: https://docs.openclaw.ai
|
||||
- OpenAI/Codex: point gateway missing-key recovery and wizard docs at the canonical `openai/gpt-5.5` plus Codex OAuth route, and fix trajectory export errors so they suggest the valid `openclaw sessions` command.
|
||||
- Google/Gemini: normalize retired `google/gemini-3-pro-preview` primary, fallback, and model-map refs during config load and unrelated config writes so saved config keeps targeting Gemini 3.1 Pro Preview.
|
||||
- Google/Gemini: normalize retired Gemini 3 Pro Preview ids inside emitted Google provider model config, so regenerated models.json rows target `google/gemini-3.1-pro-preview`.
|
||||
- Google/Gemini: normalize retired Gemini 3 Pro Preview ids for explicit OpenAI-compatible Google and Gemini CLI provider configs, so emitted config targets `google/gemini-3.1-pro-preview`.
|
||||
- Google/Gemini: normalize retired Gemini 3 Pro Preview ids preserved from existing merged models.json providers so config emission keeps targeting `google/gemini-3.1-pro-preview`.
|
||||
- GitHub Copilot: mint short-lived Copilot API tokens with the same `vscode-chat` integration identity used by runtime requests, and refresh legacy cached tokens missing that identity so image-capable Copilot models no longer inherit the `copilot-language-server` scope. Fixes #79946, #80074. Thanks @TurboTheTurtle.
|
||||
- Plugins/doctor: drop stale managed npm install records when `openclaw doctor --fix` removes npm packages that shadow bundled plugins, so the rebuilt registry no longer resurrects the removed package metadata.
|
||||
|
||||
@@ -44,4 +44,56 @@ describe("google provider policy public artifact", () => {
|
||||
api: "openai-completions",
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes retired Google model ids even for explicit OpenAI-compatible endpoints", () => {
|
||||
expect(
|
||||
normalizeConfig({
|
||||
provider: "google",
|
||||
providerConfig: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta/openai",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "google/gemini-3-pro-preview",
|
||||
name: "Gemini 3 Pro",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1_048_576,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
],
|
||||
},
|
||||
}),
|
||||
).toMatchObject({
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta/openai",
|
||||
api: "openai-completions",
|
||||
models: [{ id: "google/gemini-3.1-pro-preview" }],
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes retired Gemini CLI config model ids before emission", () => {
|
||||
expect(
|
||||
normalizeConfig({
|
||||
provider: "google-gemini-cli",
|
||||
providerConfig: {
|
||||
baseUrl: "openclaw://google-gemini-cli",
|
||||
models: [
|
||||
{
|
||||
id: "google/gemini-3-pro-preview",
|
||||
name: "Gemini CLI 3 Pro",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1_048_576,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
],
|
||||
},
|
||||
}),
|
||||
).toMatchObject({
|
||||
baseUrl: "openclaw://google-gemini-cli",
|
||||
models: [{ id: "google/gemini-3.1-pro-preview" }],
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -10,6 +10,7 @@ type GoogleProviderConfigLike = GoogleApiCarrier & {
|
||||
};
|
||||
|
||||
export const DEFAULT_GOOGLE_API_BASE_URL = "https://generativelanguage.googleapis.com/v1beta";
|
||||
const GOOGLE_MODEL_ID_PROVIDERS = new Set(["google", "google-gemini-cli", "google-vertex"]);
|
||||
|
||||
function normalizeOptionalString(value: unknown): string | undefined {
|
||||
return typeof value === "string" && value.trim() ? value.trim() : undefined;
|
||||
@@ -152,9 +153,7 @@ export function normalizeGoogleProviderConfig(
|
||||
provider: ModelProviderConfig,
|
||||
): ModelProviderConfig {
|
||||
let nextProvider = provider;
|
||||
const shouldNormalizeModelIds =
|
||||
providerKey === "google-vertex" ||
|
||||
shouldNormalizeGoogleGenerativeAiProviderConfig(providerKey, nextProvider);
|
||||
const shouldNormalizeModelIds = GOOGLE_MODEL_ID_PROVIDERS.has(providerKey);
|
||||
|
||||
if (shouldNormalizeModelIds) {
|
||||
const modelNormalized = normalizeProviderModels(nextProvider, normalizeGoogleModelId);
|
||||
|
||||
@@ -194,6 +194,36 @@ describe("applyModelDefaults", () => {
|
||||
expect(next.models?.providers?.google?.models?.[0]?.id).toBe("google/gemini-3.1-pro-preview");
|
||||
});
|
||||
|
||||
it("normalizes provider-prefixed Gemini ids for OpenAI-compatible Google provider rows", () => {
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta/openai",
|
||||
api: "openai-completions",
|
||||
apiKey: "GOOGLE_API_KEY",
|
||||
models: [
|
||||
{
|
||||
id: "google/gemini-3-pro-preview",
|
||||
name: "Gemini 3 Pro",
|
||||
input: ["text", "image"],
|
||||
reasoning: true,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1_048_576,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig;
|
||||
|
||||
const next = applyModelDefaults(cfg);
|
||||
|
||||
expect(next.models?.providers?.google?.api).toBe("openai-completions");
|
||||
expect(next.models?.providers?.google?.models?.[0]?.id).toBe("google/gemini-3.1-pro-preview");
|
||||
});
|
||||
|
||||
it("fills missing model provider defaults", () => {
|
||||
const cfg = buildProxyProviderConfig();
|
||||
|
||||
|
||||
Reference in New Issue
Block a user