core: expose v2 model listing API (#25821)

This commit is contained in:
Dax
2026-05-13 10:43:08 -04:00
committed by GitHub
parent bebe5442a5
commit 8345152319
138 changed files with 8191 additions and 305 deletions

View File

@@ -26,6 +26,27 @@
"@types/semver": "catalog:"
},
"dependencies": {
"@ai-sdk/alibaba": "1.0.17",
"@ai-sdk/amazon-bedrock": "4.0.96",
"@ai-sdk/anthropic": "3.0.71",
"@ai-sdk/azure": "3.0.49",
"@ai-sdk/cerebras": "2.0.41",
"@ai-sdk/cohere": "3.0.27",
"@ai-sdk/deepinfra": "2.0.41",
"@ai-sdk/gateway": "3.0.104",
"@ai-sdk/google": "3.0.63",
"@ai-sdk/google-vertex": "4.0.112",
"@ai-sdk/groq": "3.0.31",
"@ai-sdk/mistral": "3.0.27",
"@ai-sdk/openai": "3.0.53",
"@ai-sdk/openai-compatible": "2.0.41",
"@ai-sdk/perplexity": "3.0.26",
"@ai-sdk/provider": "3.0.8",
"@ai-sdk/provider-utils": "4.0.23",
"@ai-sdk/togetherai": "2.0.41",
"@ai-sdk/vercel": "2.0.39",
"@ai-sdk/xai": "3.0.82",
"@aws-sdk/credential-providers": "3.993.0",
"@effect/opentelemetry": "catalog:",
"@effect/platform-node": "catalog:",
"@npmcli/arborist": "9.4.0",
@@ -34,14 +55,21 @@
"@opentelemetry/context-async-hooks": "2.6.1",
"@opentelemetry/exporter-trace-otlp-http": "0.214.0",
"@opentelemetry/sdk-trace-base": "2.6.1",
"effect": "catalog:",
"@openrouter/ai-sdk-provider": "2.8.1",
"ai-gateway-provider": "3.1.2",
"cross-spawn": "catalog:",
"effect": "catalog:",
"gitlab-ai-provider": "6.6.0",
"glob": "13.0.5",
"google-auth-library": "10.5.0",
"immer": "11.1.4",
"mime-types": "3.0.2",
"minimatch": "10.2.5",
"npm-package-arg": "13.0.2",
"semver": "^7.6.3",
"xdg-basedir": "5.1.0"
"venice-ai-sdk-provider": "2.0.1",
"xdg-basedir": "5.1.0",
"zod": "catalog:"
},
"overrides": {
"drizzle-orm": "catalog:"

172
packages/core/src/aisdk.ts Normal file
View File

@@ -0,0 +1,172 @@
export * as AISDK from "./aisdk"
import type { LanguageModelV3 } from "@ai-sdk/provider"
import { Cause, Context, Effect, Layer, Schema } from "effect"
import { ModelV2 } from "./model"
import { PluginV2 } from "./plugin"
import { ProviderV2 } from "./provider"
// Opaque handle for a plugin-constructed AI SDK provider instance; its shape
// is defined by whichever plugin answers the "aisdk.sdk" trigger, so it is
// intentionally untyped here.
type SDK = any
// Wraps a fetch Response so every SSE chunk read must complete within `ms`
// milliseconds; on timeout the whole request is aborted through `ctl`.
// Responses without a body, non-SSE responses, or a non-positive `ms` are
// returned unchanged.
function wrapSSE(res: Response, ms: number, ctl: AbortController) {
if (typeof ms !== "number" || ms <= 0) return res
if (!res.body) return res
if (!res.headers.get("content-type")?.includes("text/event-stream")) return res
const reader = res.body.getReader()
const body = new ReadableStream<Uint8Array>({
async pull(ctrl) {
// Race the underlying read against a per-chunk timer. On timeout we
// abort the request and cancel the reader so the connection is torn down.
const part = await new Promise<Awaited<ReturnType<typeof reader.read>>>((resolve, reject) => {
const id = setTimeout(() => {
const err = new Error("SSE read timed out")
ctl.abort(err)
void reader.cancel(err)
reject(err)
}, ms)
reader.read().then(
(part) => {
clearTimeout(id)
resolve(part)
},
(err) => {
clearTimeout(id)
reject(err)
},
)
})
if (part.done) {
ctrl.close()
return
}
ctrl.enqueue(part.value)
},
async cancel(reason) {
// Consumer cancelled: propagate to both the request and the source body.
ctl.abort(reason)
await reader.cancel(reason)
},
})
// Preserve the original status line and headers on the wrapped response.
return new Response(body, {
headers: new Headers(res.headers),
status: res.status,
statusText: res.statusText,
})
}
// Builds the option object handed to an AI SDK provider factory for `model`.
// Installs a custom `fetch` that (1) combines caller/overall-timeout/per-chunk
// abort signals, (2) strips item ids from non-stored OpenAI/Azure `input`
// payloads, and (3) applies the per-chunk SSE timeout via wrapSSE.
function prepareOptions(model: ModelV2.Info, pkg: string) {
const options: Record<string, any> = { name: model.providerID, ...model.options.aisdk.provider }
if (model.endpoint.type === "aisdk" && model.endpoint.url) options.baseURL = model.endpoint.url
const customFetch = options.fetch
// chunkTimeout is consumed here and must not leak into provider options.
const chunkTimeout = options.chunkTimeout
delete options.chunkTimeout
options.fetch = async (input: Parameters<typeof fetch>[0], init?: RequestInit) => {
const opts = { ...(init ?? {}) }
// Collect abort sources: the caller's signal, a fresh controller for the
// chunk timeout, and an overall request timeout when configured.
const signals = [
opts.signal,
typeof chunkTimeout === "number" && chunkTimeout > 0 ? new AbortController() : undefined,
options.timeout !== undefined && options.timeout !== null && options.timeout !== false
? AbortSignal.timeout(options.timeout)
: undefined,
].filter((item): item is AbortSignal | AbortController => Boolean(item))
const chunkAbortCtl = signals.find((item): item is AbortController => item instanceof AbortController)
const abortSignals = signals.map((item) => (item instanceof AbortController ? item.signal : item))
if (abortSignals.length === 1) opts.signal = abortSignals[0]
if (abortSignals.length > 1) opts.signal = AbortSignal.any(abortSignals)
// For OpenAI/Azure, strip item ids from non-stored `input` payloads
// before sending (ids are only valid when store === true).
if ((pkg === "@ai-sdk/openai" || pkg === "@ai-sdk/azure") && opts.body && opts.method === "POST") {
const body = JSON.parse(opts.body as string)
if (body.store !== true && Array.isArray(body.input)) {
for (const item of body.input) {
if ("id" in item) delete item.id
}
opts.body = JSON.stringify(body)
}
}
// timeout: false disables any downstream timeout; ours is handled above.
const res = await (typeof customFetch === "function" ? customFetch : fetch)(input, {
...opts,
timeout: false,
})
if (!chunkAbortCtl || typeof chunkTimeout !== "number") return res
return wrapSSE(res, chunkTimeout, chunkAbortCtl)
}
return options
}
// Raised when an AI SDK provider instance or language model cannot be created.
export class InitError extends Schema.TaggedErrorClass<InitError>()("AISDK.InitError", {
providerID: ProviderV2.ID,
cause: Schema.Defect,
}) {}
// Converts any failure cause of an effect into an InitError for `providerID`.
function initError(providerID: ProviderV2.ID) {
return Effect.catchCause((cause) => Effect.fail(new InitError({ providerID, cause: Cause.squash(cause) })))
}
export interface Interface {
// Resolves (and memoizes) the AI SDK language model for a catalog model.
readonly language: (model: ModelV2.Info) => Effect.Effect<LanguageModelV3, InitError>
}
export class Service extends Context.Service<Service, Interface>()("@opencode/v2/AISDK") {}
// Live implementation: lazily constructs SDK instances and language models via
// plugin triggers, memoizing both across calls.
export const layer = Layer.effect(
Service,
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
// Caches: language models keyed by provider/model/variant, SDK instances
// keyed by the full serialized provider configuration.
const languages = new Map<string, LanguageModelV3>()
const sdks = new Map<string, SDK>()
return Service.of({
language: Effect.fn("AISDK.language")(function* (model) {
const key = `${model.providerID}/${model.id}/${model.options.variant ?? "default"}`
const existing = languages.get(key)
if (existing) return existing
if (model.endpoint.type !== "aisdk")
return yield* new InitError({
providerID: model.providerID,
cause: new Error(`Unsupported endpoint ${model.endpoint.type}`),
})
const options = prepareOptions(model, model.endpoint.package)
const sdkKey = JSON.stringify({
providerID: model.providerID,
endpoint: model.endpoint,
options,
})
// Reuse a cached SDK when the configuration matches; otherwise ask
// plugins to construct one via the "aisdk.sdk" trigger.
const sdk =
sdks.get(sdkKey) ??
(yield* plugin
.trigger("aisdk.sdk", { model, package: model.endpoint.package, options }, {})
.pipe(initError(model.providerID))).sdk
if (!sdk)
return yield* new InitError({
providerID: model.providerID,
cause: new Error("No AISDK provider plugin returned an SDK"),
})
sdks.set(sdkKey, sdk)
// Plugins may override language-model creation; otherwise fall back
// to the SDK's own languageModel factory below.
const result = yield* plugin
.trigger(
"aisdk.language",
{
model,
sdk,
options,
},
{},
)
.pipe(initError(model.providerID))
const language = yield* Effect.sync(() => result.language ?? sdk.languageModel(model.apiID)).pipe(
initError(model.providerID),
)
languages.set(key, language)
return language
}),
})
}),
)
// Layer with the default plugin dependencies pre-wired.
export const defaultLayer = layer.pipe(Layer.provide(PluginV2.defaultLayer))

264
packages/core/src/auth.ts Normal file
View File

@@ -0,0 +1,264 @@
import path from "path"
import { Effect, Layer, Option, Schema, Context, SynchronizedRef } from "effect"
import { Identifier } from "./util/identifier"
import { NonNegativeInt, withStatics } from "./schema"
import { Global } from "./global"
import { AppFileSystem } from "./filesystem"
// Placeholder API key used for OAuth-backed accounts where downstream code
// still expects a key-shaped value. NOTE(review): exact consumer not visible
// in this file — confirm against callers.
export const OAUTH_DUMMY_KEY = "opencode-oauth-dummy-key"
// Branded account identifier ("acc_" + monotonically ascending suffix).
const AccountID = Schema.String.pipe(
Schema.brand("AccountID"),
withStatics((schema) => ({ create: () => schema.make("acc_" + Identifier.ascending()) })),
)
export type AccountID = typeof AccountID.Type
// Identifies the external service a stored credential authenticates against.
export const ServiceID = Schema.String.pipe(Schema.brand("ServiceID"))
export type ServiceID = typeof ServiceID.Type
// OAuth credential: refresh/access token pair plus expiry timestamp.
export class OAuthCredential extends Schema.Class<OAuthCredential>("AuthV2.OAuthCredential")({
type: Schema.Literal("oauth"),
refresh: Schema.String,
access: Schema.String,
expires: NonNegativeInt,
}) {}
// Plain API-key credential with optional free-form string metadata.
export class ApiKeyCredential extends Schema.Class<ApiKeyCredential>("AuthV2.ApiKeyCredential")({
type: Schema.Literal("api"),
key: Schema.String,
metadata: Schema.optional(Schema.Record(Schema.String, Schema.String)),
}) {}
// Tagged union discriminated on "type".
export const Credential = Schema.Union([OAuthCredential, ApiKeyCredential])
.pipe(Schema.toTaggedUnion("type"))
.annotate({
identifier: "AuthV2.Credential",
})
export type Credential = Schema.Schema.Type<typeof Credential>
// A stored account: credential plus the service it belongs to.
export class Account extends Schema.Class<Account>("AuthV2.Account")({
id: AccountID,
serviceID: ServiceID,
description: Schema.String,
credential: Credential,
}) {}
export class AuthFileWriteError extends Schema.TaggedErrorClass<AuthFileWriteError>()("AuthV2.FileWriteError", {
operation: Schema.Union([Schema.Literal("migrate"), Schema.Literal("write")]),
cause: Schema.Defect,
}) {}
export type AuthError = AuthFileWriteError
// On-disk shape of auth-v2.json.
interface Writable {
version: 2
accounts: Record<string, Account>
active: Record<string, AccountID>
}
// Decoder for the legacy v1 file format (serviceID -> credential map).
const decodeV1 = Schema.decodeUnknownOption(Schema.Record(Schema.String, Credential))
// Converts a legacy v1 auth file (serviceID -> credential) into the v2
// Writable shape, creating one "default" account per service and marking it
// active. Entries that fail to decode as a known credential are dropped.
function migrate(old: Record<string, unknown>): Writable {
const accounts: Record<string, Account> = {}
const active: Record<string, AccountID> = {}
for (const [serviceID, value] of Object.entries(old)) {
// Decode each entry individually so one bad entry does not reject the rest.
const decoded = Option.getOrElse(decodeV1({ [serviceID]: value }), () => ({}))
const parsed = (decoded as Record<string, Credential>)[serviceID]
if (!parsed) continue
const id = Identifier.ascending()
const accountID = AccountID.make(id)
const brandedServiceID = ServiceID.make(serviceID)
accounts[id] = new Account({
id: accountID,
serviceID: brandedServiceID,
description: "default",
credential: parsed,
})
active[brandedServiceID] = accountID
}
return { version: 2, accounts, active }
}
// Public Auth API; mutations are serialized through a SynchronizedRef in the
// live layer below.
export interface Interface {
readonly get: (accountID: AccountID) => Effect.Effect<Account | undefined, AuthError>
readonly all: () => Effect.Effect<Account[], AuthError>
readonly create: (input: {
serviceID: ServiceID
credential: Credential
description?: string
active?: boolean
}) => Effect.Effect<Account, AuthError>
readonly update: (
accountID: AccountID,
updates: Partial<Pick<Account, "description" | "credential">>,
) => Effect.Effect<void, AuthError>
readonly remove: (accountID: AccountID) => Effect.Effect<void, AuthError>
// Marks an account as the active one for its service.
readonly activate: (accountID: AccountID) => Effect.Effect<void, AuthError>
// Active account for a service, falling back to any account of that service.
readonly active: (serviceID: ServiceID) => Effect.Effect<Account | undefined, AuthError>
readonly forService: (serviceID: ServiceID) => Effect.Effect<Account[], AuthError>
}
export class Service extends Context.Service<Service, Interface>()("@opencode/v2/Auth") {}
// Live Auth service: persists accounts to <data>/auth-v2.json with mode 0600,
// migrating from the legacy auth.json once, and honoring an override via the
// OPENCODE_AUTH_CONTENT environment variable.
export const layer = Layer.effect(
Service,
Effect.gen(function* () {
const fsys = yield* AppFileSystem.Service
const global = yield* Global.Service
const file = path.join(global.data, "auth-v2.json")
const legacyFile = path.join(global.data, "auth.json")
// Migrates a raw v1 payload and persists the result before returning it.
const writeMigrated = Effect.fnUntraced(function* (raw: Record<string, unknown>) {
const migrated = migrate(raw)
yield* fsys
.writeJson(file, migrated, 0o600)
.pipe(Effect.mapError((cause) => new AuthFileWriteError({ operation: "migrate", cause })))
return migrated
})
// Best-effort parse of OPENCODE_AUTH_CONTENT; returns undefined for
// invalid JSON rather than failing.
const parseAuthContent = () => {
try {
return JSON.parse(process.env.OPENCODE_AUTH_CONTENT ?? "")
} catch {}
}
// Load precedence: env override, then legacy v1 file (migrated + written),
// then the v2 file, then an empty store.
const load: () => Effect.Effect<Writable, AuthError> = Effect.fnUntraced(function* () {
if (process.env.OPENCODE_AUTH_CONTENT) {
const raw = parseAuthContent()
if (raw && typeof raw === "object") {
if ("version" in raw && raw.version === 2) return raw as Writable
return yield* writeMigrated(raw as Record<string, unknown>)
}
return { version: 2, accounts: {}, active: {} }
}
const legacy = yield* fsys.readJson(legacyFile).pipe(Effect.orElseSucceed(() => null))
if (legacy && typeof legacy === "object") return yield* writeMigrated(legacy as Record<string, unknown>)
const raw = yield* fsys.readJson(file).pipe(Effect.orElseSucceed(() => null))
if (raw && typeof raw === "object") {
if ("version" in raw && raw.version === 2) return raw as Writable
return yield* writeMigrated(raw as Record<string, unknown>)
}
return { version: 2, accounts: {}, active: {} }
})
const write = (data: Writable) =>
fsys
.writeJson(file, data, 0o600)
.pipe(Effect.mapError((cause) => new AuthFileWriteError({ operation: "write", cause })))
// All mutations go through this ref; each modifyEffect persists the new
// state to disk before committing it in memory.
const state = SynchronizedRef.makeUnsafe(yield* load())
const result: Interface = {
get: Effect.fn("AuthV2.get")(function* (accountID) {
return (yield* SynchronizedRef.get(state)).accounts[accountID]
}),
all: Effect.fn("AuthV2.all")(function* () {
return Object.values((yield* SynchronizedRef.get(state)).accounts)
}),
// Explicit active pointer wins; otherwise any account for the service.
active: Effect.fn("AuthV2.active")(function* (serviceID) {
const data = yield* SynchronizedRef.get(state)
return (
data.accounts[data.active[serviceID]] ?? Object.values(data.accounts).find((a) => a.serviceID === serviceID)
)
}),
forService: Effect.fn("AuthV2.list")(function* (serviceID) {
return Object.values((yield* SynchronizedRef.get(state)).accounts).filter((a) => a.serviceID === serviceID)
}),
// New accounts become active when requested, or when the service has no
// existing accounts.
create: Effect.fn("AuthV2.add")(function* (input) {
return yield* SynchronizedRef.modifyEffect(
state,
Effect.fnUntraced(function* (data) {
const account = new Account({
id: AccountID.make(Identifier.ascending()),
serviceID: input.serviceID,
description: input.description ?? "default",
credential: input.credential,
})
const next = {
...data,
accounts: { ...data.accounts, [account.id]: account },
active:
(input.active ?? Object.values(data.accounts).every((a) => a.serviceID !== input.serviceID))
? { ...data.active, [input.serviceID]: account.id }
: data.active,
}
yield* write(next)
return [account, next] as const
}),
)
}),
// Partial update of description/credential; no-op for unknown accounts.
update: Effect.fn("AuthV2.update")(function* (accountID, updates) {
yield* SynchronizedRef.modifyEffect(
state,
Effect.fnUntraced(function* (data) {
const existing = data.accounts[accountID]
if (!existing) return [undefined, data] as const
const next = {
...data,
accounts: {
...data.accounts,
[accountID]: new Account({
id: accountID,
serviceID: existing.serviceID,
description: updates.description ?? existing.description,
credential: updates.credential ?? existing.credential,
}),
},
}
yield* write(next)
return [undefined, next] as const
}),
)
}),
// Removes the account and clears the active pointer if it referenced it.
remove: Effect.fn("AuthV2.remove")(function* (accountID) {
yield* SynchronizedRef.modifyEffect(
state,
Effect.fnUntraced(function* (data) {
const accounts = { ...data.accounts }
const active = { ...data.active }
if (accounts[accountID] && active[accounts[accountID].serviceID] === accountID)
delete active[accounts[accountID].serviceID]
delete accounts[accountID]
const next = { ...data, accounts, active }
yield* write(next)
return [undefined, next] as const
}),
)
}),
// No-op for unknown accounts; otherwise points the service at accountID.
activate: Effect.fn("AuthV2.activate")(function* (accountID) {
yield* SynchronizedRef.modifyEffect(
state,
Effect.fnUntraced(function* (data) {
const account = data.accounts[accountID]
if (!account) return [undefined, data] as const
const next = { ...data, active: { ...data.active, [account.serviceID]: accountID } }
yield* write(next)
return [undefined, next] as const
}),
)
}),
}
return Service.of(result)
}),
)
export const defaultLayer = layer.pipe(Layer.provide(AppFileSystem.defaultLayer), Layer.provide(Global.defaultLayer))
export * as AuthV2 from "./auth"

View File

@@ -0,0 +1,258 @@
export * as Catalog from "./catalog"
import { Context, Effect, HashMap, Layer, Option, Order, pipe, Schema, Array } from "effect"
import { produce, type Draft } from "immer"
import { ModelV2 } from "./model"
import { PluginV2 } from "./plugin"
import { ProviderV2 } from "./provider"
// Internal per-provider cache entry: provider info plus its models by id.
type ProviderRecord = {
provider: ProviderV2.Info
models: HashMap.HashMap<ModelV2.ID, ModelV2.Info>
}
// Looked up a provider that is not registered in the catalog.
export class ProviderNotFoundError extends Schema.TaggedErrorClass<ProviderNotFoundError>()(
"CatalogV2.ProviderNotFound",
{
providerID: ProviderV2.ID,
},
) {}
// Provider exists but has no model with the given id.
export class ModelNotFoundError extends Schema.TaggedErrorClass<ModelNotFoundError>()("CatalogV2.ModelNotFound", {
providerID: ProviderV2.ID,
modelID: ModelV2.ID,
}) {}
// Public catalog API for providers and models.
export interface Interface {
readonly provider: {
readonly get: (providerID: ProviderV2.ID) => Effect.Effect<ProviderV2.Info, ProviderNotFoundError>
// Upserts the provider: creates it when missing, then applies `fn` to an
// immer draft.
readonly update: (providerID: ProviderV2.ID, fn: (provider: Draft<ProviderV2.Info>) => void) => Effect.Effect<void>
readonly all: () => Effect.Effect<ProviderV2.Info[]>
// Only providers with enabled === true.
readonly available: () => Effect.Effect<ProviderV2.Info[]>
}
readonly model: {
readonly get: (
providerID: ProviderV2.ID,
modelID: ModelV2.ID,
) => Effect.Effect<ModelV2.Info, ProviderNotFoundError | ModelNotFoundError>
// Upserts the model (provider must already exist).
readonly update: (
providerID: ProviderV2.ID,
modelID: ModelV2.ID,
fn: (model: Draft<ModelV2.Info>) => void,
) => Effect.Effect<void, ProviderNotFoundError>
readonly all: () => Effect.Effect<ModelV2.Info[]>
readonly available: () => Effect.Effect<ModelV2.Info[]>
readonly default: () => Effect.Effect<Option.Option<ModelV2.Info>>
readonly setDefault: (
providerID: ProviderV2.ID,
modelID: ModelV2.ID,
) => Effect.Effect<void, ProviderNotFoundError | ModelNotFoundError>
// Heuristic "small/cheap" model for a provider; none when the provider is
// unknown or has no suitable model.
readonly small: (providerID: ProviderV2.ID) => Effect.Effect<Option.Option<ModelV2.Info>>
}
}
export class Service extends Context.Service<Service, Interface>()("@opencode/v2/Catalog") {}
// Live catalog: an in-memory registry of providers and models, mutated via
// immer drafts and announced through plugin triggers on every update.
export const layer = Layer.effect(
Service,
Effect.gen(function* () {
let records = HashMap.empty<ProviderV2.ID, ProviderRecord>()
let defaultModel: { providerID: ProviderV2.ID; modelID: ModelV2.ID } | undefined
const plugin = yield* PluginV2.Service
// Merges provider-level endpoint/options into a model before returning it,
// so callers always receive fully-resolved model info.
const resolve = (model: ModelV2.Info) => {
const provider = Option.getOrThrow(HashMap.get(records, model.providerID)).provider
const endpoint =
model.endpoint.type === "unknown"
? provider.endpoint
: model.endpoint.type === "aisdk" && provider.endpoint.type === "aisdk" && !model.endpoint.url
? { ...model.endpoint, url: provider.endpoint.url }
: model.endpoint
// Model-level options override provider-level ones key by key.
const options = {
headers: {
...provider.options.headers,
...model.options.headers,
},
body: {
...provider.options.body,
...model.options.body,
},
aisdk: {
provider: {
...provider.options.aisdk.provider,
...model.options.aisdk.provider,
},
request: model.options.aisdk.request,
},
variant: model.options.variant,
}
return new ModelV2.Info({
...model,
endpoint,
options,
})
}
// Fails with ProviderNotFoundError when the provider is not registered.
function* getRecord(providerID: ProviderV2.ID) {
const match = HashMap.get(records, providerID)
if (!match.valueOrUndefined) return yield* new ProviderNotFoundError({ providerID })
return match.value
}
const result: Interface = {
provider: {
get: Effect.fn("CatalogV2.provider.get")(function* (providerID) {
const record = yield* getRecord(providerID)
return record.provider
}),
// Upsert; a baseURL set via aisdk options is promoted to the endpoint
// url so it is not forwarded to the SDK twice.
update: Effect.fnUntraced(function* (providerID, fn) {
const current = Option.getOrUndefined(HashMap.get(records, providerID))
const provider = produce(current?.provider ?? ProviderV2.Info.empty(providerID), (draft) => {
fn(draft)
if (draft.endpoint.type === "aisdk" && typeof draft.options.aisdk.provider.baseURL === "string") {
draft.endpoint.url = draft.options.aisdk.provider.baseURL
delete draft.options.aisdk.provider.baseURL
}
})
// NOTE(review): unlike model.update below, updated.cancel is not
// honored here — confirm whether provider updates are cancellable.
const updated = yield* plugin.trigger("provider.update", {}, { provider, cancel: false })
records = HashMap.set(records, providerID, {
provider: updated.provider,
models: current?.models ?? HashMap.empty<ModelV2.ID, ModelV2.Info>(),
})
}),
all: Effect.fn("CatalogV2.provider.all")(function* () {
return globalThis.Array.from(HashMap.values(records)).map((record) => record.provider)
}),
available: Effect.fn("CatalogV2.provider.available")(function* () {
return globalThis.Array.from(HashMap.values(records))
.map((record) => record.provider)
.filter((provider) => provider.enabled)
}),
},
model: {
get: Effect.fn("CatalogV2.model.get")(function* (providerID, modelID) {
const record = yield* getRecord(providerID)
const model = Option.getOrUndefined(HashMap.get(record.models, modelID))
if (!model) return yield* new ModelNotFoundError({ providerID, modelID })
return resolve(model)
}),
// Upsert; plugins may veto the update by setting cancel.
update: Effect.fnUntraced(function* (providerID, modelID, fn) {
const record = yield* getRecord(providerID)
const model = produce(
HashMap.get(record.models, modelID).pipe(Option.getOrElse(() => ModelV2.Info.empty(providerID, modelID))),
(draft) => {
fn(draft)
if (draft.endpoint.type === "aisdk" && typeof draft.options.aisdk.provider.baseURL === "string") {
draft.endpoint.url = draft.options.aisdk.provider.baseURL
delete draft.options.aisdk.provider.baseURL
}
},
)
const updated = yield* plugin.trigger("model.update", {}, { model, cancel: false })
if (updated.cancel) return
// id/providerID are pinned so plugins cannot re-key the model.
records = HashMap.set(records, providerID, {
provider: record.provider,
models: HashMap.set(
record.models,
modelID,
new ModelV2.Info({ ...updated.model, id: modelID, providerID }),
),
})
return
}),
// All models across providers, newest release first.
all: Effect.fn("CatalogV2.model.all")(function* () {
return pipe(
records,
HashMap.toValues,
Array.flatMap((record) => HashMap.toValues(record.models)),
Array.map(resolve),
Array.sortWith((item) => item.time.released.epochMilliseconds, Order.flip(Order.Number)),
)
}),
// Models whose provider and self are both enabled.
available: Effect.fn("CatalogV2.model.available")(function* () {
return (yield* result.model.all()).filter((model) => {
const record = Option.getOrUndefined(HashMap.get(records, model.providerID))
return record?.provider.enabled !== false && model.enabled
})
}),
// The explicitly-set default when still resolvable and enabled,
// otherwise the most recently released available model.
default: Effect.fn("CatalogV2.model.default")(function* () {
if (defaultModel) {
const model = yield* result.model.get(defaultModel.providerID, defaultModel.modelID).pipe(Effect.option)
if (Option.isSome(model) && model.value.enabled) return model
}
return pipe(
yield* result.model.available(),
Array.sortWith((item) => item.time.released.epochMilliseconds, Order.flip(Order.Number)),
Array.head,
)
}),
// Validates existence before recording the default.
setDefault: Effect.fn("CatalogV2.model.setDefault")(function* (providerID, modelID) {
yield* result.model.get(providerID, modelID)
defaultModel = { providerID, modelID }
}),
// Heuristic pick of a cheap/fast "small" model: prefer gpt-5-nano on
// the opencode provider, otherwise score active text-in/text-out
// models by normalized cost (80%) and age in ~months (20%).
small: Effect.fn("CatalogV2.model.small")(function* (providerID) {
const record = Option.getOrUndefined(HashMap.get(records, providerID))
if (!record) return Option.none<ModelV2.Info>()
if (providerID === ProviderV2.ID.opencode) {
const gpt5Nano = Option.getOrUndefined(HashMap.get(record.models, ModelV2.ID.make("gpt-5-nano")))
if (gpt5Nano?.enabled && gpt5Nano.status === "active") return Option.some(resolve(gpt5Nano))
}
const candidates = pipe(
HashMap.toValues(record.models),
Array.filter(
(model) =>
model.providerID === providerID &&
model.enabled &&
model.status === "active" &&
model.capabilities.input.some((item) => item.startsWith("text")) &&
model.capabilities.output.some((item) => item.startsWith("text")),
),
Array.map((model) => ({
model,
cost: model.cost[0] ? model.cost[0].input + model.cost[0].output : 999,
age: (Date.now() - model.time.released.epochMilliseconds) / (1000 * 60 * 60 * 24 * 30),
small: SMALL_MODEL_RE.test(`${model.id} ${model.family ?? ""} ${model.name}`.toLowerCase()),
})),
// Free models and anything older than ~18 months are excluded.
Array.filter((item) => item.cost > 0 && item.age <= 18),
)
const pick = (items: typeof candidates) => {
const maxCost = Math.max(...items.map((item) => item.cost), 0.01)
const maxAge = Math.max(...items.map((item) => item.age), 0.01)
return pipe(
items,
Array.sortWith((item) => (item.cost / maxCost) * 0.8 + (item.age / maxAge) * 0.2, Order.Number),
Array.map((item) => resolve(item.model)),
Array.head,
)
}
// Prefer name-matched "small" models; fall back to all candidates.
return pipe(
candidates,
Array.filter((item) => item.small),
(items) => (items.length > 0 ? pick(items) : pick(candidates)),
)
}),
},
}
return Service.of(result)
}),
)
// Matches common "small/fast" naming tokens in the model id, family, or name.
const SMALL_MODEL_RE = /\b(nano|flash|lite|mini|haiku|small|fast)\b/
export const defaultLayer = layer.pipe(Layer.provide(PluginV2.defaultLayer))

View File

@@ -0,0 +1,5 @@
This is a temporary package used primarily for GitHub Copilot compatibility.
These overrides DO NOT apply to openai-compatible providers or to the majority of providers supporting the completions/responses APIs. THIS IS ONLY FOR GITHUB COPILOT!!!
Avoid making edits to these files.

View File

@@ -0,0 +1,170 @@
import {
type LanguageModelV3Prompt,
type SharedV3ProviderOptions,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types"
import { convertToBase64 } from "@ai-sdk/provider-utils"
/**
 * Extracts Copilot-specific provider options attached to a prompt message or
 * content part, falling back to an empty object when none are present.
 */
function getOpenAIMetadata(message: { providerOptions?: SharedV3ProviderOptions }) {
  const copilotOptions = message?.providerOptions?.copilot
  return copilotOptions ?? {}
}
/**
 * Converts an AI SDK LanguageModelV3Prompt into the OpenAI-compatible chat
 * message format used by GitHub Copilot, carrying through any
 * providerOptions.copilot metadata on messages and content parts.
 */
export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Prompt): OpenAICompatibleChatPrompt {
const messages: OpenAICompatibleChatPrompt = []
for (const { role, content, ...message } of prompt) {
const metadata = getOpenAIMetadata({ ...message })
switch (role) {
case "system": {
messages.push({
role: "system",
content: content,
...metadata,
})
break
}
case "user": {
// A single text part collapses to a plain string content field.
if (content.length === 1 && content[0].type === "text") {
messages.push({
role: "user",
content: content[0].text,
...getOpenAIMetadata(content[0]),
})
break
}
messages.push({
role: "user",
content: content.map((part) => {
const partMetadata = getOpenAIMetadata(part)
switch (part.type) {
case "text": {
return { type: "text", text: part.text, ...partMetadata }
}
case "file": {
// Only image files are supported: URL data passes through,
// raw data is inlined as a base64 data URL.
if (part.mediaType.startsWith("image/")) {
const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
return {
type: "image_url",
image_url: {
url:
part.data instanceof URL
? part.data.toString()
: `data:${mediaType};base64,${convertToBase64(part.data)}`,
},
...partMetadata,
}
} else {
throw new UnsupportedFunctionalityError({
functionality: `file part media type ${part.mediaType}`,
})
}
}
}
}),
...metadata,
})
break
}
case "assistant": {
// Flatten assistant parts into concatenated text plus tool calls,
// collecting Copilot reasoning fields along the way.
let text = ""
let reasoningText: string | undefined
let reasoningOpaque: string | undefined
const toolCalls: Array<{
id: string
type: "function"
function: { name: string; arguments: string }
}> = []
for (const part of content) {
const partMetadata = getOpenAIMetadata(part)
// Check for reasoningOpaque on any part (may be attached to text/tool-call)
const partOpaque = (part.providerOptions as { copilot?: { reasoningOpaque?: string } })?.copilot
?.reasoningOpaque
if (partOpaque && !reasoningOpaque) {
reasoningOpaque = partOpaque
}
switch (part.type) {
case "text": {
text += part.text
break
}
case "reasoning": {
if (part.text) reasoningText = part.text
break
}
case "tool-call": {
toolCalls.push({
id: part.toolCallId,
type: "function",
function: {
name: part.toolName,
arguments: JSON.stringify(part.input),
},
...partMetadata,
})
break
}
}
}
// reasoning_text is only sent alongside an opaque blob — presumably
// Copilot rejects it on its own; TODO confirm against the Copilot API.
messages.push({
role: "assistant",
content: text || null,
tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
reasoning_text: reasoningOpaque ? reasoningText : undefined,
reasoning_opaque: reasoningOpaque,
...metadata,
})
break
}
case "tool": {
for (const toolResponse of content) {
// Approval responses are protocol-internal and not forwarded.
if (toolResponse.type === "tool-approval-response") {
continue
}
const output = toolResponse.output
let contentValue: string
switch (output.type) {
case "text":
case "error-text":
contentValue = output.value
break
case "execution-denied":
contentValue = output.reason ?? "Tool execution denied."
break
case "content":
case "json":
case "error-json":
contentValue = JSON.stringify(output.value)
break
}
const toolResponseMetadata = getOpenAIMetadata(toolResponse)
messages.push({
role: "tool",
tool_call_id: toolResponse.toolCallId,
content: contentValue,
...toolResponseMetadata,
})
}
break
}
default: {
// Exhaustiveness guard: fails to compile when a new role is added.
const _exhaustiveCheck: never = role
throw new Error(`Unsupported role: ${_exhaustiveCheck}`)
}
}
}
return messages
}

View File

@@ -0,0 +1,15 @@
/**
 * Normalizes the id/model/created fields of an OpenAI-compatible response
 * into the AI SDK response-metadata shape: null values are coerced to
 * undefined and the `created` epoch-seconds value becomes a Date.
 */
export function getResponseMetadata({
  id,
  model,
  created,
}: {
  id?: string | undefined | null
  created?: number | undefined | null
  model?: string | undefined | null
}) {
  const timestamp = created == null ? undefined : new Date(created * 1000)
  return {
    id: id == null ? undefined : id,
    modelId: model == null ? undefined : model,
    timestamp,
  }
}

View File

@@ -0,0 +1,19 @@
import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"
/**
 * Translates an OpenAI-compatible `finish_reason` string into the AI SDK's
 * unified finish-reason vocabulary; unknown or missing values map to "other".
 */
export function mapOpenAICompatibleFinishReason(
  finishReason: string | null | undefined,
): LanguageModelV3FinishReason["unified"] {
  // Map (not a plain object) to avoid prototype-key collisions like "toString".
  const known = new Map<string, "stop" | "length" | "content-filter" | "tool-calls">([
    ["stop", "stop"],
    ["length", "length"],
    ["content_filter", "content-filter"],
    ["function_call", "tool-calls"],
    ["tool_calls", "tool-calls"],
  ])
  return (finishReason != null ? known.get(finishReason) : undefined) ?? "other"
}

View File

@@ -0,0 +1,64 @@
import type { JSONValue } from "@ai-sdk/provider"
// Prompt/message wire types for the Copilot OpenAI-compatible chat API.
export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>
export type OpenAICompatibleMessage =
| OpenAICompatibleSystemMessage
| OpenAICompatibleUserMessage
| OpenAICompatibleAssistantMessage
| OpenAICompatibleToolMessage
// Allow for arbitrary additional properties for general purpose
// provider-metadata-specific extensibility.
type JsonRecord<T = never> = Record<string, JSONValue | JSONValue[] | T | T[] | undefined>
export interface OpenAICompatibleSystemMessage extends JsonRecord<OpenAICompatibleSystemContentPart> {
role: "system"
content: string | Array<OpenAICompatibleSystemContentPart>
}
export interface OpenAICompatibleSystemContentPart extends JsonRecord {
type: "text"
text: string
}
export interface OpenAICompatibleUserMessage extends JsonRecord<OpenAICompatibleContentPart> {
role: "user"
content: string | Array<OpenAICompatibleContentPart>
}
export type OpenAICompatibleContentPart = OpenAICompatibleContentPartText | OpenAICompatibleContentPartImage
export interface OpenAICompatibleContentPartImage extends JsonRecord {
type: "image_url"
// url may be an https URL or an inline base64 data URL.
image_url: { url: string }
}
export interface OpenAICompatibleContentPartText extends JsonRecord {
type: "text"
text: string
}
export interface OpenAICompatibleAssistantMessage extends JsonRecord<OpenAICompatibleMessageToolCall> {
role: "assistant"
content?: string | null
tool_calls?: Array<OpenAICompatibleMessageToolCall>
// Copilot-specific reasoning fields
reasoning_text?: string
reasoning_opaque?: string
}
export interface OpenAICompatibleMessageToolCall extends JsonRecord {
type: "function"
id: string
function: {
// JSON-encoded arguments string, per the OpenAI tool-call format.
arguments: string
name: string
}
}
export interface OpenAICompatibleToolMessage extends JsonRecord {
role: "tool"
content: string
tool_call_id: string
}

View File

@@ -0,0 +1,815 @@
import {
APICallError,
InvalidResponseDataError,
type LanguageModelV3,
type LanguageModelV3CallOptions,
type LanguageModelV3Content,
type LanguageModelV3StreamPart,
type SharedV3ProviderMetadata,
type SharedV3Warning,
} from "@ai-sdk/provider"
import {
combineHeaders,
createEventSourceResponseHandler,
createJsonErrorResponseHandler,
createJsonResponseHandler,
type FetchFunction,
generateId,
isParsableJson,
parseProviderOptions,
type ParseResult,
postJsonToApi,
type ResponseHandler,
} from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages"
import { getResponseMetadata } from "./get-response-metadata"
import { mapOpenAICompatibleFinishReason } from "./map-openai-compatible-finish-reason"
import { type OpenAICompatibleChatModelId, openaiCompatibleProviderOptions } from "./openai-compatible-chat-options"
import { defaultOpenAICompatibleErrorStructure, type ProviderErrorStructure } from "../openai-compatible-error"
import type { MetadataExtractor } from "./openai-compatible-metadata-extractor"
import { prepareTools } from "./openai-compatible-prepare-tools"
// Configuration for the Copilot OpenAI-compatible chat language model.
export type OpenAICompatibleChatConfig = {
provider: string
// Evaluated per call, so auth headers can refresh between requests.
headers: () => Record<string, string | undefined>
// Builds the request URL for a given model id and API path.
url: (options: { modelId: string; path: string }) => string
fetch?: FetchFunction
includeUsage?: boolean
// Provider-specific error body shape; defaults to the OpenAI structure.
errorStructure?: ProviderErrorStructure<any>
metadataExtractor?: MetadataExtractor
/**
* Whether the model supports structured outputs.
*/
supportsStructuredOutputs?: boolean
/**
* The supported URLs for the model.
*/
supportedUrls?: () => LanguageModelV3["supportedUrls"]
}
export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
readonly specificationVersion = "v3"
readonly supportsStructuredOutputs: boolean
readonly modelId: OpenAICompatibleChatModelId
private readonly config: OpenAICompatibleChatConfig
private readonly failedResponseHandler: ResponseHandler<APICallError>
private readonly chunkSchema // type inferred via constructor
// Wires up error handling and the streaming chunk schema from the (possibly
// provider-specific) error structure.
constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig) {
this.modelId = modelId
this.config = config
// initialize error handling:
const errorStructure = config.errorStructure ?? defaultOpenAICompatibleErrorStructure
this.chunkSchema = createOpenAICompatibleChatChunkSchema(errorStructure.errorSchema)
this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure)
this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false
}
get provider(): string {
return this.config.provider
}
private get providerOptionsName(): string {
return this.config.provider.split(".")[0].trim()
}
get supportedUrls() {
return this.config.supportedUrls?.() ?? {}
}
private async getArgs({
prompt,
maxOutputTokens,
temperature,
topP,
topK,
frequencyPenalty,
presencePenalty,
providerOptions,
stopSequences,
responseFormat,
seed,
toolChoice,
tools,
}: LanguageModelV3CallOptions) {
const warnings: SharedV3Warning[] = []
// Parse provider options
const compatibleOptions = Object.assign(
(await parseProviderOptions({
provider: "copilot",
providerOptions,
schema: openaiCompatibleProviderOptions,
})) ?? {},
(await parseProviderOptions({
provider: this.providerOptionsName,
providerOptions,
schema: openaiCompatibleProviderOptions,
})) ?? {},
)
if (topK != null) {
warnings.push({ type: "unsupported", feature: "topK" })
}
if (responseFormat?.type === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
warnings.push({
type: "unsupported",
feature: "responseFormat",
details: "JSON response format schema is only supported with structuredOutputs",
})
}
const {
tools: openaiTools,
toolChoice: openaiToolChoice,
toolWarnings,
} = prepareTools({
tools,
toolChoice,
})
return {
args: {
// model id:
model: this.modelId,
// model specific settings:
user: compatibleOptions.user,
// standardized settings:
max_tokens: maxOutputTokens,
temperature,
top_p: topP,
frequency_penalty: frequencyPenalty,
presence_penalty: presencePenalty,
response_format:
responseFormat?.type === "json"
? this.supportsStructuredOutputs === true && responseFormat.schema != null
? {
type: "json_schema",
json_schema: {
schema: responseFormat.schema,
name: responseFormat.name ?? "response",
description: responseFormat.description,
},
}
: { type: "json_object" }
: undefined,
stop: stopSequences,
seed,
...Object.fromEntries(
Object.entries(providerOptions?.[this.providerOptionsName] ?? {}).filter(
([key]) => !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
),
),
reasoning_effort: compatibleOptions.reasoningEffort,
verbosity: compatibleOptions.textVerbosity,
// messages:
messages: convertToOpenAICompatibleChatMessages(prompt),
// tools:
tools: openaiTools,
tool_choice: openaiToolChoice,
// thinking_budget
thinking_budget: compatibleOptions.thinking_budget,
},
warnings: [...warnings, ...toolWarnings],
}
}
async doGenerate(options: LanguageModelV3CallOptions) {
const { args, warnings } = await this.getArgs({ ...options })
const body = JSON.stringify(args)
const {
responseHeaders,
value: responseBody,
rawValue: rawResponse,
} = await postJsonToApi({
url: this.config.url({
path: "/chat/completions",
modelId: this.modelId,
}),
headers: combineHeaders(this.config.headers(), options.headers),
body: args,
failedResponseHandler: this.failedResponseHandler,
successfulResponseHandler: createJsonResponseHandler(OpenAICompatibleChatResponseSchema),
abortSignal: options.abortSignal,
fetch: this.config.fetch,
})
const choice = responseBody.choices[0]
const content: Array<LanguageModelV3Content> = []
// text content:
const text = choice.message.content
if (text != null && text.length > 0) {
content.push({
type: "text",
text,
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
})
}
// reasoning content (Copilot uses reasoning_text):
const reasoning = choice.message.reasoning_text
if (reasoning != null && reasoning.length > 0) {
content.push({
type: "reasoning",
text: reasoning,
// Include reasoning_opaque for Copilot multi-turn reasoning
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
})
}
// tool calls:
if (choice.message.tool_calls != null) {
for (const toolCall of choice.message.tool_calls) {
content.push({
type: "tool-call",
toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name,
input: toolCall.function.arguments!,
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
})
}
}
// provider metadata:
const providerMetadata: SharedV3ProviderMetadata = {
[this.providerOptionsName]: {},
...(await this.config.metadataExtractor?.extractMetadata?.({
parsedBody: rawResponse,
})),
}
const completionTokenDetails = responseBody.usage?.completion_tokens_details
if (completionTokenDetails?.accepted_prediction_tokens != null) {
providerMetadata[this.providerOptionsName].acceptedPredictionTokens =
completionTokenDetails?.accepted_prediction_tokens
}
if (completionTokenDetails?.rejected_prediction_tokens != null) {
providerMetadata[this.providerOptionsName].rejectedPredictionTokens =
completionTokenDetails?.rejected_prediction_tokens
}
return {
content,
finishReason: {
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
raw: choice.finish_reason ?? undefined,
},
usage: {
inputTokens: {
total: responseBody.usage?.prompt_tokens ?? undefined,
noCache: undefined,
cacheRead: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
cacheWrite: undefined,
},
outputTokens: {
total: responseBody.usage?.completion_tokens ?? undefined,
text: undefined,
reasoning: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
},
raw: responseBody.usage ?? undefined,
},
providerMetadata,
request: { body },
response: {
...getResponseMetadata(responseBody),
headers: responseHeaders,
body: rawResponse,
},
warnings,
}
}
async doStream(options: LanguageModelV3CallOptions) {
const { args, warnings } = await this.getArgs({ ...options })
const body = {
...args,
stream: true,
// only include stream_options when in strict compatibility mode:
stream_options: this.config.includeUsage ? { include_usage: true } : undefined,
}
const metadataExtractor = this.config.metadataExtractor?.createStreamExtractor()
const { responseHeaders, value: response } = await postJsonToApi({
url: this.config.url({
path: "/chat/completions",
modelId: this.modelId,
}),
headers: combineHeaders(this.config.headers(), options.headers),
body,
failedResponseHandler: this.failedResponseHandler,
successfulResponseHandler: createEventSourceResponseHandler(this.chunkSchema),
abortSignal: options.abortSignal,
fetch: this.config.fetch,
})
const toolCalls: Array<{
id: string
type: "function"
function: {
name: string
arguments: string
}
hasFinished: boolean
}> = []
let finishReason: {
unified: ReturnType<typeof mapOpenAICompatibleFinishReason>
raw: string | undefined
} = {
unified: "other",
raw: undefined,
}
const usage: {
completionTokens: number | undefined
completionTokensDetails: {
reasoningTokens: number | undefined
acceptedPredictionTokens: number | undefined
rejectedPredictionTokens: number | undefined
}
promptTokens: number | undefined
promptTokensDetails: {
cachedTokens: number | undefined
}
totalTokens: number | undefined
} = {
completionTokens: undefined,
completionTokensDetails: {
reasoningTokens: undefined,
acceptedPredictionTokens: undefined,
rejectedPredictionTokens: undefined,
},
promptTokens: undefined,
promptTokensDetails: {
cachedTokens: undefined,
},
totalTokens: undefined,
}
let isFirstChunk = true
const providerOptionsName = this.providerOptionsName
let isActiveReasoning = false
let isActiveText = false
let reasoningOpaque: string | undefined
return {
stream: response.pipeThrough(
new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV3StreamPart>({
start(controller) {
controller.enqueue({ type: "stream-start", warnings })
},
// TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
transform(chunk, controller) {
// Emit raw chunk if requested (before anything else)
if (options.includeRawChunks) {
controller.enqueue({ type: "raw", rawValue: chunk.rawValue })
}
// handle failed chunk parsing / validation:
if (!chunk.success) {
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: chunk.error })
return
}
const value = chunk.value
metadataExtractor?.processChunk(chunk.rawValue)
// handle error chunks:
if ("error" in value) {
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: value.error.message })
return
}
if (isFirstChunk) {
isFirstChunk = false
controller.enqueue({
type: "response-metadata",
...getResponseMetadata(value),
})
}
if (value.usage != null) {
const {
prompt_tokens,
completion_tokens,
total_tokens,
prompt_tokens_details,
completion_tokens_details,
} = value.usage
usage.promptTokens = prompt_tokens ?? undefined
usage.completionTokens = completion_tokens ?? undefined
usage.totalTokens = total_tokens ?? undefined
if (completion_tokens_details?.reasoning_tokens != null) {
usage.completionTokensDetails.reasoningTokens = completion_tokens_details?.reasoning_tokens
}
if (completion_tokens_details?.accepted_prediction_tokens != null) {
usage.completionTokensDetails.acceptedPredictionTokens =
completion_tokens_details?.accepted_prediction_tokens
}
if (completion_tokens_details?.rejected_prediction_tokens != null) {
usage.completionTokensDetails.rejectedPredictionTokens =
completion_tokens_details?.rejected_prediction_tokens
}
if (prompt_tokens_details?.cached_tokens != null) {
usage.promptTokensDetails.cachedTokens = prompt_tokens_details?.cached_tokens
}
}
const choice = value.choices[0]
if (choice?.finish_reason != null) {
finishReason = {
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
raw: choice.finish_reason ?? undefined,
}
}
if (choice?.delta == null) {
return
}
const delta = choice.delta
// Capture reasoning_opaque for Copilot multi-turn reasoning
if (delta.reasoning_opaque) {
if (reasoningOpaque != null) {
throw new InvalidResponseDataError({
data: delta,
message:
"Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.",
})
}
reasoningOpaque = delta.reasoning_opaque
}
// enqueue reasoning before text deltas (Copilot uses reasoning_text):
const reasoningContent = delta.reasoning_text
if (reasoningContent) {
if (!isActiveReasoning) {
controller.enqueue({
type: "reasoning-start",
id: "reasoning-0",
})
isActiveReasoning = true
}
controller.enqueue({
type: "reasoning-delta",
id: "reasoning-0",
delta: reasoningContent,
})
}
if (delta.content) {
// If reasoning was active and we're starting text, end reasoning first
// This handles the case where reasoning_opaque and content come in the same chunk
if (isActiveReasoning && !isActiveText) {
controller.enqueue({
type: "reasoning-end",
id: "reasoning-0",
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
isActiveReasoning = false
}
if (!isActiveText) {
controller.enqueue({
type: "text-start",
id: "txt-0",
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
isActiveText = true
}
controller.enqueue({
type: "text-delta",
id: "txt-0",
delta: delta.content,
})
}
if (delta.tool_calls != null) {
// If reasoning was active and we're starting tool calls, end reasoning first
// This handles the case where reasoning goes directly to tool calls with no content
if (isActiveReasoning) {
controller.enqueue({
type: "reasoning-end",
id: "reasoning-0",
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
isActiveReasoning = false
}
for (const toolCallDelta of delta.tool_calls) {
const index = toolCallDelta.index
if (toolCalls[index] == null) {
if (toolCallDelta.id == null) {
throw new InvalidResponseDataError({
data: toolCallDelta,
message: `Expected 'id' to be a string.`,
})
}
if (toolCallDelta.function?.name == null) {
throw new InvalidResponseDataError({
data: toolCallDelta,
message: `Expected 'function.name' to be a string.`,
})
}
controller.enqueue({
type: "tool-input-start",
id: toolCallDelta.id,
toolName: toolCallDelta.function.name,
})
toolCalls[index] = {
id: toolCallDelta.id,
type: "function",
function: {
name: toolCallDelta.function.name,
arguments: toolCallDelta.function.arguments ?? "",
},
hasFinished: false,
}
const toolCall = toolCalls[index]
if (toolCall.function?.name != null && toolCall.function?.arguments != null) {
// send delta if the argument text has already started:
if (toolCall.function.arguments.length > 0) {
controller.enqueue({
type: "tool-input-delta",
id: toolCall.id,
delta: toolCall.function.arguments,
})
}
// check if tool call is complete
// (some providers send the full tool call in one chunk):
if (isParsableJson(toolCall.function.arguments)) {
controller.enqueue({
type: "tool-input-end",
id: toolCall.id,
})
controller.enqueue({
type: "tool-call",
toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name,
input: toolCall.function.arguments,
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
toolCall.hasFinished = true
}
}
continue
}
// existing tool call, merge if not finished
const toolCall = toolCalls[index]
if (toolCall.hasFinished) {
continue
}
if (toolCallDelta.function?.arguments != null) {
toolCall.function!.arguments += toolCallDelta.function?.arguments ?? ""
}
// send delta
controller.enqueue({
type: "tool-input-delta",
id: toolCall.id,
delta: toolCallDelta.function.arguments ?? "",
})
// check if tool call is complete
if (
toolCall.function?.name != null &&
toolCall.function?.arguments != null &&
isParsableJson(toolCall.function.arguments)
) {
controller.enqueue({
type: "tool-input-end",
id: toolCall.id,
})
controller.enqueue({
type: "tool-call",
toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name,
input: toolCall.function.arguments,
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
toolCall.hasFinished = true
}
}
}
},
flush(controller) {
if (isActiveReasoning) {
controller.enqueue({
type: "reasoning-end",
id: "reasoning-0",
// Include reasoning_opaque for Copilot multi-turn reasoning
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
}
if (isActiveText) {
controller.enqueue({ type: "text-end", id: "txt-0" })
}
// go through all tool calls and send the ones that are not finished
for (const toolCall of toolCalls.filter((toolCall) => !toolCall.hasFinished)) {
controller.enqueue({
type: "tool-input-end",
id: toolCall.id,
})
controller.enqueue({
type: "tool-call",
toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name,
input: toolCall.function.arguments,
})
}
const providerMetadata: SharedV3ProviderMetadata = {
[providerOptionsName]: {},
// Include reasoning_opaque for Copilot multi-turn reasoning
...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),
...metadataExtractor?.buildMetadata(),
}
if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
providerMetadata[providerOptionsName].acceptedPredictionTokens =
usage.completionTokensDetails.acceptedPredictionTokens
}
if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
providerMetadata[providerOptionsName].rejectedPredictionTokens =
usage.completionTokensDetails.rejectedPredictionTokens
}
controller.enqueue({
type: "finish",
finishReason,
usage: {
inputTokens: {
total: usage.promptTokens,
noCache:
usage.promptTokens != undefined && usage.promptTokensDetails.cachedTokens != undefined
? usage.promptTokens - usage.promptTokensDetails.cachedTokens
: undefined,
cacheRead: usage.promptTokensDetails.cachedTokens,
cacheWrite: undefined,
},
outputTokens: {
total: usage.completionTokens,
text: undefined,
reasoning: usage.completionTokensDetails.reasoningTokens,
},
raw: {
prompt_tokens: usage.promptTokens ?? null,
completion_tokens: usage.completionTokens ?? null,
total_tokens: usage.totalTokens ?? null,
},
},
providerMetadata,
})
},
}),
),
request: { body },
response: { headers: responseHeaders },
}
}
}
// Cached-prompt-token details (reported by some providers).
const promptTokensDetailsSchema = z
  .object({
    cached_tokens: z.number().nullish(),
  })
  .nullish()

// Completion-token details: reasoning and predicted-output accounting.
const completionTokensDetailsSchema = z
  .object({
    reasoning_tokens: z.number().nullish(),
    accepted_prediction_tokens: z.number().nullish(),
    rejected_prediction_tokens: z.number().nullish(),
  })
  .nullish()

// Token-usage block shared by the response schema and the stream-chunk schema.
const openaiCompatibleTokenUsageSchema = z
  .object({
    prompt_tokens: z.number().nullish(),
    completion_tokens: z.number().nullish(),
    total_tokens: z.number().nullish(),
    prompt_tokens_details: promptTokensDetailsSchema,
    completion_tokens_details: completionTokensDetailsSchema,
  })
  .nullish()
// Tool call as it appears in a complete (non-streaming) assistant message.
const responseToolCallSchema = z.object({
  id: z.string().nullish(),
  function: z.object({
    name: z.string(),
    arguments: z.string(),
  }),
})

// Limited response schema: only the fields the implementation actually reads.
// Keeping it narrow limits breakage when the API changes and parses faster.
const OpenAICompatibleChatResponseSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      message: z.object({
        role: z.literal("assistant").nullish(),
        content: z.string().nullish(),
        // Copilot-specific reasoning fields
        reasoning_text: z.string().nullish(),
        reasoning_opaque: z.string().nullish(),
        tool_calls: z.array(responseToolCallSchema).nullish(),
      }),
      finish_reason: z.string().nullish(),
    }),
  ),
  usage: openaiCompatibleTokenUsageSchema,
})
// Tool-call delta inside a streamed chunk (name/arguments arrive incrementally).
const chunkToolCallDeltaSchema = z.object({
  index: z.number(),
  id: z.string().nullish(),
  function: z.object({
    name: z.string().nullish(),
    arguments: z.string().nullish(),
  }),
})

// Limited stream-chunk schema: only the fields the implementation reads.
// The data shape is unioned with the provider's error schema so that
// in-stream error events also parse successfully.
const createOpenAICompatibleChatChunkSchema = <ERROR_SCHEMA extends z.core.$ZodType>(errorSchema: ERROR_SCHEMA) => {
  const dataChunkSchema = z.object({
    id: z.string().nullish(),
    created: z.number().nullish(),
    model: z.string().nullish(),
    choices: z.array(
      z.object({
        delta: z
          .object({
            role: z.enum(["assistant"]).nullish(),
            content: z.string().nullish(),
            // Copilot-specific reasoning fields
            reasoning_text: z.string().nullish(),
            reasoning_opaque: z.string().nullish(),
            tool_calls: z.array(chunkToolCallDeltaSchema).nullish(),
          })
          .nullish(),
        finish_reason: z.string().nullish(),
      }),
    ),
    usage: openaiCompatibleTokenUsageSchema,
  })
  return z.union([dataChunkSchema, errorSchema])
}

View File

@@ -0,0 +1,28 @@
import { z } from "zod/v4"
export type OpenAICompatibleChatModelId = string

// Shape of the shared provider options, kept as a named object so the full
// key list is easy to scan.
const openaiCompatibleProviderOptionsShape = {
  /**
   * A unique identifier representing your end-user, which can help the provider to
   * monitor and detect abuse.
   */
  user: z.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: z.string().optional(),
  /**
   * Controls the verbosity of the generated text. Defaults to `medium`.
   */
  textVerbosity: z.string().optional(),
  /**
   * Copilot thinking_budget used for Anthropic models.
   */
  thinking_budget: z.number().optional(),
}

export const openaiCompatibleProviderOptions = z.object(openaiCompatibleProviderOptionsShape)

export type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>

View File

@@ -0,0 +1,44 @@
import type { SharedV3ProviderMetadata } from "@ai-sdk/provider"
/**
Extracts provider-specific metadata from API responses.
Used to standardize metadata handling across different LLM providers while allowing
provider-specific metadata to be captured.

Two paths are supported: a one-shot extraction from a complete response body,
and a stateful extractor that accumulates over streamed chunks.
*/
export type MetadataExtractor = {
  /**
   * Extracts provider metadata from a complete, non-streaming response.
   *
   * @param parsedBody - The parsed response JSON body from the provider's API.
   *
   * @returns Provider-specific metadata or undefined if no metadata is available.
   *   The metadata should be under a key indicating the provider id.
   */
  extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV3ProviderMetadata | undefined>
  /**
   * Creates an extractor for handling streaming responses. The returned object provides
   * methods to process individual chunks and build the final metadata from the accumulated
   * stream data.
   *
   * @returns An object with methods to process chunks and build metadata from a stream
   */
  createStreamExtractor: () => {
    /**
     * Process an individual chunk from the stream. Called for each chunk in the response stream
     * to accumulate metadata throughout the streaming process.
     *
     * @param parsedChunk - The parsed JSON response chunk from the provider's API
     */
    processChunk(parsedChunk: unknown): void
    /**
     * Builds the metadata object after all chunks have been processed.
     * Called at the end of the stream to generate the complete provider metadata.
     *
     * @returns Provider-specific metadata or undefined if no metadata is available.
     *   The metadata should be under a key indicating the provider id.
     */
    buildMetadata(): SharedV3ProviderMetadata | undefined
  }
}

View File

@@ -0,0 +1,83 @@
import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"
export function prepareTools({
tools,
toolChoice,
}: {
tools: LanguageModelV3CallOptions["tools"]
toolChoice?: LanguageModelV3CallOptions["toolChoice"]
}): {
tools:
| undefined
| Array<{
type: "function"
function: {
name: string
description: string | undefined
parameters: unknown
}
}>
toolChoice: { type: "function"; function: { name: string } } | "auto" | "none" | "required" | undefined
toolWarnings: SharedV3Warning[]
} {
// when the tools array is empty, change it to undefined to prevent errors:
tools = tools?.length ? tools : undefined
const toolWarnings: SharedV3Warning[] = []
if (tools == null) {
return { tools: undefined, toolChoice: undefined, toolWarnings }
}
const openaiCompatTools: Array<{
type: "function"
function: {
name: string
description: string | undefined
parameters: unknown
}
}> = []
for (const tool of tools) {
if (tool.type === "provider") {
toolWarnings.push({ type: "unsupported", feature: `tool type: ${tool.type}` })
} else {
openaiCompatTools.push({
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: tool.inputSchema,
},
})
}
}
if (toolChoice == null) {
return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings }
}
const type = toolChoice.type
switch (type) {
case "auto":
case "none":
case "required":
return { tools: openaiCompatTools, toolChoice: type, toolWarnings }
case "tool":
return {
tools: openaiCompatTools,
toolChoice: {
type: "function",
function: { name: toolChoice.toolName },
},
toolWarnings,
}
default: {
const _exhaustiveCheck: never = type
throw new UnsupportedFunctionalityError({
functionality: `tool choice type: ${_exhaustiveCheck}`,
})
}
}
}

View File

@@ -0,0 +1,100 @@
import type { LanguageModelV3 } from "@ai-sdk/provider"
import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
import { OpenAICompatibleChatLanguageModel } from "./chat/openai-compatible-chat-language-model"
import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"
// Import the version or define it
const VERSION = "0.1.0"

/** Model identifier accepted by the OpenAI-compatible provider (free-form string). */
export type OpenaiCompatibleModelId = string

/**
 * Settings for {@link createOpenaiCompatible}.
 */
export interface OpenaiCompatibleProviderSettings {
  /**
   * API key for authenticating requests.
   */
  apiKey?: string
  /**
   * Base URL for the OpenAI Compatible API calls.
   */
  baseURL?: string
  /**
   * Name of the provider.
   */
  name?: string
  /**
   * Custom headers to include in the requests.
   */
  headers?: Record<string, string>
  /**
   * Custom fetch implementation.
   */
  fetch?: FetchFunction
}

/**
 * OpenAI-compatible provider: callable to create a chat model, with named
 * factories for the chat-completions and responses APIs.
 */
export interface OpenaiCompatibleProvider {
  (modelId: OpenaiCompatibleModelId): LanguageModelV3
  /** Create a chat-completions model. */
  chat(modelId: OpenaiCompatibleModelId): LanguageModelV3
  /** Create a responses-API model. */
  responses(modelId: OpenaiCompatibleModelId): LanguageModelV3
  /** Default language-model factory; delegates to the chat model. */
  languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV3
  // embeddingModel(modelId: any): EmbeddingModelV2
  // imageModel(modelId: any): ImageModelV2
}
/**
 * Create an OpenAI Compatible provider instance.
 *
 * The returned value is callable (producing a chat model) and also exposes
 * `chat`, `responses`, and `languageModel` factories.
 */
export function createOpenaiCompatible(options: OpenaiCompatibleProviderSettings = {}): OpenaiCompatibleProvider {
  const baseURL = withoutTrailingSlash(options.baseURL ?? "https://api.openai.com/v1")
  if (!baseURL) {
    throw new Error("baseURL is required")
  }

  const providerName = options.name ?? "openai-compatible"

  // Merge headers: defaults first, then user overrides.
  const headers: Record<string, string> = {}
  if (options.apiKey) {
    headers.Authorization = `Bearer ${options.apiKey}`
  }
  Object.assign(headers, options.headers)

  const getHeaders = () => withUserAgentSuffix(headers, `ai-sdk/openai-compatible/${VERSION}`)
  const resolveUrl = ({ path }: { path: string }) => `${baseURL}${path}`

  const createChatModel = (modelId: OpenaiCompatibleModelId) =>
    new OpenAICompatibleChatLanguageModel(modelId, {
      provider: `${providerName}.chat`,
      headers: getHeaders,
      url: resolveUrl,
      fetch: options.fetch,
    })

  const createResponsesModel = (modelId: OpenaiCompatibleModelId) =>
    new OpenAIResponsesLanguageModel(modelId, {
      provider: `${providerName}.responses`,
      headers: getHeaders,
      url: resolveUrl,
      fetch: options.fetch,
    })

  const provider = (modelId: OpenaiCompatibleModelId) => createChatModel(modelId)

  return Object.assign(provider, {
    languageModel: createChatModel,
    chat: createChatModel,
    responses: createResponsesModel,
  }) as OpenaiCompatibleProvider
}

// Default OpenAI Compatible provider instance
export const openaiCompatible = createOpenaiCompatible()

View File

@@ -0,0 +1,27 @@
import { z, type ZodType } from "zod/v4"
// Inner `error` object. The additional information beyond `message` is
// handled loosely to support OpenAI-compatible providers that have slightly
// different error responses.
const openaiCompatibleErrorSchema = z.object({
  message: z.string(),
  type: z.string().nullish(),
  param: z.any().nullish(),
  code: z.union([z.string(), z.number()]).nullish(),
})

/** Standard OpenAI-compatible error envelope: `{ error: { message, ... } }`. */
export const openaiCompatibleErrorDataSchema = z.object({
  error: openaiCompatibleErrorSchema,
})

export type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>

/**
 * Describes how a provider's error responses are parsed and rendered.
 */
export type ProviderErrorStructure<T> = {
  /** Schema used to validate/parse the raw error body. */
  errorSchema: ZodType<T>
  /** Maps a parsed error to a human-readable message. */
  errorToMessage: (error: T) => string
  /** Optional predicate deciding whether a failed response may be retried. */
  isRetryable?: (response: Response, error?: T) => boolean
}

/** Default error structure matching the standard OpenAI error envelope. */
export const defaultOpenAICompatibleErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> = {
  errorSchema: openaiCompatibleErrorDataSchema,
  errorToMessage: (data) => data.error.message,
}

View File

@@ -0,0 +1,335 @@
import {
type LanguageModelV3Prompt,
type LanguageModelV3ToolCallPart,
type SharedV3Warning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { convertToBase64, parseProviderOptions } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
import type { OpenAIResponsesInput, OpenAIResponsesReasoning } from "./openai-responses-api-types"
import { localShellInputSchema, localShellOutputSchema } from "./tool/local-shell"
/**
 * Check if a string is a file ID based on the given prefixes.
 * Returns false if prefixes is undefined (disables file ID detection).
 */
function isFileId(data: string, prefixes?: readonly string[]): boolean {
  if (prefixes == null) {
    return false
  }
  for (const prefix of prefixes) {
    if (data.startsWith(prefix)) {
      return true
    }
  }
  return false
}
export async function convertToOpenAIResponsesInput({
prompt,
systemMessageMode,
fileIdPrefixes,
store,
hasLocalShellTool = false,
}: {
prompt: LanguageModelV3Prompt
systemMessageMode: "system" | "developer" | "remove"
fileIdPrefixes?: readonly string[]
store: boolean
hasLocalShellTool?: boolean
}): Promise<{
input: OpenAIResponsesInput
warnings: Array<SharedV3Warning>
}> {
const input: OpenAIResponsesInput = []
const warnings: Array<SharedV3Warning> = []
const processedApprovalIds = new Set<string>()
for (const { role, content } of prompt) {
switch (role) {
case "system": {
switch (systemMessageMode) {
case "system": {
input.push({ role: "system", content })
break
}
case "developer": {
input.push({ role: "developer", content })
break
}
case "remove": {
warnings.push({
type: "other",
message: "system messages are removed for this model",
})
break
}
default: {
const _exhaustiveCheck: never = systemMessageMode
throw new Error(`Unsupported system message mode: ${_exhaustiveCheck}`)
}
}
break
}
case "user": {
input.push({
role: "user",
content: content.map((part, index) => {
switch (part.type) {
case "text": {
return { type: "input_text", text: part.text }
}
case "file": {
if (part.mediaType.startsWith("image/")) {
const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
return {
type: "input_image",
...(part.data instanceof URL
? { image_url: part.data.toString() }
: typeof part.data === "string" && isFileId(part.data, fileIdPrefixes)
? { file_id: part.data }
: {
image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`,
}),
detail: part.providerOptions?.openai?.imageDetail,
}
} else if (part.mediaType === "application/pdf") {
if (part.data instanceof URL) {
return {
type: "input_file",
file_url: part.data.toString(),
}
}
return {
type: "input_file",
...(typeof part.data === "string" && isFileId(part.data, fileIdPrefixes)
? { file_id: part.data }
: {
filename: part.filename ?? `part-${index}.pdf`,
file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`,
}),
}
} else {
throw new UnsupportedFunctionalityError({
functionality: `file part media type ${part.mediaType}`,
})
}
}
}
}),
})
break
}
case "assistant": {
const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {}
const toolCallParts: Record<string, LanguageModelV3ToolCallPart> = {}
for (const part of content) {
switch (part.type) {
case "text": {
input.push({
role: "assistant",
content: [{ type: "output_text", text: part.text }],
id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
})
break
}
case "tool-call": {
toolCallParts[part.toolCallId] = part
if (part.providerExecuted) {
break
}
if (hasLocalShellTool && part.toolName === "local_shell") {
const parsedInput = localShellInputSchema.parse(part.input)
input.push({
type: "local_shell_call",
call_id: part.toolCallId,
id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
action: {
type: "exec",
command: parsedInput.action.command,
timeout_ms: parsedInput.action.timeoutMs,
user: parsedInput.action.user,
working_directory: parsedInput.action.workingDirectory,
env: parsedInput.action.env,
},
})
break
}
input.push({
type: "function_call",
call_id: part.toolCallId,
name: part.toolName,
arguments: JSON.stringify(part.input),
id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
})
break
}
// assistant tool result parts are from provider-executed tools:
case "tool-result": {
if (store) {
// use item references to refer to tool results from built-in tools
input.push({ type: "item_reference", id: part.toolCallId })
} else {
warnings.push({
type: "other",
message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`,
})
}
break
}
case "reasoning": {
const providerOptions = await parseProviderOptions({
provider: "copilot",
providerOptions: part.providerOptions,
schema: openaiResponsesReasoningProviderOptionsSchema,
})
const reasoningId = providerOptions?.itemId
if (reasoningId != null) {
const reasoningMessage = reasoningMessages[reasoningId]
if (store) {
if (reasoningMessage === undefined) {
// use item references to refer to reasoning (single reference)
input.push({ type: "item_reference", id: reasoningId })
// store unused reasoning message to mark id as used
reasoningMessages[reasoningId] = {
type: "reasoning",
id: reasoningId,
summary: [],
}
}
} else {
const summaryParts: Array<{
type: "summary_text"
text: string
}> = []
if (part.text.length > 0) {
summaryParts.push({
type: "summary_text",
text: part.text,
})
} else if (reasoningMessage !== undefined) {
warnings.push({
type: "other",
message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`,
})
}
if (reasoningMessage === undefined) {
reasoningMessages[reasoningId] = {
type: "reasoning",
id: reasoningId,
encrypted_content: providerOptions?.reasoningEncryptedContent,
summary: summaryParts,
}
input.push(reasoningMessages[reasoningId])
} else {
reasoningMessage.summary.push(...summaryParts)
}
}
} else {
warnings.push({
type: "other",
message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`,
})
}
break
}
}
}
break
}
case "tool": {
for (const part of content) {
if (part.type === "tool-approval-response") {
if (processedApprovalIds.has(part.approvalId)) {
continue
}
processedApprovalIds.add(part.approvalId)
if (store) {
input.push({
type: "item_reference",
id: part.approvalId,
})
}
input.push({
type: "mcp_approval_response",
approval_request_id: part.approvalId,
approve: part.approved,
})
continue
}
const output = part.output
if (output.type === "execution-denied") {
const approvalId = (output.providerOptions?.openai as { approvalId?: string } | undefined)?.approvalId
if (approvalId) {
continue
}
}
if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
input.push({
type: "local_shell_call_output",
call_id: part.toolCallId,
output: localShellOutputSchema.parse(output.value).output,
})
break
}
let contentValue: string
switch (output.type) {
case "text":
case "error-text":
contentValue = output.value
break
case "execution-denied":
contentValue = output.reason ?? "Tool execution denied."
break
case "content":
case "json":
case "error-json":
contentValue = JSON.stringify(output.value)
break
}
input.push({
type: "function_call_output",
call_id: part.toolCallId,
output: contentValue,
})
}
break
}
default: {
const _exhaustiveCheck: never = role
throw new Error(`Unsupported role: ${_exhaustiveCheck}`)
}
}
}
return { input, warnings }
}
// Provider options carried on reasoning parts so reasoning items can be
// round-tripped to the Responses API: the original item id plus the
// provider-encrypted reasoning payload.
const openaiResponsesReasoningProviderOptionsSchema = z.object({
  itemId: z.string().nullish(),
  reasoningEncryptedContent: z.string().nullish(),
})
export type OpenAIResponsesReasoningProviderOptions = z.infer<typeof openaiResponsesReasoningProviderOptionsSchema>

View File

@@ -0,0 +1,22 @@
import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"
/**
 * Maps the raw finish reason reported by the OpenAI Responses API onto the
 * unified AI SDK finish-reason vocabulary.
 *
 * A missing reason is treated as a normal stop; any reason is upgraded to
 * "tool-calls" when the response contained client-side function calls.
 */
export function mapOpenAIResponseFinishReason({
  finishReason,
  hasFunctionCall,
}: {
  finishReason: string | null | undefined
  // flag that checks if there have been client-side tool calls (not executed by openai)
  hasFunctionCall: boolean
}): LanguageModelV3FinishReason["unified"] {
  // == null matches both null and undefined
  if (finishReason == null) return hasFunctionCall ? "tool-calls" : "stop"
  if (finishReason === "max_output_tokens") return "length"
  if (finishReason === "content_filter") return "content-filter"
  // unrecognized reasons fall through to "other" unless tools were called
  return hasFunctionCall ? "tool-calls" : "other"
}

View File

@@ -0,0 +1,18 @@
import type { FetchFunction } from "@ai-sdk/provider-utils"
/**
 * Shared configuration for OpenAI-flavoured API clients (OpenAI, Azure, and
 * OpenAI-compatible providers).
 */
export type OpenAIConfig = {
  // Provider name, used to label this client (e.g. in errors/telemetry) — TODO confirm usage.
  provider: string
  // Builds the request URL for a given model id and API path.
  url: (options: { modelId: string; path: string }) => string
  // Produces request headers; values may be undefined (presumably filtered
  // out before sending — confirm against the request layer).
  headers: () => Record<string, string | undefined>
  // Optional custom fetch implementation (e.g. for proxying or testing).
  fetch?: FetchFunction
  // Optional ID generator override.
  generateId?: () => string
  /**
   * File ID prefixes used to identify file IDs in Responses API.
   * When undefined, all file data is treated as base64 content.
   *
   * Examples:
   * - OpenAI: ['file-'] for IDs like 'file-abc123'
   * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
   */
  fileIdPrefixes?: readonly string[]
}

View File

@@ -0,0 +1,22 @@
import { z } from "zod/v4"
import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"
// Error payload shape returned by the OpenAI API (and compatible providers).
export const openaiErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
})
export type OpenAIErrorData = z.infer<typeof openaiErrorDataSchema>
// Response handler that surfaces `error.message` from failed API calls.
// NOTE(review): annotated `any` — presumably to avoid leaking provider-utils
// internals into the export type; a precise type would be safer. TODO confirm.
export const openaiFailedResponseHandler: any = createJsonErrorResponseHandler({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message,
})

View File

@@ -0,0 +1,214 @@
import type { JSONSchema7 } from "@ai-sdk/provider"
// The Responses API `input` payload: an ordered history of messages, tool
// calls/outputs, reasoning items, and references to stored items.
export type OpenAIResponsesInput = Array<OpenAIResponsesInputItem>
export type OpenAIResponsesInputItem =
  | OpenAIResponsesSystemMessage
  | OpenAIResponsesUserMessage
  | OpenAIResponsesAssistantMessage
  | OpenAIResponsesFunctionCall
  | OpenAIResponsesFunctionCallOutput
  | OpenAIResponsesComputerCall
  | OpenAIResponsesLocalShellCall
  | OpenAIResponsesLocalShellCallOutput
  | OpenAIResponsesReasoning
  | OpenAIResponsesItemReference
  | OpenAIResponsesMcpApprovalResponse
// Extra response fields that can be requested via the `include` parameter.
export type OpenAIResponsesIncludeValue =
  | "web_search_call.action.sources"
  | "code_interpreter_call.outputs"
  | "computer_call_output.output.image_url"
  | "file_search_call.results"
  | "message.input_image.image_url"
  | "message.output_text.logprobs"
  | "reasoning.encrypted_content"
export type OpenAIResponsesIncludeOptions = Array<OpenAIResponsesIncludeValue> | undefined | null
// System/developer instruction message (plain text only).
export type OpenAIResponsesSystemMessage = {
  role: "system" | "developer"
  content: string
}
// User message; parts may be text, images (by URL or file id), or files
// (by URL, inline base64 data, or file id).
export type OpenAIResponsesUserMessage = {
  role: "user"
  content: Array<
    | { type: "input_text"; text: string }
    | { type: "input_image"; image_url: string }
    | { type: "input_image"; file_id: string }
    | { type: "input_file"; file_url: string }
    | { type: "input_file"; filename: string; file_data: string }
    | { type: "input_file"; file_id: string }
  >
}
// Assistant message replayed into the input; `id` is the original item id.
export type OpenAIResponsesAssistantMessage = {
  role: "assistant"
  content: Array<{ type: "output_text"; text: string }>
  id?: string
}
// A client-executed function (tool) call emitted by the assistant.
export type OpenAIResponsesFunctionCall = {
  type: "function_call"
  call_id: string
  name: string
  arguments: string
  id?: string
}
// The client-produced result for a prior function call.
export type OpenAIResponsesFunctionCallOutput = {
  type: "function_call_output"
  call_id: string
  output: string
}
export type OpenAIResponsesComputerCall = {
  type: "computer_call"
  id: string
  status?: string
}
// A local shell tool invocation with the exec action to run on the client.
export type OpenAIResponsesLocalShellCall = {
  type: "local_shell_call"
  id: string
  call_id: string
  action: {
    type: "exec"
    command: string[]
    timeout_ms?: number
    user?: string
    working_directory?: string
    env?: Record<string, string>
  }
}
export type OpenAIResponsesLocalShellCallOutput = {
  type: "local_shell_call_output"
  call_id: string
  output: string
}
// Reference to a previously stored response item by id.
export type OpenAIResponsesItemReference = {
  type: "item_reference"
  id: string
}
// Client approval/denial for an MCP tool approval request.
export type OpenAIResponsesMcpApprovalResponse = {
  type: "mcp_approval_response"
  approval_request_id: string
  approve: boolean
}
/**
 * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
 */
export type OpenAIResponsesFileSearchToolComparisonFilter = {
  /**
   * The key to compare against the value.
   */
  key: string
  /**
   * Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
   */
  type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte"
  /**
   * The value to compare against the attribute key; supports string, number, or boolean types.
   */
  value: string | number | boolean
}
/**
 * Combine multiple filters using and or or.
 */
export type OpenAIResponsesFileSearchToolCompoundFilter = {
  /**
   * Type of operation: and or or.
   */
  type: "and" | "or"
  /**
   * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
   */
  filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>
}
// Request-side tool definitions: client function tools plus OpenAI built-in
// tools (web search, code interpreter, file search, image generation, local shell).
export type OpenAIResponsesTool =
  | {
      type: "function"
      name: string
      description: string | undefined
      parameters: JSONSchema7
      strict: boolean | undefined
    }
  | {
      type: "web_search"
      filters: { allowed_domains: string[] | undefined } | undefined
      search_context_size: "low" | "medium" | "high" | undefined
      user_location:
        | {
            type: "approximate"
            city?: string
            country?: string
            region?: string
            timezone?: string
          }
        | undefined
    }
  | {
      type: "web_search_preview"
      search_context_size: "low" | "medium" | "high" | undefined
      user_location:
        | {
            type: "approximate"
            city?: string
            country?: string
            region?: string
            timezone?: string
          }
        | undefined
    }
  | {
      type: "code_interpreter"
      container: string | { type: "auto"; file_ids: string[] | undefined }
    }
  | {
      type: "file_search"
      vector_store_ids: string[]
      max_num_results: number | undefined
      ranking_options: { ranker?: string; score_threshold?: number } | undefined
      filters: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter | undefined
    }
  | {
      type: "image_generation"
      background: "auto" | "opaque" | "transparent" | undefined
      input_fidelity: "low" | "high" | undefined
      input_image_mask:
        | {
            file_id: string | undefined
            image_url: string | undefined
          }
        | undefined
      model: string | undefined
      moderation: "auto" | undefined
      output_compression: number | undefined
      output_format: "png" | "jpeg" | "webp" | undefined
      partial_images: number | undefined
      quality: "auto" | "low" | "medium" | "high" | undefined
      size: "auto" | "1024x1024" | "1024x1536" | "1536x1024" | undefined
    }
  | {
      type: "local_shell"
    }
// Reasoning item replayed into the input; `encrypted_content` carries the
// provider-encrypted reasoning payload, `summary` the visible summary text.
export type OpenAIResponsesReasoning = {
  type: "reasoning"
  id: string
  encrypted_content?: string | null
  summary: Array<{
    type: "summary_text"
    text: string
  }>
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,173 @@
import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"
import { codeInterpreterArgsSchema } from "./tool/code-interpreter"
import { fileSearchArgsSchema } from "./tool/file-search"
import { webSearchArgsSchema } from "./tool/web-search"
import { webSearchPreviewArgsSchema } from "./tool/web-search-preview"
import { imageGenerationArgsSchema } from "./tool/image-generation"
import type { OpenAIResponsesTool } from "./openai-responses-api-types"
/**
 * Converts AI SDK tool definitions and tool choice into the OpenAI Responses
 * API `tools` / `tool_choice` request parameters.
 *
 * Client function tools pass through as `function` entries; OpenAI
 * provider-defined tools (`openai.*`) are translated into the matching
 * built-in tool entries. Unsupported tools are skipped with a warning.
 *
 * @param tools - tools from the language model call options (may be empty)
 * @param toolChoice - optional tool choice from the call options
 * @param strictJsonSchema - whether function parameter schemas use strict mode
 * @returns request-ready tools, mapped tool choice, and accumulated warnings
 */
export function prepareResponsesTools({
  tools,
  toolChoice,
  strictJsonSchema,
}: {
  tools: LanguageModelV3CallOptions["tools"]
  toolChoice?: LanguageModelV3CallOptions["toolChoice"]
  strictJsonSchema: boolean
}): {
  tools?: Array<OpenAIResponsesTool>
  toolChoice?:
    | "auto"
    | "none"
    | "required"
    | { type: "file_search" }
    | { type: "web_search_preview" }
    | { type: "web_search" }
    | { type: "function"; name: string }
    | { type: "code_interpreter" }
    | { type: "image_generation" }
  toolWarnings: SharedV3Warning[]
} {
  // when the tools array is empty, change it to undefined to prevent errors:
  tools = tools?.length ? tools : undefined
  const toolWarnings: SharedV3Warning[] = []
  if (tools == null) {
    return { tools: undefined, toolChoice: undefined, toolWarnings }
  }
  const openaiTools: Array<OpenAIResponsesTool> = []
  for (const tool of tools) {
    switch (tool.type) {
      case "function":
        openaiTools.push({
          type: "function",
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          strict: strictJsonSchema,
        })
        break
      case "provider": {
        switch (tool.id) {
          case "openai.file_search": {
            const args = fileSearchArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "file_search",
              vector_store_ids: args.vectorStoreIds,
              max_num_results: args.maxNumResults,
              ranking_options: args.ranking
                ? {
                    ranker: args.ranking.ranker,
                    score_threshold: args.ranking.scoreThreshold,
                  }
                : undefined,
              filters: args.filters,
            })
            break
          }
          case "openai.local_shell": {
            openaiTools.push({
              type: "local_shell",
            })
            break
          }
          case "openai.web_search_preview": {
            const args = webSearchPreviewArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "web_search_preview",
              search_context_size: args.searchContextSize,
              user_location: args.userLocation,
            })
            break
          }
          case "openai.web_search": {
            const args = webSearchArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "web_search",
              filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : undefined,
              search_context_size: args.searchContextSize,
              user_location: args.userLocation,
            })
            break
          }
          case "openai.code_interpreter": {
            const args = codeInterpreterArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "code_interpreter",
              // string => existing container id; otherwise auto container,
              // optionally seeded with uploaded file ids
              container:
                args.container == null
                  ? { type: "auto", file_ids: undefined }
                  : typeof args.container === "string"
                    ? args.container
                    : { type: "auto", file_ids: args.container.fileIds },
            })
            break
          }
          case "openai.image_generation": {
            const args = imageGenerationArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "image_generation",
              background: args.background,
              input_fidelity: args.inputFidelity,
              input_image_mask: args.inputImageMask
                ? {
                    file_id: args.inputImageMask.fileId,
                    image_url: args.inputImageMask.imageUrl,
                  }
                : undefined,
              model: args.model,
              moderation: args.moderation,
              partial_images: args.partialImages,
              quality: args.quality,
              output_compression: args.outputCompression,
              output_format: args.outputFormat,
              size: args.size,
            })
            break
          }
          default: {
            // FIX: unknown provider-defined tool ids were previously dropped
            // silently; warn so callers can see the tool was ignored (matches
            // the unsupported-tool-type handling below).
            toolWarnings.push({ type: "unsupported", feature: `provider tool ${tool.id}` })
            break
          }
        }
        break
      }
      default:
        toolWarnings.push({ type: "unsupported", feature: "tool type" })
        break
    }
  }
  if (toolChoice == null) {
    return { tools: openaiTools, toolChoice: undefined, toolWarnings }
  }
  const type = toolChoice.type
  switch (type) {
    case "auto":
    case "none":
    case "required":
      return { tools: openaiTools, toolChoice: type, toolWarnings }
    case "tool":
      return {
        tools: openaiTools,
        // built-in tools are selected by bare type; everything else is a
        // client function selected by name
        toolChoice:
          toolChoice.toolName === "code_interpreter" ||
          toolChoice.toolName === "file_search" ||
          toolChoice.toolName === "image_generation" ||
          toolChoice.toolName === "web_search_preview" ||
          toolChoice.toolName === "web_search"
            ? { type: toolChoice.toolName }
            : { type: "function", name: toolChoice.toolName },
        toolWarnings,
      }
    default: {
      // exhaustiveness guard: fails to compile if a new choice type appears
      const _exhaustiveCheck: never = type
      throw new UnsupportedFunctionalityError({
        functionality: `tool choice type: ${_exhaustiveCheck}`,
      })
    }
  }
}

View File

@@ -0,0 +1 @@
// Model identifier accepted by the Responses API (kept open as a plain string).
export type OpenAIResponsesModelId = string

View File

@@ -0,0 +1,87 @@
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Input emitted by the model for a code interpreter call.
export const codeInterpreterInputSchema = z.object({
  code: z.string().nullish(),
  containerId: z.string(),
})
// Output returned by the hosted code interpreter: log text and/or image URLs.
export const codeInterpreterOutputSchema = z.object({
  outputs: z
    .array(
      z.discriminatedUnion("type", [
        z.object({ type: z.literal("logs"), logs: z.string() }),
        z.object({ type: z.literal("image"), url: z.string() }),
      ]),
    )
    .nullish(),
})
// Tool configuration: an existing container id, or file ids for an auto container.
export const codeInterpreterArgsSchema = z.object({
  container: z
    .union([
      z.string(),
      z.object({
        fileIds: z.array(z.string()).optional(),
      }),
    ])
    .optional(),
})
type CodeInterpreterArgs = {
  /**
   * The code interpreter container.
   * Can be a container ID
   * or an object that specifies uploaded file IDs to make available to your code.
   */
  container?: string | { fileIds?: string[] }
}
// Provider tool factory for OpenAI's hosted code interpreter. Type params are
// (input, output, args); runtime validation uses the schemas defined above.
export const codeInterpreterToolFactory = createProviderToolFactoryWithOutputSchema<
  {
    /**
     * The code to run, or null if not available.
     */
    code?: string | null
    /**
     * The ID of the container used to run the code.
     */
    containerId: string
  },
  {
    /**
     * The outputs generated by the code interpreter, such as logs or images.
     * Can be null if no outputs are available.
     */
    outputs?: Array<
      | {
          type: "logs"
          /**
           * The logs output from the code interpreter.
           */
          logs: string
        }
      | {
          type: "image"
          /**
           * The URL of the image output from the code interpreter.
           */
          url: string
        }
    > | null
  },
  CodeInterpreterArgs
>({
  id: "openai.code_interpreter",
  inputSchema: codeInterpreterInputSchema,
  outputSchema: codeInterpreterOutputSchema,
})
/**
 * Creates the OpenAI code interpreter provider tool.
 * Container configuration is optional and defaults to an empty config.
 */
export const codeInterpreter = (args: CodeInterpreterArgs = {}) => codeInterpreterToolFactory(args)

View File

@@ -0,0 +1,127 @@
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import type {
OpenAIResponsesFileSearchToolComparisonFilter,
OpenAIResponsesFileSearchToolCompoundFilter,
} from "../openai-responses-api-types"
import { z } from "zod/v4"
// Attribute comparison filter: key <operator> value.
const comparisonFilterSchema = z.object({
  key: z.string(),
  type: z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
  value: z.union([z.string(), z.number(), z.boolean()]),
})
// Recursive and/or combinator over filters.
// NOTE(review): annotated ZodType<any> to permit the recursive z.lazy
// reference; a ZodType<OpenAIResponsesFileSearchToolCompoundFilter>
// annotation would keep type safety — TODO confirm it typechecks.
const compoundFilterSchema: z.ZodType<any> = z.object({
  type: z.enum(["and", "or"]),
  filters: z.array(z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)])),
})
// Tool configuration accepted for the file_search built-in tool.
export const fileSearchArgsSchema = z.object({
  vectorStoreIds: z.array(z.string()),
  maxNumResults: z.number().optional(),
  ranking: z
    .object({
      ranker: z.string().optional(),
      scoreThreshold: z.number().optional(),
    })
    .optional(),
  filters: z.union([comparisonFilterSchema, compoundFilterSchema]).optional(),
})
// Output of a file search call: the executed queries and (optional) matches.
export const fileSearchOutputSchema = z.object({
  queries: z.array(z.string()),
  results: z
    .array(
      z.object({
        attributes: z.record(z.string(), z.unknown()),
        fileId: z.string(),
        filename: z.string(),
        score: z.number(),
        text: z.string(),
      }),
    )
    .nullable(),
})
// Provider tool for OpenAI's hosted file search. Type params are
// (input, output, args): the tool takes no model input; the output carries
// the executed queries and optional results; args configure the search.
export const fileSearch = createProviderToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The search query to execute.
     */
    queries: string[]
    /**
     * The results of the file search tool call.
     */
    results:
      | null
      | {
          /**
           * Set of 16 key-value pairs that can be attached to an object.
           * This can be useful for storing additional information about the object
           * in a structured format, and querying for objects via API or the dashboard.
           * Keys are strings with a maximum length of 64 characters.
           * Values are strings with a maximum length of 512 characters, booleans, or numbers.
           */
          attributes: Record<string, unknown>
          /**
           * The unique ID of the file.
           */
          fileId: string
          /**
           * The name of the file.
           */
          filename: string
          /**
           * The relevance score of the file - a value between 0 and 1.
           */
          score: number
          /**
           * The text that was retrieved from the file.
           */
          text: string
        }[]
  },
  {
    /**
     * List of vector store IDs to search through.
     */
    vectorStoreIds: string[]
    /**
     * Maximum number of search results to return. Defaults to 10.
     */
    maxNumResults?: number
    /**
     * Ranking options for the search.
     */
    ranking?: {
      /**
       * The ranker to use for the file search.
       */
      ranker?: string
      /**
       * The score threshold for the file search, a number between 0 and 1.
       * Numbers closer to 1 will attempt to return only the most relevant results,
       * but may return fewer results.
       */
      scoreThreshold?: number
    }
    /**
     * A filter to apply.
     */
    filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter
  }
>({
  id: "openai.file_search",
  inputSchema: z.object({}),
  outputSchema: fileSearchOutputSchema,
})

View File

@@ -0,0 +1,114 @@
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Tool configuration accepted for OpenAI image generation. `.strict()`
// rejects unknown keys so misspelled option names fail fast.
export const imageGenerationArgsSchema = z
  .object({
    background: z.enum(["auto", "opaque", "transparent"]).optional(),
    inputFidelity: z.enum(["low", "high"]).optional(),
    inputImageMask: z
      .object({
        fileId: z.string().optional(),
        imageUrl: z.string().optional(),
      })
      .optional(),
    model: z.string().optional(),
    moderation: z.enum(["auto"]).optional(),
    outputCompression: z.number().int().min(0).max(100).optional(),
    outputFormat: z.enum(["png", "jpeg", "webp"]).optional(),
    partialImages: z.number().int().min(0).max(3).optional(),
    quality: z.enum(["auto", "low", "medium", "high"]).optional(),
    size: z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional(),
  })
  .strict()
// Result of an image generation call: the base64-encoded image.
export const imageGenerationOutputSchema = z.object({
  result: z.string(),
})
type ImageGenerationArgs = {
  /**
   * Background type for the generated image. Default is 'auto'.
   */
  background?: "auto" | "opaque" | "transparent"
  /**
   * Input fidelity for the generated image. Default is 'low'.
   */
  inputFidelity?: "low" | "high"
  /**
   * Optional mask for inpainting.
   * Contains image_url (string, optional) and file_id (string, optional).
   */
  inputImageMask?: {
    /**
     * File ID for the mask image.
     */
    fileId?: string
    /**
     * Base64-encoded mask image.
     */
    imageUrl?: string
  }
  /**
   * The image generation model to use. Default: gpt-image-1.
   */
  model?: string
  /**
   * Moderation level for the generated image. Default: auto.
   */
  moderation?: "auto"
  /**
   * Compression level for the output image. Default: 100.
   */
  outputCompression?: number
  /**
   * The output format of the generated image. One of png, webp, or jpeg.
   * Default: png
   */
  outputFormat?: "png" | "jpeg" | "webp"
  /**
   * Number of partial images to generate in streaming mode, from 0 (default value) to 3.
   */
  partialImages?: number
  /**
   * The quality of the generated image.
   * One of low, medium, high, or auto. Default: auto.
   */
  quality?: "auto" | "low" | "medium" | "high"
  /**
   * The size of the generated image.
   * One of 1024x1024, 1024x1536, 1536x1024, or auto.
   * Default: auto.
   */
  size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024"
}
// Provider tool factory for OpenAI image generation: no model input, the
// base64 image as output, and the args above as configuration.
const imageGenerationToolFactory = createProviderToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The generated image encoded in base64.
     */
    result: string
  },
  ImageGenerationArgs
>({
  id: "openai.image_generation",
  inputSchema: z.object({}),
  outputSchema: imageGenerationOutputSchema,
})
/**
 * Creates the OpenAI image generation provider tool.
 * All generation settings are optional and default to an empty config.
 */
export const imageGeneration = (args: ImageGenerationArgs = {}) => imageGenerationToolFactory(args)

View File

@@ -0,0 +1,64 @@
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Input emitted by the model for a local shell call: a single exec action.
export const localShellInputSchema = z.object({
  action: z.object({
    type: z.literal("exec"),
    command: z.array(z.string()),
    timeoutMs: z.number().optional(),
    user: z.string().optional(),
    workingDirectory: z.string().optional(),
    env: z.record(z.string(), z.string()).optional(),
  }),
})
// Output the client reports back after executing the command.
export const localShellOutputSchema = z.object({
  output: z.string(),
})
// Provider tool for OpenAI's local shell: the model requests an exec action
// and the client runs it and returns the command output.
export const localShell = createProviderToolFactoryWithOutputSchema<
  {
    /**
     * Execute a shell command on the server.
     */
    action: {
      type: "exec"
      /**
       * The command to run.
       */
      command: string[]
      /**
       * Optional timeout in milliseconds for the command.
       */
      timeoutMs?: number
      /**
       * Optional user to run the command as.
       */
      user?: string
      /**
       * Optional working directory to run the command in.
       */
      workingDirectory?: string
      /**
       * Environment variables to set for the command.
       */
      env?: Record<string, string>
    }
  },
  {
    /**
     * The output of local shell tool call.
     */
    output: string
  },
  {}
>({
  id: "openai.local_shell",
  inputSchema: localShellInputSchema,
  outputSchema: localShellOutputSchema,
})

View File

@@ -0,0 +1,103 @@
import { createProviderToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Args validation schema for the web_search_preview built-in tool.
export const webSearchPreviewArgsSchema = z.object({
  /**
   * Search context size to use for the web search.
   * - high: Most comprehensive context, highest cost, slower response
   * - medium: Balanced context, cost, and latency (default)
   * - low: Least context, lowest cost, fastest response
   */
  searchContextSize: z.enum(["low", "medium", "high"]).optional(),
  /**
   * User location information to provide geographically relevant search results.
   */
  userLocation: z
    .object({
      /**
       * Type of location (always 'approximate')
       */
      type: z.literal("approximate"),
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country: z.string().optional(),
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city: z.string().optional(),
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region: z.string().optional(),
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone: z.string().optional(),
    })
    .optional(),
})
// Provider tool for OpenAI's web_search_preview built-in tool. The input
// schema models the search/open_page/find actions reported by the API.
export const webSearchPreview = createProviderToolFactory<
  {
    // Web search doesn't take input parameters - it's controlled by the prompt
  },
  {
    /**
     * Search context size to use for the web search.
     * - high: Most comprehensive context, highest cost, slower response
     * - medium: Balanced context, cost, and latency (default)
     * - low: Least context, lowest cost, fastest response
     */
    searchContextSize?: "low" | "medium" | "high"
    /**
     * User location information to provide geographically relevant search results.
     */
    userLocation?: {
      /**
       * Type of location (always 'approximate')
       */
      type: "approximate"
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country?: string
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city?: string
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region?: string
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone?: string
    }
  }
>({
  id: "openai.web_search_preview",
  inputSchema: z.object({
    action: z
      .discriminatedUnion("type", [
        z.object({
          type: z.literal("search"),
          query: z.string().nullish(),
        }),
        z.object({
          type: z.literal("open_page"),
          url: z.string(),
        }),
        z.object({
          type: z.literal("find"),
          url: z.string(),
          pattern: z.string(),
        }),
      ])
      .nullish(),
  }),
})

View File

@@ -0,0 +1,102 @@
import { createProviderToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Tool configuration accepted for the web_search built-in tool.
export const webSearchArgsSchema = z.object({
  // Optional domain allow-list applied to search results.
  filters: z
    .object({
      allowedDomains: z.array(z.string()).optional(),
    })
    .optional(),
  searchContextSize: z.enum(["low", "medium", "high"]).optional(),
  // Approximate user location for geographically relevant results.
  userLocation: z
    .object({
      type: z.literal("approximate"),
      country: z.string().optional(),
      city: z.string().optional(),
      region: z.string().optional(),
      timezone: z.string().optional(),
    })
    .optional(),
})
// Provider tool factory for OpenAI's web_search built-in tool. The input
// schema models the search/open_page/find actions reported by the API.
export const webSearchToolFactory = createProviderToolFactory<
  {
    // Web search doesn't take input parameters - it's controlled by the prompt
  },
  {
    /**
     * Filters for the search.
     */
    filters?: {
      /**
       * Allowed domains for the search.
       * If not provided, all domains are allowed.
       * Subdomains of the provided domains are allowed as well.
       */
      allowedDomains?: string[]
    }
    /**
     * Search context size to use for the web search.
     * - high: Most comprehensive context, highest cost, slower response
     * - medium: Balanced context, cost, and latency (default)
     * - low: Least context, lowest cost, fastest response
     */
    searchContextSize?: "low" | "medium" | "high"
    /**
     * User location information to provide geographically relevant search results.
     */
    userLocation?: {
      /**
       * Type of location (always 'approximate')
       */
      type: "approximate"
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country?: string
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city?: string
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region?: string
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone?: string
    }
  }
>({
  id: "openai.web_search",
  inputSchema: z.object({
    action: z
      .discriminatedUnion("type", [
        z.object({
          type: z.literal("search"),
          query: z.string().nullish(),
        }),
        z.object({
          type: z.literal("open_page"),
          url: z.string(),
        }),
        z.object({
          type: z.literal("find"),
          url: z.string(),
          pattern: z.string(),
        }),
      ])
      .nullish(),
  }),
})
/**
 * Creates the OpenAI web search provider tool.
 * All search settings are optional and default to an empty config.
 */
export const webSearch = (args: Parameters<typeof webSearchToolFactory>[0] = {}) => webSearchToolFactory(args)

116
packages/core/src/model.ts Normal file
View File

@@ -0,0 +1,116 @@
import { DateTime, Schema } from "effect"
import { DateTimeUtcFromMillis } from "effect/Schema"
import { ProviderV2 } from "./provider"
// Branded identifier for a model.
export const ID = Schema.String.pipe(Schema.brand("ModelV2.ID"))
export type ID = typeof ID.Type
// Branded identifier for a model variant (a named option preset) — see Info.variants.
export const VariantID = Schema.String.pipe(Schema.brand("VariantID"))
export type VariantID = typeof VariantID.Type
// Grouping of models, eg claude opus, claude sonnet
export const Family = Schema.String.pipe(Schema.brand("Family"))
export type Family = typeof Family.Type
// What a model can do: tool calling plus accepted/produced content types.
export const Capabilities = Schema.Struct({
  tools: Schema.Boolean,
  // mime patterns, image, audio, video/*, text/*
  input: Schema.String.pipe(Schema.Array),
  output: Schema.String.pipe(Schema.Array),
})
export type Capabilities = typeof Capabilities.Type
// Pricing entry; `tier` optionally scopes the entry to a context-size bracket.
export const Cost = Schema.Struct({
  tier: Schema.Struct({
    type: Schema.Literal("context"),
    size: Schema.Int,
  }).pipe(Schema.optional),
  input: Schema.Finite,
  output: Schema.Finite,
  // Cache read/write pricing.
  cache: Schema.Struct({
    read: Schema.Finite,
    write: Schema.Finite,
  }),
})
// Fully-qualified reference to a model variant on a specific provider.
export const Ref = Schema.Struct({
  id: ID,
  providerID: ProviderV2.ID,
  variant: VariantID,
})
export type Ref = typeof Ref.Type
/**
 * Canonical model metadata: identity, provider endpoint, capabilities,
 * request options, pricing, lifecycle status, and token limits.
 */
export class Info extends Schema.Class<Info>("ModelV2.Info")({
  id: ID,
  // Identifier used when calling the provider's API (may differ from `id`).
  apiID: ID,
  providerID: ProviderV2.ID,
  family: Family.pipe(Schema.optional),
  // Human-readable display name.
  name: Schema.String,
  endpoint: ProviderV2.Endpoint,
  capabilities: Capabilities,
  // Provider options plus an optional default variant selector.
  options: Schema.Struct({
    ...ProviderV2.Options.fields,
    variant: Schema.String.pipe(Schema.optional),
  }),
  // Named option presets that can be selected per request.
  variants: Schema.Struct({
    id: VariantID,
    ...ProviderV2.Options.fields,
  }).pipe(Schema.Array),
  time: Schema.Struct({
    released: DateTimeUtcFromMillis,
  }),
  // One entry per pricing tier (see Cost.tier).
  cost: Cost.pipe(Schema.Array),
  status: Schema.Literals(["alpha", "beta", "deprecated", "active"]),
  enabled: Schema.Boolean,
  // Token limits: total context window, optional input cap, and output cap.
  limit: Schema.Struct({
    context: Schema.Int,
    input: Schema.Int.pipe(Schema.optional),
    output: Schema.Int,
  }),
}) {
  /**
   * Placeholder Info for a model without a catalog entry: unknown endpoint,
   * no capabilities, zero limits and empty cost, but enabled and active.
   */
  static empty(providerID: ProviderV2.ID, modelID: ID) {
    return new Info({
      id: modelID,
      apiID: modelID,
      providerID,
      name: modelID,
      endpoint: {
        type: "unknown",
      },
      capabilities: {
        tools: false,
        input: [],
        output: [],
      },
      options: {
        headers: {},
        body: {},
        aisdk: {
          provider: {},
          request: {},
        },
      },
      variants: [],
      time: {
        released: DateTime.makeUnsafe(0),
      },
      cost: [],
      status: "active",
      enabled: true,
      limit: {
        context: 0,
        output: 0,
      },
    })
  }
}
export function parse(input: string): { providerID: ProviderV2.ID; modelID: ID } {
const [providerID, ...modelID] = input.split("/")
return {
providerID: ProviderV2.ID.make(providerID),
modelID: ID.make(modelID.join("/")),
}
}
export * as ModelV2 from "./model"

146
packages/core/src/plugin.ts Normal file
View File

@@ -0,0 +1,146 @@
export * as PluginV2 from "./plugin"
import { createDraft, finishDraft, type Draft } from "immer"
import type { LanguageModelV3 } from "@ai-sdk/provider"
import { type ProviderV2 } from "./provider"
import { Context, Effect, Layer, Schema } from "effect"
import type { ModelV2 } from "./model"
// Branded plugin identifier.
export const ID = Schema.String.pipe(Schema.brand("Plugin.ID"))
export type ID = typeof ID.Type
// Registry of hook names, each declaring a read-only input and a mutable output.
type HookSpec = {
  // Rewrite (or cancel) a provider definition before it is used.
  "provider.update": {
    input: {}
    output: {
      provider: ProviderV2.Info
      cancel: boolean
    }
  }
  // Rewrite (or cancel) a model definition before it is used.
  "model.update": {
    input: {}
    output: {
      model: ModelV2.Info
      cancel: boolean
    }
  }
  // Produce a language model from an already-instantiated SDK.
  "aisdk.language": {
    input: {
      model: ModelV2.Info
      sdk: any
      options: Record<string, any>
    }
    output: {
      language?: LanguageModelV3
    }
  }
  // Instantiate the provider SDK for a given package name.
  "aisdk.sdk": {
    input: {
      model: ModelV2.Info
      package: string
      options: Record<string, any>
    }
    output: {
      sdk?: any
    }
  }
}
// Event shape seen by hook implementations: readonly input fields merged with
// output fields, where object-valued outputs are exposed as immer Drafts.
export type Hooks = {
  [Name in keyof HookSpec]: Readonly<HookSpec[Name]["input"]> & {
    -readonly [Field in keyof HookSpec[Name]["output"]]: HookSpec[Name]["output"][Field] extends object
      ? Draft<HookSpec[Name]["output"][Field]>
      : HookSpec[Name]["output"][Field]
  }
}
// A plugin's hook implementations; every hook is optional.
export type HookFunctions = {
  [key in keyof Hooks]?: (input: Hooks[key]) => Effect.Effect<void>
}
export type HookInput<Name extends keyof Hooks> = HookSpec[Name]["input"]
export type HookOutput<Name extends keyof Hooks> = HookSpec[Name]["output"]
// Setup effect a plugin runs on registration; may yield hooks or nothing.
export type Effect = Effect.Effect<HookFunctions | void, never, never>
/**
 * Declares a plugin: a branded id plus a setup effect that yields its hook
 * implementations (or void for no hooks). Generic over the effect's
 * requirements `R` so context dependencies are preserved for the caller.
 */
export function define<R>(input: { id: ID; effect: Effect.Effect<HookFunctions | void, never, R> }) {
  return input
}
// Plugin registry contract: register, unregister, and fire hooks.
export interface Interface {
  // Registers a plugin (replacing any previous registration with the same id).
  readonly add: (input: { id: ID; effect: Effect }) => Effect.Effect<void>
  // Removes a plugin by id.
  readonly remove: (id: ID) => Effect.Effect<void>
  // Runs every registered handler for `name`, letting each mutate the output,
  // and returns the merged input+output.
  readonly trigger: <Name extends keyof Hooks>(
    name: Name,
    input: HookInput<Name>,
    output: HookOutput<Name>,
  ) => Effect.Effect<HookInput<Name> & HookOutput<Name>>
}
export class Service extends Context.Service<Service, Interface>()("@opencode/v2/Plugin") {}
// Live implementation of the plugin registry.
export const layer = Layer.effect(
  Service,
  Effect.gen(function* () {
    // Registered plugins in registration order; replaced wholesale on
    // add/remove so in-flight iteration sees a stable array.
    let hooks: {
      id: ID
      hooks: HookFunctions
    }[] = []
    const svc = Service.of({
      add: Effect.fn("Plugin.add")(function* (input) {
        // Run the plugin's setup effect; a void result means "no hooks".
        const result = yield* input.effect
        if (!result) return
        // Re-adding the same id replaces the previous registration.
        hooks = [
          ...hooks.filter((item) => item.id !== input.id),
          {
            id: input.id,
            hooks: result,
          },
        ]
      }),
      trigger: Effect.fn("Plugin.trigger")(function* (name, input, output) {
        // Wrap each object-valued output field in an immer draft so hooks
        // can mutate it in place; primitive outputs stay plain values.
        const draftEntries = new Map<string, ReturnType<typeof createDraft>>()
        const event = {
          ...input,
          ...output,
        } as Record<string, unknown>
        for (const [field, value] of Object.entries(output)) {
          if (value && typeof value === "object") {
            draftEntries.set(field, createDraft(value))
            event[field] = draftEntries.get(field)
          }
        }
        // Invoke matching hooks in registration order, each in a traced span.
        for (const item of hooks) {
          const match = item.hooks[name]
          if (!match) continue
          yield* match(event as any).pipe(
            Effect.withSpan(`Plugin.hook.${name}`, {
              attributes: {
                plugin: item.id,
                hook: name,
              },
            }),
          )
        }
        // Finalize drafts back into plain objects before returning.
        for (const [field, draft] of draftEntries) {
          event[field] = finishDraft(draft)
        }
        return event as any
      }),
      remove: Effect.fn("Plugin.remove")(function* (id) {
        hooks = hooks.filter((item) => item.id !== id)
      }),
    })
    return svc
  }),
)
export const defaultLayer = layer
// opencode
// sdk

View File

@@ -0,0 +1,27 @@
import { Effect } from "effect"
import { AuthV2 } from "../auth"
import { PluginV2 } from "../plugin"
/**
 * Applies stored authentication to providers: marks the provider enabled via
 * auth and copies API keys / OAuth access tokens into the AI SDK options.
 */
export const AuthPlugin = PluginV2.define({
  id: PluginV2.ID.make("auth"),
  effect: Effect.gen(function* () {
    const auth = yield* AuthV2.Service
    return {
      "provider.update": Effect.fn(function* (event) {
        const account = yield* auth.active(AuthV2.ServiceID.make(event.provider.id)).pipe(Effect.orDie)
        if (!account) return
        event.provider.enabled = {
          via: "auth",
          service: account.serviceID,
        }
        const sdkOptions = event.provider.options.aisdk.provider
        const credential = account.credential
        if (credential.type === "api") {
          sdkOptions.apiKey = credential.key
          // Any stored credential metadata rides along with the key.
          Object.assign(sdkOptions, credential.metadata ?? {})
        }
        if (credential.type === "oauth") {
          sdkOptions.apiKey = credential.access
        }
      }),
    }
  }),
})

View File

@@ -0,0 +1,18 @@
import { Effect } from "effect"
import { PluginV2 } from "../plugin"
/**
 * Enables a provider when one of its declared environment variables is set,
 * recording which variable satisfied the check.
 */
export const EnvPlugin = PluginV2.define({
  id: PluginV2.ID.make("env"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        // First declared env var with a truthy (non-empty) value wins.
        const matched = event.provider.env.find((name) => process.env[name])
        if (!matched) return
        event.provider.enabled = {
          via: "env",
          name: matched,
        }
      }),
    }
  }),
})

View File

@@ -0,0 +1 @@
export { ProviderPlugins } from "./provider/index"

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/** Builds the "@ai-sdk/alibaba" provider when that package is requested. */
export const AlibabaPlugin = PluginV2.define({
  id: PluginV2.ID.make("alibaba"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/alibaba") return
        // Lazy import keeps the dependency off the startup path.
        const alibaba = yield* Effect.promise(() => import("@ai-sdk/alibaba"))
        event.sdk = alibaba.createAlibaba(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,94 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/**
 * Prefix a Bedrock model ID with its cross-region inference profile
 * (us./eu./jp./apac./au.) when the model/region combination requires one.
 * IDs that already carry a known cross-region prefix are returned untouched,
 * so catalog entries models.dev marks global/us/eu/etc. are never
 * double-prefixed. Defaults to us-east-1 when no region is given.
 */
function resolveModelID(modelID: string, region: string | undefined) {
  const knownPrefixes = ["global.", "us.", "eu.", "jp.", "apac.", "au."]
  if (knownPrefixes.some((prefix) => modelID.startsWith(prefix))) return modelID
  const effective = region ?? "us-east-1"
  const geo = effective.split("-")[0]
  const mentions = (names: string[]) => names.some((name) => modelID.includes(name))
  if (geo === "us") {
    // GovCloud regions never take the "us." profile prefix.
    const needsPrefix =
      mentions(["nova-micro", "nova-lite", "nova-pro", "nova-premier", "nova-2", "claude", "deepseek"]) &&
      !effective.startsWith("us-gov")
    return needsPrefix ? `${geo}.${modelID}` : modelID
  }
  if (geo === "eu") {
    const regionOk = [
      "eu-west-1",
      "eu-west-2",
      "eu-west-3",
      "eu-north-1",
      "eu-central-1",
      "eu-south-1",
      "eu-south-2",
    ].some((item) => effective.includes(item))
    const modelOk = mentions(["claude", "nova-lite", "nova-micro", "llama3", "pixtral"])
    return regionOk && modelOk ? `${geo}.${modelID}` : modelID
  }
  if (geo !== "ap") return modelID
  // Australia-hosted regions route the newest Claude models through "au.".
  if (
    ["ap-southeast-2", "ap-southeast-4"].includes(effective) &&
    mentions(["anthropic.claude-sonnet-4-5", "anthropic.claude-haiku"])
  ) {
    return `au.${modelID}`
  }
  const apPrefix = effective === "ap-northeast-1" ? "jp" : "apac"
  return mentions(["claude", "nova-lite", "nova-micro", "nova-pro"]) ? `${apPrefix}.${modelID}` : modelID
}
/**
 * Amazon Bedrock provider wiring: normalizes private-endpoint config, sets up
 * credentials (bearer token or the default AWS provider chain), and resolves
 * cross-region inference-profile model IDs before model lookup.
 */
export const AmazonBedrockPlugin = PluginV2.define({
  id: PluginV2.ID.make("amazon-bedrock"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (evt) {
        if (evt.provider.id !== ProviderV2.ID.amazonBedrock) return
        if (evt.provider.endpoint.type !== "aisdk") return
        if (typeof evt.provider.options.aisdk.provider.endpoint !== "string") return
        // The AI SDK expects a base URL, but users configure Bedrock private/VPC
        // endpoints as `endpoint`; move it into the catalog endpoint URL once.
        evt.provider.endpoint.url = evt.provider.options.aisdk.provider.endpoint
        delete evt.provider.options.aisdk.provider.endpoint
      }),
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@ai-sdk/amazon-bedrock") return
        // Work on a shallow copy so option mutations stay local to this hook.
        const options = { ...evt.options }
        const profile = typeof options.profile === "string" ? options.profile : process.env.AWS_PROFILE
        const region = typeof options.region === "string" ? options.region : (process.env.AWS_REGION ?? "us-east-1")
        // Env var takes precedence over a configured bearer token.
        const bearerToken =
          process.env.AWS_BEARER_TOKEN_BEDROCK ??
          (typeof options.bearerToken === "string" ? options.bearerToken : undefined)
        // Surface a configured bearer token through the env var the SDK reads.
        if (bearerToken && !process.env.AWS_BEARER_TOKEN_BEDROCK) process.env.AWS_BEARER_TOKEN_BEDROCK = bearerToken
        options.region = region
        if (typeof options.endpoint === "string") options.baseURL = options.endpoint
        if (!bearerToken && options.credentialProvider === undefined) {
          // Do not gate SDK creation on explicit AWS env vars. The default chain
          // also handles ~/.aws/credentials, SSO, process creds, and instance roles.
          const { fromNodeProviderChain } = yield* Effect.promise(() => import("@aws-sdk/credential-providers"))
          options.credentialProvider = fromNodeProviderChain(profile ? { profile } : {})
        }
        const mod = yield* Effect.promise(() => import("@ai-sdk/amazon-bedrock"))
        evt.sdk = mod.createAmazonBedrock(options)
      }),
      "aisdk.language": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.amazonBedrock) return
        const region = typeof evt.options.region === "string" ? evt.options.region : process.env.AWS_REGION
        // Apply the cross-region inference-profile prefix before lookup.
        evt.language = evt.sdk.languageModel(resolveModelID(evt.model.apiID, region))
      }),
    }
  }),
})

View File

@@ -0,0 +1,21 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/**
 * Anthropic provider wiring: opts into beta features via request headers and
 * builds the "@ai-sdk/anthropic" SDK on demand.
 */
export const AnthropicPlugin = PluginV2.define({
  id: PluginV2.ID.make("anthropic"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.anthropic) return
        // Enable the interleaved-thinking and fine-grained tool-streaming betas.
        event.provider.options.headers["anthropic-beta"] =
          "interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14"
      }),
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/anthropic") return
        const anthropic = yield* Effect.promise(() => import("@ai-sdk/anthropic"))
        event.sdk = anthropic.createAnthropic(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,67 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/**
 * Pick the most capable language-model entry point an AI SDK instance exposes.
 * Preference order: chat (only when explicitly requested), responses, messages,
 * chat, then the generic languageModel factory as the fallback.
 */
function selectLanguage(sdk: any, modelID: string, useChat: boolean) {
  if (useChat && sdk.chat) return sdk.chat(modelID)
  for (const entry of ["responses", "messages", "chat"]) {
    if (sdk[entry]) return sdk[entry](modelID)
  }
  return sdk.languageModel(modelID)
}
/**
 * Azure OpenAI provider wiring. Resolves the resource name from config or
 * AZURE_RESOURCE_NAME, validates it before SDK creation, and selects the
 * language-model endpoint via the shared selectLanguage helper.
 */
export const AzurePlugin = PluginV2.define({
  id: PluginV2.ID.make("azure"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.azure) return
        const fromConfig = event.provider.options.aisdk.provider.resourceName
        const hasConfig = typeof fromConfig === "string" && fromConfig.trim() !== ""
        const resourceName = hasConfig ? fromConfig : process.env.AZURE_RESOURCE_NAME
        if (resourceName) event.provider.options.aisdk.provider.resourceName = resourceName
      }),
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/azure") return
        if (event.model.providerID === ProviderV2.ID.azure) {
          // Without a resource name, base URL, or explicit endpoint URL the SDK
          // cannot build request URLs — fail early with guidance.
          const hasEndpoint = event.model.endpoint.type === "aisdk" && Boolean(event.model.endpoint.url)
          if (!event.options.resourceName && !event.options.baseURL && !hasEndpoint) {
            throw new Error(
              "AZURE_RESOURCE_NAME is missing, set it using env var or reconnecting the azure provider and setting it",
            )
          }
        }
        const azure = yield* Effect.promise(() => import("@ai-sdk/azure"))
        event.sdk = azure.createAzure(event.options)
      }),
      "aisdk.language": Effect.fn(function* (event) {
        if (event.model.providerID !== ProviderV2.ID.azure) return
        event.language = selectLanguage(event.sdk, event.model.apiID, Boolean(event.options.useCompletionUrls))
      }),
    }
  }),
})
/**
 * Azure Cognitive Services variant: derives the OpenAI base URL from
 * AZURE_COGNITIVE_SERVICES_RESOURCE_NAME and reuses shared endpoint selection.
 */
export const AzureCognitiveServicesPlugin = PluginV2.define({
  id: PluginV2.ID.make("azure-cognitive-services"),
  effect: Effect.gen(function* () {
    const id = ProviderV2.ID.make("azure-cognitive-services")
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== id) return
        const resourceName = process.env.AZURE_COGNITIVE_SERVICES_RESOURCE_NAME
        if (!resourceName) return
        event.provider.options.aisdk.provider.baseURL = `https://${resourceName}.cognitiveservices.azure.com/openai`
      }),
      "aisdk.language": Effect.fn(function* (event) {
        if (event.model.providerID !== id) return
        event.language = selectLanguage(event.sdk, event.model.apiID, Boolean(event.options.useCompletionUrls))
      }),
    }
  }),
})

View File

@@ -0,0 +1,20 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/** Cerebras: tags requests with the integration header and loads its SDK. */
export const CerebrasPlugin = PluginV2.define({
  id: PluginV2.ID.make("cerebras"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.make("cerebras")) return
        // Identify opencode as the third-party integration to Cerebras.
        event.provider.options.headers["X-Cerebras-3rd-Party-Integration"] = "opencode"
      }),
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/cerebras") return
        const cerebras = yield* Effect.promise(() => import("@ai-sdk/cerebras"))
        event.sdk = cerebras.createCerebras(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,81 @@
import os from "os"
import { InstallationVersion } from "../../installation/version"
import { Effect, Option, Schema } from "effect"
import { PluginV2 } from "../../plugin"
/**
 * Cloudflare AI Gateway: builds a unified gateway SDK when account, gateway,
 * and API token are all resolvable and no explicit baseURL overrides it.
 */
export const CloudflareAIGatewayPlugin = PluginV2.define({
  id: PluginV2.ID.make("cloudflare-ai-gateway"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "ai-gateway-provider") return
        // A user-supplied baseURL means the gateway is addressed directly.
        if (event.options.baseURL) return
        const config = gatewayConfig(event.options)
        if (!config) return
        const metadata = gatewayMetadata(event.options)
        const { createAiGateway } = yield* Effect.promise(() => import("ai-gateway-provider")).pipe(Effect.orDie)
        const { createUnified } = yield* Effect.promise(() => import("ai-gateway-provider/providers/unified")).pipe(
          Effect.orDie,
        )
        const gateway = createAiGateway({
          accountId: config.accountId,
          gateway: config.gatewayId,
          apiKey: config.apiKey,
          options: gatewayOptions(event.options, metadata),
        } as any)
        const unified = createUnified()
        // Route every model through the unified provider wrapper.
        event.sdk = {
          languageModel(modelID: string) {
            return gateway(unified(modelID))
          },
        }
      }),
    }
  }),
})
/** Resolved Cloudflare AI Gateway connection settings. */
type GatewayConfig = {
  accountId: string
  gatewayId: string
  apiKey: string
}
/** Safe JSON decoder that yields Option.none on malformed input. */
const decodeJson = Schema.decodeUnknownOption(Schema.UnknownFromJsonString)
/**
 * Assemble the gateway config from env vars (preferred) and options.
 * Returns undefined unless account, gateway, and API key are all present.
 */
function gatewayConfig(options: Record<string, unknown>): GatewayConfig | undefined {
  const accountId = process.env.CLOUDFLARE_ACCOUNT_ID ?? stringOption(options, "accountId")
  // AuthPlugin copies CLI prompt metadata into options. The prompt stores the
  // gateway as gatewayId, while older config examples may use gateway.
  const gatewayId =
    process.env.CLOUDFLARE_GATEWAY_ID ?? stringOption(options, "gatewayId") ?? stringOption(options, "gateway")
  const apiKey = process.env.CLOUDFLARE_API_TOKEN ?? process.env.CF_AIG_TOKEN ?? stringOption(options, "apiKey")
  if (accountId && gatewayId && apiKey) return { accountId, gatewayId, apiKey }
  return undefined
}
/**
 * Gateway logging metadata: the typed metadata option wins; otherwise fall
 * back to the legacy cf-aig-metadata header (JSON-encoded).
 */
function gatewayMetadata(options: Record<string, unknown>) {
  if (options.metadata !== undefined) return options.metadata
  const headers = options.headers as Record<string, string> | undefined
  const raw = headers?.["cf-aig-metadata"]
  if (!raw) return undefined
  return Option.getOrUndefined(decodeJson(raw))
}
/** Per-request gateway options plus an identifying User-Agent header. */
function gatewayOptions(options: Record<string, unknown>, metadata: unknown) {
  return {
    metadata,
    cacheTtl: options.cacheTtl,
    cacheKey: options.cacheKey,
    skipCache: options.skipCache,
    collectLog: options.collectLog,
    headers: {
      "User-Agent": `opencode/${InstallationVersion} cloudflare-ai-gateway (${os.platform()} ${os.release()}; ${os.arch()})`,
    },
  }
}
/** Read a string-valued option; non-string values count as absent. */
function stringOption(options: Record<string, unknown>, key: string) {
  const value = options[key]
  return typeof value === "string" ? value : undefined
}

View File

@@ -0,0 +1,69 @@
import os from "os"
import { InstallationVersion } from "../../installation/version"
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
const providerID = ProviderV2.ID.make("cloudflare-workers-ai")
/**
 * Cloudflare Workers AI: derives the account-scoped OpenAI-compatible endpoint
 * and instantiates the generic OpenAI-compatible SDK against it.
 */
export const CloudflareWorkersAIPlugin = PluginV2.define({
  id: PluginV2.ID.make("cloudflare-workers-ai"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== providerID) return
        if (event.provider.endpoint.type !== "aisdk") return
        // Respect an endpoint URL that is already configured.
        if (event.provider.endpoint.url) return
        const accountId = resolveAccountId(event.provider.options.aisdk.provider)
        if (accountId) event.provider.endpoint.url = workersEndpoint(accountId)
      }),
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.model.providerID !== providerID) return
        if (event.package !== "@ai-sdk/openai-compatible") return
        if (!hasWorkersEndpoint(event.model.endpoint)) return
        const compat = yield* Effect.promise(() => import("@ai-sdk/openai-compatible"))
        event.sdk = compat.createOpenAICompatible(sdkOptions(event.options) as any)
      }),
      "aisdk.language": Effect.fn(function* (event) {
        if (event.model.providerID !== providerID) return
        event.language = event.sdk.languageModel(event.model.apiID)
      }),
    }
  }),
})
/** Account ID from CLOUDFLARE_ACCOUNT_ID, falling back to provider options. */
function resolveAccountId(options: Record<string, unknown>) {
  const fromEnv = process.env.CLOUDFLARE_ACCOUNT_ID
  return fromEnv ?? stringOption(options, "accountId")
}
/** OpenAI-compatible Workers AI base URL for the given account. */
function workersEndpoint(accountId: string) {
  return `https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/v1`
}
/** True when the model's endpoint is an aisdk endpoint with a URL set. */
function hasWorkersEndpoint(endpoint: ProviderV2.Endpoint) {
  if (endpoint.type !== "aisdk") return false
  return Boolean(endpoint.url)
}
/**
 * Build options for the OpenAI-compatible SDK: expand the account-ID template
 * in the base URL, prefer CLOUDFLARE_API_KEY, and add an identifying UA.
 */
function sdkOptions(options: Record<string, any>) {
  const userAgent = `opencode/${InstallationVersion} cloudflare-workers-ai (${os.platform()} ${os.release()}; ${os.arch()})`
  return {
    ...options,
    baseURL: expandAccountId(options.baseURL),
    apiKey: process.env.CLOUDFLARE_API_KEY ?? options.apiKey,
    headers: {
      "User-Agent": userAgent,
      ...options.headers,
    },
    name: providerID,
  }
}
/** Substitute ${CLOUDFLARE_ACCOUNT_ID} into template base URLs when known. */
function expandAccountId(baseURL: unknown) {
  if (typeof baseURL !== "string") return baseURL
  const accountId = process.env.CLOUDFLARE_ACCOUNT_ID ?? "${CLOUDFLARE_ACCOUNT_ID}"
  return baseURL.replaceAll("${CLOUDFLARE_ACCOUNT_ID}", accountId)
}
/** Read a string-valued option; non-string values count as absent. */
function stringOption(options: Record<string, unknown>, key: string) {
  const value = options[key]
  return typeof value === "string" ? value : undefined
}

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/** Builds the "@ai-sdk/cohere" provider when that package is requested. */
export const CoherePlugin = PluginV2.define({
  id: PluginV2.ID.make("cohere"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/cohere") return
        // Lazy import keeps the dependency off the startup path.
        const cohere = yield* Effect.promise(() => import("@ai-sdk/cohere"))
        event.sdk = cohere.createCohere(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/** Builds the "@ai-sdk/deepinfra" provider when that package is requested. */
export const DeepInfraPlugin = PluginV2.define({
  id: PluginV2.ID.make("deepinfra"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/deepinfra") return
        // Lazy import keeps the dependency off the startup path.
        const deepinfra = yield* Effect.promise(() => import("@ai-sdk/deepinfra"))
        event.sdk = deepinfra.createDeepInfra(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,31 @@
import { Npm } from "../../npm"
import { Effect, Option } from "effect"
import { pathToFileURL } from "url"
import { PluginV2 } from "../../plugin"
/**
 * Last-resort provider loader: installs the requested npm package (unless it
 * is a file:// path), imports it, and calls its first `create*` factory export.
 */
export const DynamicProviderPlugin = PluginV2.define({
  id: PluginV2.ID.make("dynamic-provider"),
  effect: Effect.gen(function* () {
    const npm = yield* Npm.Service
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        // Another plugin already produced an SDK; nothing to do.
        if (event.sdk) return
        let entry: string | undefined
        if (event.package.startsWith("file://")) {
          entry = event.package
        } else {
          const installed = yield* npm.add(event.package).pipe(Effect.orDie)
          entry = Option.getOrUndefined(installed.entrypoint)
        }
        if (!entry) throw new Error(`Package ${event.package} has no import entrypoint`)
        const specifier = entry.startsWith("file://") ? entry : pathToFileURL(entry).href
        const mod = yield* Effect.promise(async () => {
          return (await import(specifier)) as Record<string, (options: any) => any>
        }).pipe(Effect.orDie)
        const factory = Object.keys(mod).find((name) => name.startsWith("create"))
        if (!factory) throw new Error(`Package ${event.package} has no provider factory export`)
        event.sdk = mod[factory](event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/** Builds the "@ai-sdk/gateway" provider when that package is requested. */
export const GatewayPlugin = PluginV2.define({
  id: PluginV2.ID.make("gateway"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/gateway") return
        // Lazy import keeps the dependency off the startup path.
        const gateway = yield* Effect.promise(() => import("@ai-sdk/gateway"))
        event.sdk = gateway.createGateway(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,44 @@
import { Effect } from "effect"
import { ModelV2 } from "../../model"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/**
 * Copilot supports Responses for GPT-5 class models, except gpt-5-mini
 * variants, which still need the chat-completions endpoint.
 */
function shouldUseResponses(modelID: string) {
  if (modelID.startsWith("gpt-5-mini")) return false
  const generation = /^gpt-(\d+)/.exec(modelID)
  return generation !== null && Number(generation[1]) >= 5
}
/**
 * GitHub Copilot provider integration.
 *
 * - Builds the Copilot SDK from the bundled OpenAI-compatible provider.
 * - Routes GPT-5-class models through the Responses endpoint when available.
 * - Hides the chat-only `gpt-5-chat-latest` alias from Copilot's catalog.
 *
 * The previous no-op "provider.update" hook (guard-and-return only) has been
 * removed; it registered a handler that did nothing.
 */
export const GithubCopilotPlugin = PluginV2.define({
  id: PluginV2.ID.make("github-copilot"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@ai-sdk/github-copilot") return
        const mod = yield* Effect.promise(() => import("../../github-copilot/copilot-provider"))
        evt.sdk = mod.createOpenaiCompatible(evt.options)
      }),
      "aisdk.language": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.githubCopilot) return
        // SDKs without dedicated endpoints fall back to the generic factory.
        if (evt.sdk.responses === undefined && evt.sdk.chat === undefined) {
          evt.language = evt.sdk.languageModel(evt.model.apiID)
          return
        }
        evt.language = shouldUseResponses(evt.model.apiID)
          ? evt.sdk.responses(evt.model.apiID)
          : evt.sdk.chat(evt.model.apiID)
      }),
      "model.update": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.githubCopilot) return
        // This chat-only alias conflicts with the Copilot GPT-5 Responses route,
        // so hide it only for Copilot rather than for every provider catalog.
        if (evt.model.id === ModelV2.ID.make("gpt-5-chat-latest")) evt.cancel = true
      }),
    }
  }),
})

View File

@@ -0,0 +1,64 @@
import os from "os"
import { InstallationVersion } from "../../installation/version"
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/**
 * GitLab Duo provider. Creates the gitlab-ai-provider SDK with instance URL,
 * token, attribution headers, and Duo feature flags, then selects between the
 * workflow-chat and agentic-chat language models per requested model.
 */
export const GitLabPlugin = PluginV2.define({
  id: PluginV2.ID.make("gitlab"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "gitlab-ai-provider") return
        const mod = yield* Effect.promise(() => import("gitlab-ai-provider"))
        evt.sdk = mod.createGitLab({
          ...evt.options,
          // Configured value wins; otherwise GITLAB_INSTANCE_URL, else gitlab.com.
          instanceUrl:
            typeof evt.options.instanceUrl === "string"
              ? evt.options.instanceUrl
              : (process.env.GITLAB_INSTANCE_URL ?? "https://gitlab.com"),
          apiKey: typeof evt.options.apiKey === "string" ? evt.options.apiKey : process.env.GITLAB_TOKEN,
          aiGatewayHeaders: {
            "User-Agent": `opencode/${InstallationVersion} gitlab-ai-provider/${mod.VERSION} (${os.platform()} ${os.release()}; ${os.arch()})`,
            "anthropic-beta": "context-1m-2025-08-07",
            ...evt.options.aiGatewayHeaders,
          },
          // Duo agent platform flags default on but remain overridable via options.
          featureFlags: {
            duo_agent_platform_agentic_chat: true,
            duo_agent_platform: true,
            ...evt.options.featureFlags,
          },
        })
      }),
      "aisdk.language": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.gitlab) return
        const featureFlags = typeof evt.options.featureFlags === "object" && evt.options.featureFlags ? evt.options.featureFlags : {}
        // duo-workflow-* models go through the workflow-chat entry point.
        if (evt.model.apiID.startsWith("duo-workflow-")) {
          const gitlab = yield* Effect.promise(() => import("gitlab-ai-provider")).pipe(Effect.orDie)
          const workflowRef =
            typeof evt.model.options.aisdk.request.workflowRef === "string"
              ? evt.model.options.aisdk.request.workflowRef
              : undefined
          const workflowDefinition =
            typeof evt.model.options.aisdk.request.workflowDefinition === "string"
              ? evt.model.options.aisdk.request.workflowDefinition
              : undefined
          // Unrecognized workflow model IDs fall back to the generic "duo-workflow".
          const language = evt.sdk.workflowChat(
            gitlab.isWorkflowModel(evt.model.apiID) ? evt.model.apiID : "duo-workflow",
            {
              featureFlags,
              workflowDefinition,
            },
          )
          if (workflowRef) language.selectedModelRef = workflowRef
          evt.language = language
          return
        }
        // Everything else uses Duo agentic chat.
        evt.language = evt.sdk.agenticChat(evt.model.apiID, {
          aiGatewayHeaders: evt.options.aiGatewayHeaders,
          featureFlags,
        })
      }),
    }
  }),
})

View File

@@ -0,0 +1,124 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/**
 * Resolve the Vertex project from options or environment.
 * models.dev advertises GOOGLE_VERTEX_PROJECT for Vertex, while Google SDKs
 * and ADC examples commonly use the broader Google Cloud project aliases.
 */
function resolveProject(options: Record<string, any>) {
  if (options.project != null) return options.project
  for (const name of ["GOOGLE_VERTEX_PROJECT", "GOOGLE_CLOUD_PROJECT", "GCP_PROJECT", "GCLOUD_PROJECT"]) {
    if (process.env[name] != null) return process.env[name]
  }
  return undefined
}
/** Resolve the Vertex location from options or env, defaulting to us-central1. */
function resolveLocation(options: Record<string, any>) {
  if (options.location != null) return options.location
  return (
    process.env.GOOGLE_VERTEX_LOCATION ??
    process.env.GOOGLE_CLOUD_LOCATION ??
    process.env.VERTEX_LOCATION ??
    "us-central1"
  )
}
/** Regional Vertex API host; the "global" location uses the bare endpoint. */
function vertexEndpoint(location: string) {
  if (location === "global") return "aiplatform.googleapis.com"
  return `${location}-aiplatform.googleapis.com`
}
/**
 * Vertex OpenAI-compatible endpoints are stored as templates in the catalog;
 * expand them after provider config/env project and location have been
 * resolved. An unknown project leaves its placeholder intact.
 */
function replaceVertexVars(value: string, project: string | undefined, location: string) {
  const endpoint = vertexEndpoint(location)
  return value
    .replaceAll("${GOOGLE_VERTEX_PROJECT}", project ?? "${GOOGLE_VERTEX_PROJECT}")
    .replaceAll("${GOOGLE_VERTEX_LOCATION}", location)
    .replaceAll("${GOOGLE_VERTEX_ENDPOINT}", endpoint)
}
// Wrap fetch so OpenAI-compatible Vertex requests carry ADC credentials.
function authFetch(fetchWithRuntimeOptions?: unknown) {
  // Native Vertex SDKs handle ADC internally. OpenAI-compatible Vertex endpoints
  // do not, so inject a Google access token into their fetch path.
  return async (input: Parameters<typeof fetch>[0], init?: RequestInit) => {
    // NOTE(review): a fresh GoogleAuth client and token are resolved on every
    // call — confirm whether token caching is needed for hot paths.
    const { GoogleAuth } = await import("google-auth-library")
    const auth = new GoogleAuth()
    const client = await auth.getApplicationDefault()
    const token = await client.credential.getAccessToken()
    const headers = new Headers(init?.headers)
    headers.set("Authorization", `Bearer ${token.token}`)
    // Preserve any caller-provided fetch implementation if one was supplied.
    return typeof fetchWithRuntimeOptions === "function"
      ? fetchWithRuntimeOptions(input, { ...init, headers })
      : fetch(input, { ...init, headers })
  }
}
/**
 * Native Google Vertex provider wiring: resolves project/location, expands
 * catalog endpoint templates, and injects ADC auth for OpenAI-compatible
 * Vertex endpoints that cannot authenticate on their own.
 */
export const GoogleVertexPlugin = PluginV2.define({
  id: PluginV2.ID.make("google-vertex"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.googleVertex) return
        const providerOptions = event.provider.options.aisdk.provider
        const project = resolveProject(providerOptions)
        const location = String(resolveLocation(providerOptions))
        if (project) providerOptions.project = project
        providerOptions.location = location
        if (event.provider.endpoint.type === "aisdk" && event.provider.endpoint.url) {
          event.provider.endpoint.url = replaceVertexVars(event.provider.endpoint.url, project, location)
        }
        // OpenAI-compatible Vertex endpoints need an authenticated fetch.
        if (event.provider.endpoint.type === "aisdk" && event.provider.endpoint.package.includes("@ai-sdk/openai-compatible")) {
          providerOptions.fetch = authFetch(providerOptions.fetch)
        }
      }),
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.model.providerID === ProviderV2.ID.googleVertex && event.package.includes("@ai-sdk/openai-compatible")) {
          event.options.fetch = authFetch(event.options.fetch)
          return
        }
        if (event.package !== "@ai-sdk/google-vertex") return
        const vertex = yield* Effect.promise(() => import("@ai-sdk/google-vertex"))
        const project = resolveProject(event.options)
        const location = resolveLocation(event.options)
        // The native SDK manages ADC itself; drop any injected fetch wrapper.
        const { fetch: _ignored, ...rest } = event.options
        event.sdk = vertex.createVertex({
          ...rest,
          project,
          location,
        })
      }),
      "aisdk.language": Effect.fn(function* (event) {
        if (event.model.providerID !== ProviderV2.ID.googleVertex) return
        event.language = event.sdk.languageModel(String(event.model.apiID).trim())
      }),
    }
  }),
})
/**
 * Vertex-hosted Anthropic models. Mirrors GoogleVertexPlugin but defaults the
 * location to "global" and reads only the generic Google Cloud env aliases.
 */
export const GoogleVertexAnthropicPlugin = PluginV2.define({
  id: PluginV2.ID.make("google-vertex-anthropic"),
  effect: Effect.gen(function* () {
    const fallbackProject = () =>
      process.env.GOOGLE_CLOUD_PROJECT ?? process.env.GCP_PROJECT ?? process.env.GCLOUD_PROJECT
    const fallbackLocation = () => process.env.GOOGLE_CLOUD_LOCATION ?? process.env.VERTEX_LOCATION ?? "global"
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.make("google-vertex-anthropic")) return
        const providerOptions = event.provider.options.aisdk.provider
        const project = providerOptions.project ?? fallbackProject()
        const location = providerOptions.location ?? fallbackLocation()
        if (project) providerOptions.project = project
        providerOptions.location = location
      }),
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/google-vertex/anthropic") return
        const anthropic = yield* Effect.promise(() => import("@ai-sdk/google-vertex/anthropic"))
        event.sdk = anthropic.createVertexAnthropic({
          ...event.options,
          project: typeof event.options.project === "string" ? event.options.project : fallbackProject(),
          location: typeof event.options.location === "string" ? event.options.location : fallbackLocation(),
        })
      }),
      "aisdk.language": Effect.fn(function* (event) {
        if (event.model.providerID !== ProviderV2.ID.make("google-vertex-anthropic")) return
        event.language = event.sdk.languageModel(String(event.model.apiID).trim())
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/** Builds the "@ai-sdk/google" (Generative AI) provider on request. */
export const GooglePlugin = PluginV2.define({
  id: PluginV2.ID.make("google"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/google") return
        // Lazy import keeps the dependency off the startup path.
        const google = yield* Effect.promise(() => import("@ai-sdk/google"))
        event.sdk = google.createGoogleGenerativeAI(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/** Builds the "@ai-sdk/groq" provider when that package is requested. */
export const GroqPlugin = PluginV2.define({
  id: PluginV2.ID.make("groq"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/groq") return
        // Lazy import keeps the dependency off the startup path.
        const groq = yield* Effect.promise(() => import("@ai-sdk/groq"))
        event.sdk = groq.createGroq(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,67 @@
import { AlibabaPlugin } from "./alibaba"
import { AmazonBedrockPlugin } from "./amazon-bedrock"
import { AnthropicPlugin } from "./anthropic"
import { AzureCognitiveServicesPlugin, AzurePlugin } from "./azure"
import { CerebrasPlugin } from "./cerebras"
import { CloudflareAIGatewayPlugin } from "./cloudflare-ai-gateway"
import { CloudflareWorkersAIPlugin } from "./cloudflare-workers-ai"
import { CoherePlugin } from "./cohere"
import { DeepInfraPlugin } from "./deepinfra"
import { DynamicProviderPlugin } from "./dynamic"
import { GatewayPlugin } from "./gateway"
import { GithubCopilotPlugin } from "./github-copilot"
import { GitLabPlugin } from "./gitlab"
import { GooglePlugin } from "./google"
import { GoogleVertexAnthropicPlugin, GoogleVertexPlugin } from "./google-vertex"
import { GroqPlugin } from "./groq"
import { KiloPlugin } from "./kilo"
import { LLMGatewayPlugin } from "./llmgateway"
import { MistralPlugin } from "./mistral"
import { NvidiaPlugin } from "./nvidia"
import { OpenAIPlugin } from "./openai"
import { OpenAICompatiblePlugin } from "./openai-compatible"
import { OpencodePlugin } from "./opencode"
import { OpenRouterPlugin } from "./openrouter"
import { PerplexityPlugin } from "./perplexity"
import { SapAICorePlugin } from "./sap-ai-core"
import { TogetherAIPlugin } from "./togetherai"
import { VercelPlugin } from "./vercel"
import { VenicePlugin } from "./venice"
import { XAIPlugin } from "./xai"
import { ZenmuxPlugin } from "./zenmux"
// Built-in provider plugins. DynamicProviderPlugin sits last on purpose: its
// aisdk.sdk hook bails out when another plugin already set evt.sdk, so it
// acts as the catch-all fallback (assuming registration preserves array
// order — see Plugin.trigger, which runs hooks in registration order).
export const ProviderPlugins = [
  AlibabaPlugin,
  AmazonBedrockPlugin,
  AnthropicPlugin,
  AzureCognitiveServicesPlugin,
  AzurePlugin,
  CerebrasPlugin,
  CloudflareAIGatewayPlugin,
  CloudflareWorkersAIPlugin,
  CoherePlugin,
  DeepInfraPlugin,
  GatewayPlugin,
  GithubCopilotPlugin,
  GitLabPlugin,
  GooglePlugin,
  GoogleVertexAnthropicPlugin,
  GoogleVertexPlugin,
  GroqPlugin,
  KiloPlugin,
  LLMGatewayPlugin,
  MistralPlugin,
  NvidiaPlugin,
  OpencodePlugin,
  OpenAICompatiblePlugin,
  OpenAIPlugin,
  OpenRouterPlugin,
  PerplexityPlugin,
  SapAICorePlugin,
  TogetherAIPlugin,
  VercelPlugin,
  VenicePlugin,
  XAIPlugin,
  ZenmuxPlugin,
  DynamicProviderPlugin,
]

View File

@@ -0,0 +1,16 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/** Kilo: attaches opencode attribution headers to outgoing requests. */
export const KiloPlugin = PluginV2.define({
  id: PluginV2.ID.make("kilo"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.make("kilo")) return
        const headers = event.provider.options.headers
        headers["HTTP-Referer"] = "https://opencode.ai/"
        headers["X-Title"] = "opencode"
      }),
    }
  }),
})

View File

@@ -0,0 +1,18 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/** llmgateway: attaches opencode attribution headers unless disabled. */
export const LLMGatewayPlugin = PluginV2.define({
  id: PluginV2.ID.make("llmgateway"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.make("llmgateway")) return
        // Skip when the provider is explicitly marked disabled.
        if (event.provider.enabled === false) return
        const headers = event.provider.options.headers
        headers["HTTP-Referer"] = "https://opencode.ai/"
        headers["X-Title"] = "opencode"
        headers["X-Source"] = "opencode"
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/** Builds the "@ai-sdk/mistral" provider when that package is requested. */
export const MistralPlugin = PluginV2.define({
  id: PluginV2.ID.make("mistral"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.package !== "@ai-sdk/mistral") return
        // Lazy import keeps the dependency off the startup path.
        const mistral = yield* Effect.promise(() => import("@ai-sdk/mistral"))
        event.sdk = mistral.createMistral(event.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,16 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
/** Nvidia: attaches opencode attribution headers to outgoing requests. */
export const NvidiaPlugin = PluginV2.define({
  id: PluginV2.ID.make("nvidia"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (event) {
        if (event.provider.id !== ProviderV2.ID.make("nvidia")) return
        const headers = event.provider.options.headers
        headers["HTTP-Referer"] = "https://opencode.ai/"
        headers["X-Title"] = "opencode"
      }),
    }
  }),
})

View File

@@ -0,0 +1,17 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
/**
 * Fallback for any "@ai-sdk/openai-compatible" package request (including
 * subpath variants) that no earlier plugin has already handled.
 */
export const OpenAICompatiblePlugin = PluginV2.define({
  id: PluginV2.ID.make("openai-compatible"),
  effect: Effect.gen(function* () {
    return {
      "aisdk.sdk": Effect.fn(function* (event) {
        if (event.sdk) return
        if (!event.package.includes("@ai-sdk/openai-compatible")) return
        // Request usage reporting unless it was explicitly turned off.
        if (event.options.includeUsage !== false) event.options.includeUsage = true
        const compat = yield* Effect.promise(() => import("@ai-sdk/openai-compatible"))
        event.sdk = compat.createOpenAICompatible(event.options as any)
      }),
    }
  }),
})

View File

@@ -0,0 +1,27 @@
import { Effect } from "effect"
import { ModelV2 } from "../../model"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
export const OpenAIPlugin = PluginV2.define({
  id: PluginV2.ID.make("openai"),
  effect: Effect.gen(function* () {
    return {
      // Load the OpenAI SDK factory when its package is requested.
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@ai-sdk/openai") return
        const { createOpenAI } = yield* Effect.promise(() => import("@ai-sdk/openai"))
        evt.sdk = createOpenAI(evt.options)
      }),
      // Route OpenAI models through the Responses API surface.
      "aisdk.language": Effect.fn(function* (evt) {
        if (evt.model.providerID === ProviderV2.ID.openai) {
          evt.language = evt.sdk.responses(evt.model.apiID)
        }
      }),
      "model.update": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.openai) return
        // OpenAIPlugin sends OpenAI models through Responses; this alias is a
        // chat-completions-only model, so remove it only from OpenAI's catalog.
        const chatOnly = ModelV2.ID.make("gpt-5-chat-latest")
        if (evt.model.id === chatOnly) evt.cancel = true
      }),
    }
  }),
})

View File

@@ -0,0 +1,27 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
export const OpencodePlugin = PluginV2.define({
  id: PluginV2.ID.make("opencode"),
  effect: Effect.gen(function* () {
    // Whether the user has credentials; shared between the two handlers below.
    let hasKey = false
    return {
      "provider.update": Effect.fn(function* (evt) {
        if (evt.provider.id !== ProviderV2.ID.opencode) return
        const fromEnv = Boolean(process.env.OPENCODE_API_KEY) || evt.provider.env.some((name) => process.env[name])
        const fromConfig = Boolean(evt.provider.options.aisdk.provider.apiKey)
        const fromAuth = Boolean(evt.provider.enabled && evt.provider.enabled.via === "auth")
        hasKey = fromEnv || fromConfig || fromAuth
        // Without credentials, fall back to the shared public key.
        if (!hasKey) evt.provider.options.aisdk.provider.apiKey = "public"
      }),
      "model.update": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.opencode) return
        if (hasKey) return
        // Keyless users only see models with zero input cost.
        if (evt.model.cost.some((entry) => entry.input > 0)) evt.cancel = true
      }),
    }
  }),
})

View File

@@ -0,0 +1,29 @@
import { Effect } from "effect"
import { ModelV2 } from "../../model"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
export const OpenRouterPlugin = PluginV2.define({
  id: PluginV2.ID.make("openrouter"),
  effect: Effect.gen(function* () {
    return {
      // Attribution headers for OpenRouter traffic.
      "provider.update": Effect.fn(function* (evt) {
        if (evt.provider.id !== ProviderV2.ID.openrouter) return
        const headers = evt.provider.options.headers
        headers["HTTP-Referer"] = "https://opencode.ai/"
        headers["X-Title"] = "opencode"
      }),
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@openrouter/ai-sdk-provider") return
        const { createOpenRouter } = yield* Effect.promise(() => import("@openrouter/ai-sdk-provider"))
        evt.sdk = createOpenRouter(evt.options)
      }),
      "model.update": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.openrouter) return
        // These are OpenRouter-specific OpenAI chat aliases that do not work on
        // the generic path. Keep custom providers with matching IDs untouched.
        const blocked = [ModelV2.ID.make("gpt-5-chat-latest"), ModelV2.ID.make("openai/gpt-5-chat")]
        if (blocked.includes(evt.model.id)) evt.cancel = true
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
export const PerplexityPlugin = PluginV2.define({
  id: PluginV2.ID.make("perplexity"),
  effect: Effect.gen(function* () {
    return {
      // Instantiate the Perplexity SDK when its package is requested.
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@ai-sdk/perplexity") return
        const { createPerplexity } = yield* Effect.promise(() => import("@ai-sdk/perplexity"))
        evt.sdk = createPerplexity(evt.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,40 @@
import { Npm } from "../../npm"
import { Effect, Option } from "effect"
import { pathToFileURL } from "url"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
export const SapAICorePlugin = PluginV2.define({
  id: PluginV2.ID.make("sap-ai-core"),
  effect: Effect.gen(function* () {
    const npm = yield* Npm.Service
    return {
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.make("sap-ai-core")) return
        // Resolve the service key: environment wins over provider options.
        const optionKey = typeof evt.options.serviceKey === "string" ? evt.options.serviceKey : undefined
        const serviceKey = process.env.AICORE_SERVICE_KEY ?? optionKey
        // The SAP SDK reads its credentials from the environment, so mirror
        // an option-supplied key into it when the env var is unset.
        if (serviceKey && !process.env.AICORE_SERVICE_KEY) process.env.AICORE_SERVICE_KEY = serviceKey
        // Locate the package entrypoint: file:// URLs are used as-is,
        // anything else is installed through npm first.
        let installedPath: string | undefined
        if (evt.package.startsWith("file://")) {
          installedPath = evt.package
        } else {
          const added = yield* npm.add(evt.package).pipe(Effect.orDie)
          installedPath = Option.getOrUndefined(added.entrypoint)
        }
        if (!installedPath) throw new Error(`Package ${evt.package} has no import entrypoint`)
        const href = installedPath.startsWith("file://") ? installedPath : pathToFileURL(installedPath).href
        const mod = yield* Effect.promise(async () => {
          return (await import(href)) as Record<string, (options: any) => any>
        }).pipe(Effect.orDie)
        // Use the first "create*" export as the provider factory.
        const factoryName = Object.keys(mod).find((name) => name.startsWith("create"))
        if (!factoryName) throw new Error(`Package ${evt.package} has no provider factory export`)
        const factoryOptions = serviceKey
          ? { deploymentId: process.env.AICORE_DEPLOYMENT_ID, resourceGroup: process.env.AICORE_RESOURCE_GROUP }
          : {}
        evt.sdk = mod[factoryName](factoryOptions)
      }),
      "aisdk.language": Effect.fn(function* (evt) {
        if (evt.model.providerID !== ProviderV2.ID.make("sap-ai-core")) return
        evt.language = evt.sdk(evt.model.apiID)
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
export const TogetherAIPlugin = PluginV2.define({
  id: PluginV2.ID.make("togetherai"),
  effect: Effect.gen(function* () {
    return {
      // Instantiate the Together AI SDK when its package is requested.
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@ai-sdk/togetherai") return
        const { createTogetherAI } = yield* Effect.promise(() => import("@ai-sdk/togetherai"))
        evt.sdk = createTogetherAI(evt.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,15 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
export const VenicePlugin = PluginV2.define({
  id: PluginV2.ID.make("venice"),
  effect: Effect.gen(function* () {
    return {
      // Instantiate the Venice SDK when its package is requested.
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "venice-ai-sdk-provider") return
        const { createVenice } = yield* Effect.promise(() => import("venice-ai-sdk-provider"))
        evt.sdk = createVenice(evt.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,21 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
export const VercelPlugin = PluginV2.define({
  id: PluginV2.ID.make("vercel"),
  effect: Effect.gen(function* () {
    return {
      // Attribution headers (Vercel expects lowercase names).
      "provider.update": Effect.fn(function* (evt) {
        if (evt.provider.id !== ProviderV2.ID.make("vercel")) return
        const headers = evt.provider.options.headers
        headers["http-referer"] = "https://opencode.ai/"
        headers["x-title"] = "opencode"
      }),
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@ai-sdk/vercel") return
        const { createVercel } = yield* Effect.promise(() => import("@ai-sdk/vercel"))
        evt.sdk = createVercel(evt.options)
      }),
    }
  }),
})

View File

@@ -0,0 +1,20 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
export const XAIPlugin = PluginV2.define({
  id: PluginV2.ID.make("xai"),
  effect: Effect.gen(function* () {
    return {
      // Instantiate the xAI SDK when its package is requested.
      "aisdk.sdk": Effect.fn(function* (evt) {
        if (evt.package !== "@ai-sdk/xai") return
        const { createXai } = yield* Effect.promise(() => import("@ai-sdk/xai"))
        evt.sdk = createXai(evt.options)
      }),
      // Route xAI models through the Responses API surface.
      "aisdk.language": Effect.fn(function* (evt) {
        if (evt.model.providerID === ProviderV2.ID.make("xai")) {
          evt.language = evt.sdk.responses(evt.model.apiID)
        }
      }),
    }
  }),
})

View File

@@ -0,0 +1,16 @@
import { Effect } from "effect"
import { PluginV2 } from "../../plugin"
import { ProviderV2 } from "../../provider"
export const ZenmuxPlugin = PluginV2.define({
  id: PluginV2.ID.make("zenmux"),
  effect: Effect.gen(function* () {
    return {
      "provider.update": Effect.fn(function* (evt) {
        if (evt.provider.id !== ProviderV2.ID.make("zenmux")) return
        const headers = evt.provider.options.headers
        // Only fill attribution headers the user has not already configured.
        if (headers["HTTP-Referer"] == null) headers["HTTP-Referer"] = "https://opencode.ai/"
        if (headers["X-Title"] == null) headers["X-Title"] = "opencode"
      }),
    }
  }),
})

View File

@@ -0,0 +1,120 @@
export * as ProviderV2 from "./provider"
import { withStatics } from "./schema"
import { Schema } from "effect"
// Branded provider identifier. Well-known providers are exposed as statics
// (e.g. ProviderV2.ID.openai) so call sites avoid repeating string literals;
// arbitrary custom provider IDs can still be built with ID.make(...).
export const ID = Schema.String.pipe(
  Schema.brand("ProviderV2.ID"),
  withStatics((schema) => ({
    // Well-known providers
    opencode: schema.make("opencode"),
    anthropic: schema.make("anthropic"),
    openai: schema.make("openai"),
    google: schema.make("google"),
    googleVertex: schema.make("google-vertex"),
    githubCopilot: schema.make("github-copilot"),
    amazonBedrock: schema.make("amazon-bedrock"),
    azure: schema.make("azure"),
    openrouter: schema.make("openrouter"),
    mistral: schema.make("mistral"),
    gitlab: schema.make("gitlab"),
  })),
)
export type ID = typeof ID.Type
// OpenAI Responses API endpoint; `websocket` opts into a websocket transport.
const OpenAIResponses = Schema.Struct({
  type: Schema.Literal("openai/responses"),
  url: Schema.String,
  websocket: Schema.optional(Schema.Boolean),
})
// OpenAI Chat Completions endpoint; `reasoning` selects which wire field
// carries reasoning output for providers that stream it.
const OpenAICompletions = Schema.Struct({
  type: Schema.Literal("openai/completions"),
  url: Schema.String,
  reasoning: Schema.Union([
    Schema.Struct({
      type: Schema.Literal("reasoning_content"),
    }),
    Schema.Struct({
      type: Schema.Literal("reasoning_details"),
    }),
  ]).pipe(Schema.optional),
})
export type OpenAICompletions = typeof OpenAICompletions.Type
// Provider backed by an AI SDK npm package rather than a raw HTTP endpoint.
const AISDK = Schema.Struct({
  type: Schema.Literal("aisdk"),
  package: Schema.String,
  url: Schema.String.pipe(Schema.optional),
})
// Anthropic Messages API endpoint.
const AnthropicMessages = Schema.Struct({
  type: Schema.Literal("anthropic/messages"),
  url: Schema.String,
})
// Placeholder used before an endpoint has been resolved.
const UnknownEndpoint = Schema.Struct({
  type: Schema.Literal("unknown"),
})
// All endpoint shapes, discriminated on the `type` tag.
export const Endpoint = Schema.Union([
  UnknownEndpoint,
  OpenAIResponses,
  OpenAICompletions,
  AnthropicMessages,
  AISDK,
]).pipe(Schema.toTaggedUnion("type"))
export type Endpoint = typeof Endpoint.Type
// Request-shaping options carried on every provider record.
export const Options = Schema.Struct({
  // HTTP headers configured for this provider (plugins write into these).
  headers: Schema.Record(Schema.String, Schema.String),
  // Extra JSON fields configured for request bodies.
  body: Schema.Record(Schema.String, Schema.Any),
  aisdk: Schema.Struct({
    // Options handed to the AI SDK provider factory (e.g. apiKey).
    provider: Schema.Record(Schema.String, Schema.Any),
    // Per-request options for the AI SDK.
    request: Schema.Record(Schema.String, Schema.Any),
  }),
})
export type Options = typeof Options.Type
// A provider's resolved configuration record.
export class Info extends Schema.Class<Info>("ProviderV2.Info")({
  id: ID,
  name: Schema.String,
  // How (or whether) the provider was enabled: disabled outright, via an
  // environment variable, via stored auth, or via custom data.
  enabled: Schema.Union([
    Schema.Literal(false),
    Schema.Struct({
      via: Schema.Literal("env"),
      name: Schema.String,
    }),
    Schema.Struct({
      via: Schema.Literal("auth"),
      service: Schema.String,
    }),
    Schema.Struct({
      via: Schema.Literal("custom"),
      data: Schema.Record(Schema.String, Schema.Any),
    }),
  ]),
  // Environment variable names associated with this provider.
  env: Schema.String.pipe(Schema.Array),
  endpoint: Endpoint,
  options: Options,
}) {
  // Default, disabled record for a provider with no configuration yet:
  // name mirrors the ID, endpoint unknown, empty option maps.
  static empty(providerID: ID) {
    return new Info({
      id: providerID,
      name: providerID,
      enabled: false,
      env: [],
      endpoint: {
        type: "unknown",
      },
      options: {
        headers: {},
        body: {},
        aisdk: {
          provider: {},
          request: {},
        },
      },
    })
  }
}

View File

@@ -0,0 +1,49 @@
import * as Schema from "effect/Schema"
// Location of an attachment mention within the raw prompt text:
// [start, end) offsets plus the matched text itself.
export class Source extends Schema.Class<Source>("Prompt.Source")({
  start: Schema.Finite,
  end: Schema.Finite,
  text: Schema.String,
}) {}
export class FileAttachment extends Schema.Class<FileAttachment>("Prompt.FileAttachment")({
  uri: Schema.String,
  mime: Schema.String,
  name: Schema.String.pipe(Schema.optional),
  description: Schema.String.pipe(Schema.optional),
  source: Source.pipe(Schema.optional),
}) {
  // Build a FileAttachment from anything satisfying its shape, copying
  // only the known fields.
  static create(input: FileAttachment) {
    const { uri, mime, name, description, source } = input
    return new FileAttachment({ uri, mime, name, description, source })
  }
}
// An @-mention of an agent within the prompt.
export class AgentAttachment extends Schema.Class<AgentAttachment>("Prompt.AgentAttachment")({
  name: Schema.String,
  source: Source.pipe(Schema.optional),
}) {}
// A reference to a local or git-hosted target mentioned in the prompt;
// `kind: "invalid"` carries a `problem` describing why it failed to resolve.
export class ReferenceAttachment extends Schema.Class<ReferenceAttachment>("Prompt.ReferenceAttachment")({
  name: Schema.String,
  kind: Schema.Literals(["local", "git", "invalid"]),
  uri: Schema.String.pipe(Schema.optional),
  repository: Schema.String.pipe(Schema.optional),
  branch: Schema.String.pipe(Schema.optional),
  target: Schema.String.pipe(Schema.optional),
  targetUri: Schema.String.pipe(Schema.optional),
  problem: Schema.String.pipe(Schema.optional),
  source: Source.pipe(Schema.optional),
}) {}
// A parsed user prompt: the raw text plus any attachments extracted from it.
export class Prompt extends Schema.Class<Prompt>("Prompt")({
  text: Schema.String,
  files: Schema.Array(FileAttachment).pipe(Schema.optional),
  agents: Schema.Array(AgentAttachment).pipe(Schema.optional),
  references: Schema.Array(ReferenceAttachment).pipe(Schema.optional),
}) {}

View File

@@ -0,0 +1,18 @@
export * as ToolOutput from "./tool-output"
import { Schema } from "effect"
// Plain-text content produced by a tool invocation.
export class TextContent extends Schema.Class<TextContent>("Tool.TextContent")({
  type: Schema.Literal("text"),
  text: Schema.String,
}) {}
// A file reference produced by a tool invocation.
export class FileContent extends Schema.Class<FileContent>("Tool.FileContent")({
  type: Schema.Literal("file"),
  uri: Schema.String,
  mime: Schema.String,
  name: Schema.String.pipe(Schema.optional),
}) {}
// Tool output content, discriminated on the `type` tag.
export const Content = Schema.Union([TextContent, FileContent]).pipe(Schema.toTaggedUnion("type"))
// Free-form structured (JSON-like) tool output.
export const Structured = Schema.Record(Schema.String, Schema.Any)

View File

@@ -0,0 +1,10 @@
import { DateTime, Schema, SchemaGetter } from "effect"
// Transcodes between epoch milliseconds on the wire and DateTime.Utc in
// memory: decode millis -> DateTime, encode DateTime -> millis.
export const DateTimeUtcFromMillis = Schema.Finite.pipe(
  Schema.decodeTo(Schema.DateTimeUtc, {
    decode: SchemaGetter.transform((value) => DateTime.makeUnsafe(value)),
    encode: SchemaGetter.transform((value) => DateTime.toEpochMillis(value)),
  }),
)
export * as V2Schema from "./v2-schema"

View File

@@ -0,0 +1,523 @@
import { convertToOpenAICompatibleChatMessages as convertToCopilotMessages } from "@opencode-ai/core/github-copilot/chat/convert-to-openai-compatible-chat-messages"
import { describe, test, expect } from "bun:test"
// System messages pass through with their string content unchanged.
describe("system messages", () => {
  test("should convert system message content to string", () => {
    const result = convertToCopilotMessages([
      {
        role: "system",
        content: "You are a helpful assistant with AGENTS.md instructions.",
      },
    ])
    expect(result).toEqual([
      {
        role: "system",
        content: "You are a helpful assistant with AGENTS.md instructions.",
      },
    ])
  })
})
// User messages: single text parts collapse to a string; file parts become
// OpenAI-style image_url entries (base64 data URLs or passthrough URLs).
describe("user messages", () => {
  test("should convert messages with only a text part to a string content", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [{ type: "text", text: "Hello" }],
      },
    ])
    expect(result).toEqual([{ role: "user", content: "Hello" }])
  })
  test("should convert messages with image parts", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          { type: "text", text: "Hello" },
          {
            type: "file",
            data: Buffer.from([0, 1, 2, 3]).toString("base64"),
            mediaType: "image/png",
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "user",
        content: [
          { type: "text", text: "Hello" },
          {
            type: "image_url",
            image_url: { url: "data:image/png;base64,AAECAw==" },
          },
        ],
      },
    ])
  })
  test("should convert messages with image parts from Uint8Array", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          { type: "text", text: "Hi" },
          {
            type: "file",
            data: new Uint8Array([0, 1, 2, 3]),
            mediaType: "image/png",
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "user",
        content: [
          { type: "text", text: "Hi" },
          {
            type: "image_url",
            image_url: { url: "data:image/png;base64,AAECAw==" },
          },
        ],
      },
    ])
  })
  test("should handle URL-based images", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          {
            type: "file",
            data: new URL("https://example.com/image.jpg"),
            mediaType: "image/*",
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "user",
        content: [
          {
            type: "image_url",
            image_url: { url: "https://example.com/image.jpg" },
          },
        ],
      },
    ])
  })
  test("should handle multiple text parts without flattening", () => {
    const result = convertToCopilotMessages([
      {
        role: "user",
        content: [
          { type: "text", text: "Part 1" },
          { type: "text", text: "Part 2" },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "user",
        content: [
          { type: "text", text: "Part 1" },
          { type: "text", text: "Part 2" },
        ],
      },
    ])
  })
})
// Assistant messages: text parts concatenate to a string (null when only
// tool calls are present); reasoning fields default to undefined.
describe("assistant messages", () => {
  test("should convert assistant text messages", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [{ type: "text", text: "Hello back!" }],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: "Hello back!",
        tool_calls: undefined,
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
    ])
  })
  test("should handle assistant message with null content when only tool calls", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            toolCallId: "call1",
            toolName: "calculator",
            input: { a: 1, b: 2 },
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: null,
        tool_calls: [
          {
            id: "call1",
            type: "function",
            function: {
              name: "calculator",
              arguments: JSON.stringify({ a: 1, b: 2 }),
            },
          },
        ],
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
    ])
  })
  test("should concatenate multiple text parts", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          { type: "text", text: "First part. " },
          { type: "text", text: "Second part." },
        ],
      },
    ])
    expect(result[0].content).toBe("First part. Second part.")
  })
})
// Tool calls: arguments are JSON-stringified; each tool result becomes its
// own role:"tool" message with json/text output handling.
describe("tool calls", () => {
  test("should stringify arguments to tool calls", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            input: { foo: "bar123" },
            toolCallId: "quux",
            toolName: "thwomp",
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "quux",
            toolName: "thwomp",
            output: { type: "json", value: { oof: "321rab" } },
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: null,
        tool_calls: [
          {
            id: "quux",
            type: "function",
            function: {
              name: "thwomp",
              arguments: JSON.stringify({ foo: "bar123" }),
            },
          },
        ],
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
      {
        role: "tool",
        tool_call_id: "quux",
        content: JSON.stringify({ oof: "321rab" }),
      },
    ])
  })
  test("should handle text output type in tool results", () => {
    const result = convertToCopilotMessages([
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call-1",
            toolName: "getWeather",
            output: { type: "text", value: "It is sunny today" },
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "tool",
        tool_call_id: "call-1",
        content: "It is sunny today",
      },
    ])
  })
  test("should handle multiple tool results as separate messages", () => {
    const result = convertToCopilotMessages([
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call1",
            toolName: "api1",
            output: { type: "text", value: "Result 1" },
          },
          {
            type: "tool-result",
            toolCallId: "call2",
            toolName: "api2",
            output: { type: "text", value: "Result 2" },
          },
        ],
      },
    ])
    expect(result).toHaveLength(2)
    expect(result[0]).toEqual({
      role: "tool",
      tool_call_id: "call1",
      content: "Result 1",
    })
    expect(result[1]).toEqual({
      role: "tool",
      tool_call_id: "call2",
      content: "Result 2",
    })
  })
  test("should handle text plus multiple tool calls", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          { type: "text", text: "Checking... " },
          {
            type: "tool-call",
            toolCallId: "call1",
            toolName: "searchTool",
            input: { query: "Weather" },
          },
          { type: "text", text: "Almost there..." },
          {
            type: "tool-call",
            toolCallId: "call2",
            toolName: "mapsTool",
            input: { location: "Paris" },
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: "Checking... Almost there...",
        tool_calls: [
          {
            id: "call1",
            type: "function",
            function: {
              name: "searchTool",
              arguments: JSON.stringify({ query: "Weather" }),
            },
          },
          {
            id: "call2",
            type: "function",
            function: {
              name: "mapsTool",
              arguments: JSON.stringify({ location: "Paris" }),
            },
          },
        ],
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
    ])
  })
})
// Copilot-specific reasoning round-tripping: reasoning_text is only emitted
// when paired with a reasoning_opaque signature from providerOptions.
describe("reasoning (copilot-specific)", () => {
  test("should omit reasoning_text without reasoning_opaque", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          { type: "text", text: "The answer is 42." },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: "The answer is 42.",
        tool_calls: undefined,
        reasoning_text: undefined,
        reasoning_opaque: undefined,
      },
    ])
  })
  test("should include reasoning_opaque from providerOptions", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "Thinking...",
            providerOptions: {
              copilot: { reasoningOpaque: "opaque-signature-123" },
            },
          },
          { type: "text", text: "Done!" },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: "Done!",
        tool_calls: undefined,
        reasoning_text: "Thinking...",
        reasoning_opaque: "opaque-signature-123",
      },
    ])
  })
  test("should include reasoning_opaque from text part providerOptions", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Done!",
            providerOptions: {
              copilot: { reasoningOpaque: "opaque-text-456" },
            },
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: "Done!",
        tool_calls: undefined,
        reasoning_text: undefined,
        reasoning_opaque: "opaque-text-456",
      },
    ])
  })
  test("should handle reasoning-only assistant message", () => {
    const result = convertToCopilotMessages([
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "Just thinking, no response yet",
            providerOptions: {
              copilot: { reasoningOpaque: "sig-abc" },
            },
          },
        ],
      },
    ])
    expect(result).toEqual([
      {
        role: "assistant",
        content: null,
        tool_calls: undefined,
        reasoning_text: "Just thinking, no response yet",
        reasoning_opaque: "sig-abc",
      },
    ])
  })
})
// End-to-end: a multi-turn conversation preserves order and carries
// reasoning fields through on the assistant turn.
describe("full conversation", () => {
  test("should convert a multi-turn conversation with reasoning", () => {
    const result = convertToCopilotMessages([
      {
        role: "system",
        content: "You are a helpful assistant.",
      },
      {
        role: "user",
        content: [{ type: "text", text: "What is 2+2?" }],
      },
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "Let me calculate 2+2...",
            providerOptions: {
              copilot: { reasoningOpaque: "sig-abc" },
            },
          },
          { type: "text", text: "2+2 equals 4." },
        ],
      },
      {
        role: "user",
        content: [{ type: "text", text: "What about 3+3?" }],
      },
    ])
    expect(result).toHaveLength(4)
    const systemMsg = result[0]
    expect(systemMsg.role).toBe("system")
    // Assistant message should have reasoning fields
    const assistantMsg = result[2] as {
      reasoning_text?: string
      reasoning_opaque?: string
    }
    expect(assistantMsg.reasoning_text).toBe("Let me calculate 2+2...")
    expect(assistantMsg.reasoning_opaque).toBe("sig-abc")
  })
})

View File

@@ -0,0 +1,592 @@
import { OpenAICompatibleChatLanguageModel } from "@opencode-ai/core/github-copilot/chat/openai-compatible-chat-language-model"
import { describe, test, expect, mock } from "bun:test"
import type { LanguageModelV3Prompt } from "@ai-sdk/provider"
/**
 * Drain a ReadableStream into an array of its chunks, in order.
 * Resolves once the stream signals completion.
 */
async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promise<T[]> {
  const chunks: T[] = []
  const reader = stream.getReader()
  for (;;) {
    const next = await reader.read()
    if (next.done) return chunks
    chunks.push(next.value)
  }
}
const TEST_PROMPT: LanguageModelV3Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
// Fixtures from copilot_test.exs
const FIXTURES = {
basicText: [
`data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]}`,
`data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"content":" world"},"finish_reason":null}]}`,
`data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"content":"!"},"finish_reason":"stop"}]}`,
`data: [DONE]`,
],
reasoningWithToolCalls: [
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Understanding Dayzee's Purpose**\\n\\nI'm starting to get a better handle on \`dayzee\`.\\n\\n"}}],"created":1764940861,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Assessing Dayzee's Functionality**\\n\\nI've reviewed the files.\\n\\n"}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"filePath\\":\\"/README.md\\"}","name":"read_file"},"id":"call_abc123","index":0,"type":"function"}],"reasoning_opaque":"4CUQ6696CwSXOdQ5rtvDimqA91tBzfmga4ieRbmZ5P67T2NLW3"}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"filePath\\":\\"/mix.exs\\"}","name":"read_file"},"id":"call_def456","index":1,"type":"function"}]}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":53,"prompt_tokens":19581,"prompt_tokens_details":{"cached_tokens":17068},"total_tokens":19768,"reasoning_tokens":134},"model":"gemini-3-pro-preview"}`,
`data: [DONE]`,
],
reasoningWithOpaqueAtEnd: [
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Analyzing the Inquiry's Nature**\\n\\nI'm currently parsing the user's question.\\n\\n"}}],"created":1765201729,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Reconciling User's Input**\\n\\nI'm grappling with the context.\\n\\n"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"index":0,"delta":{"content":"I am Tidewave, a highly skilled AI coding agent.\\n\\n","role":"assistant"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"finish_reason":"stop","index":0,"delta":{"content":"How can I help you?","role":"assistant","reasoning_opaque":"/PMlTqxqSJZnUBDHgnnJKLVI4eZQ"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":59,"prompt_tokens":5778,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":5932,"reasoning_tokens":95},"model":"gemini-3-pro-preview"}`,
`data: [DONE]`,
],
// Case where reasoning_opaque and content come in the SAME chunk
reasoningWithOpaqueAndContentSameChunk: [
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Understanding the Query's Nature**\\n\\nI'm currently grappling with the user's philosophical query.\\n\\n"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Framing the Response's Core**\\n\\nNow, I'm structuring my response.\\n\\n"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
`data: {"choices":[{"index":0,"delta":{"content":"Of course. I'm thinking right now.","role":"assistant","reasoning_opaque":"ExXaGwW7jBo39OXRe9EPoFGN1rOtLJBx"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
`data: {"choices":[{"finish_reason":"stop","index":0,"delta":{"content":" What's on your mind?","role":"assistant"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":78,"prompt_tokens":3767,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":3915,"reasoning_tokens":70},"model":"gemini-2.5-pro"}`,
`data: [DONE]`,
],
// Case where reasoning_opaque and content come in same chunk, followed by tool calls
reasoningWithOpaqueContentAndToolCalls: [
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Analyzing the Structure**\\n\\nI'm currently trying to get a handle on the project's layout. My initial focus is on the file structure itself, specifically the directory organization. I'm hoping this will illuminate how different components interact. I'll need to identify the key modules and their dependencies.\\n\\n\\n"}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
`data: {"choices":[{"index":0,"delta":{"content":"Okay, I need to check out the project's file structure.","role":"assistant","reasoning_opaque":"WHOd3dYFnxEBOsKUXjbX6c2rJa0fS214FHbsj+A3Q+i63SFo7H/92RsownAzyo0h2qEy3cOcrvAatsMx51eCKiMSqt4dYWZhd5YVSgF0CehkpDbWBP/SoRqLU1dhCmUJV/6b5uYFBOzKLBGNadyhI7T1gWFlXntwc6SNjH6DujnFPeVr+L8DdOoUJGJrw2aOfm9NtkXA6wZh9t7dt+831yIIImjD9MHczuXoXj8K7tyLpIJ9KlVXMhnO4IKSYNdKRtoHlGTmudAp5MgH/vLWb6oSsL+ZJl/OdF3WBOeanGhYNoByCRDSvR7anAR/9m5zf9yUax+u/nFg+gzmhFacnzZGtSmcvJ4/4HWKNtUkRASTKeN94DXB8j1ptB/i6ldaMAz2ZyU+sbjPWI8aI4fKJ2MuO01u3uE87xVwpWiM+0rahIzJsllI5edwOaOFtF4tnlCTQafbxHwCZR62uON2E+IjGzW80MzyfYrbLBJKS5zTeHCgPYQSNaKzPfpzkQvdwo3JUnJYcEHgGeKzkq5sbvS5qitCYI7Xue0V98S6/KnUSPnDQBjNnas2i6BqJV2vuCEU/Y3ucrlKVbuRIFCZXCyLzrsGeRLRKlrf5S/HDAQ04IOPQVQhBPvhX0nDjhZB"}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"list_project_files"},"id":"call_MHxqRDd5WVo3NU8wUXRaMmc0MFE","index":0,"type":"function"}]}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":19,"prompt_tokens":3767,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":3797,"reasoning_tokens":11},"model":"gemini-2.5-pro"}`,
`data: [DONE]`,
],
// Case where reasoning goes directly to tool_calls with NO content
// reasoning_opaque and tool_calls come in the same chunk
reasoningDirectlyToToolCalls: [
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Executing and Analyzing HTML**\\n\\nI've successfully captured the HTML snapshot using the \`browser_eval\` tool, giving me a solid understanding of the page structure. Now, I'm shifting focus to Elixir code execution with \`project_eval\` to assess my ability to work within the project's environment.\\n\\n\\n"}}],"created":1766068643,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Testing Project Contexts**\\n\\nI've got the HTML body snapshot from \`browser_eval\`, which is a helpful reference. Next, I'm testing my ability to run Elixir code in the project with \`project_eval\`. I'm starting with a simple sum: \`1 + 1\`. This will confirm I'm set up to interact with the project's codebase.\\n\\n\\n"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"code\\":\\"1 + 1\\"}","name":"project_eval"},"id":"call_MHw3RDhmT1J5Z3B6WlhpVjlveTc","index":0,"type":"function"}],"reasoning_opaque":"ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":12,"prompt_tokens":8677,"prompt_tokens_details":{"cached_tokens":3692},"total_tokens":8768,"reasoning_tokens":79},"model":"gemini-3-pro-preview"}`,
`data: [DONE]`,
],
reasoningOpaqueWithToolCallsNoReasoningText: [
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"read_file"},"id":"call_reasoning_only","index":0,"type":"function"}],"reasoning_opaque":"opaque-xyz"}}],"created":1769917420,"id":"opaque-only","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-flash-preview"}`,
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"read_file"},"id":"call_reasoning_only_2","index":1,"type":"function"}]}}],"created":1769917420,"id":"opaque-only","usage":{"completion_tokens":12,"prompt_tokens":123,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":135,"reasoning_tokens":0},"model":"gemini-3-flash-preview"}`,
`data: [DONE]`,
],
}
/**
 * Builds a mocked `fetch` that answers every call with a 200 SSE response
 * whose body replays each fixture line once, terminated by a blank line
 * (the `\n\n` SSE event separator).
 */
function createMockFetch(chunks: string[]) {
  const encoder = new TextEncoder()
  return mock(async () => {
    const stream = new ReadableStream({
      start(ctrl) {
        chunks.forEach((line) => ctrl.enqueue(encoder.encode(line + "\n\n")))
        ctrl.close()
      },
    })
    return new Response(stream, {
      status: 200,
      headers: { "Content-Type": "text/event-stream" },
    })
  })
}
/**
 * Instantiates the Copilot-flavored chat model under test, wired to the
 * supplied mocked fetch so no network traffic ever happens.
 */
function createModel(fetchFn: ReturnType<typeof mock>) {
  const settings = {
    provider: "copilot.chat",
    url: () => "https://api.test.com/chat/completions",
    headers: () => ({ Authorization: "Bearer test-token" }),
    fetch: fetchFn as any,
  }
  return new OpenAICompatibleChatLanguageModel("test-model", settings)
}
// Stream-parsing suite: each case replays a fixture SSE transcript through the
// mocked fetch and asserts on the ordered stream parts the model emits. The
// main concern is Copilot's `reasoning_text` / `reasoning_opaque` fields and
// where the opaque blob ends up (reasoning-end vs finish providerMetadata).
describe("doStream", () => {
  test("should stream text deltas", async () => {
    const mockFetch = createMockFetch(FIXTURES.basicText)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    // Filter to just the key events
    const textParts = parts.filter(
      (p) => p.type === "text-start" || p.type === "text-delta" || p.type === "text-end" || p.type === "finish",
    )
    expect(textParts).toMatchObject([
      { type: "text-start", id: "txt-0" },
      { type: "text-delta", id: "txt-0", delta: "Hello" },
      { type: "text-delta", id: "txt-0", delta: " world" },
      { type: "text-delta", id: "txt-0", delta: "!" },
      { type: "text-end", id: "txt-0" },
      { type: "finish", finishReason: { unified: "stop" } },
    ])
  })
  test("should stream reasoning with tool calls and capture reasoning_opaque", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningWithToolCalls)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    // Check reasoning parts
    const reasoningParts = parts.filter(
      (p) => p.type === "reasoning-start" || p.type === "reasoning-delta" || p.type === "reasoning-end",
    )
    expect(reasoningParts[0]).toEqual({
      type: "reasoning-start",
      id: "reasoning-0",
    })
    expect(reasoningParts[1]).toMatchObject({
      type: "reasoning-delta",
      id: "reasoning-0",
    })
    expect((reasoningParts[1] as { delta: string }).delta).toContain("**Understanding Dayzee's Purpose**")
    expect(reasoningParts[2]).toMatchObject({
      type: "reasoning-delta",
      id: "reasoning-0",
    })
    expect((reasoningParts[2] as { delta: string }).delta).toContain("**Assessing Dayzee's Functionality**")
    // reasoning_opaque should be in reasoning-end providerMetadata
    const reasoningEnd = reasoningParts.find((p) => p.type === "reasoning-end")
    expect(reasoningEnd).toMatchObject({
      type: "reasoning-end",
      id: "reasoning-0",
      providerMetadata: {
        copilot: {
          reasoningOpaque: "4CUQ6696CwSXOdQ5rtvDimqA91tBzfmga4ieRbmZ5P67T2NLW3",
        },
      },
    })
    // Check tool calls
    const toolParts = parts.filter(
      (p) => p.type === "tool-input-start" || p.type === "tool-call" || p.type === "tool-input-end",
    )
    expect(toolParts).toContainEqual({
      type: "tool-input-start",
      id: "call_abc123",
      toolName: "read_file",
    })
    expect(toolParts).toContainEqual(
      expect.objectContaining({
        type: "tool-call",
        toolCallId: "call_abc123",
        toolName: "read_file",
      }),
    )
    expect(toolParts).toContainEqual({
      type: "tool-input-start",
      id: "call_def456",
      toolName: "read_file",
    })
    // Check finish
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: { unified: "tool-calls" },
      usage: {
        inputTokens: { total: 19581 },
        outputTokens: { total: 53 },
      },
    })
  })
  test("should handle reasoning_opaque that comes at end with text in between", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueAtEnd)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    // Check that reasoning comes first
    const reasoningStart = parts.findIndex((p) => p.type === "reasoning-start")
    const textStart = parts.findIndex((p) => p.type === "text-start")
    expect(reasoningStart).toBeLessThan(textStart)
    // Check reasoning deltas
    const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
    expect(reasoningDeltas).toHaveLength(2)
    expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Analyzing the Inquiry's Nature**")
    expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Reconciling User's Input**")
    // Check text deltas
    const textDeltas = parts.filter((p) => p.type === "text-delta")
    expect(textDeltas).toHaveLength(2)
    expect((textDeltas[0] as { delta: string }).delta).toContain("I am Tidewave")
    expect((textDeltas[1] as { delta: string }).delta).toContain("How can I help you?")
    // reasoning-end should be emitted before text-start
    const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
    const textStartIndex = parts.findIndex((p) => p.type === "text-start")
    expect(reasoningEndIndex).toBeGreaterThan(-1)
    expect(reasoningEndIndex).toBeLessThan(textStartIndex)
    // In this fixture, reasoning_opaque comes AFTER content has started (in chunk 4)
    // So it arrives too late to be attached to reasoning-end. But it should still
    // be captured and included in the finish event's providerMetadata.
    const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
    expect(reasoningEnd).toMatchObject({
      type: "reasoning-end",
      id: "reasoning-0",
    })
    // reasoning_opaque should be in the finish event's providerMetadata
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: { unified: "stop" },
      usage: {
        inputTokens: { total: 5778 },
        outputTokens: { total: 59 },
      },
      providerMetadata: {
        copilot: {
          reasoningOpaque: "/PMlTqxqSJZnUBDHgnnJKLVI4eZQ",
        },
      },
    })
  })
  test("should handle reasoning_opaque and content in the same chunk", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueAndContentSameChunk)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    // The critical test: reasoning-end should come BEFORE text-start
    const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
    const textStartIndex = parts.findIndex((p) => p.type === "text-start")
    expect(reasoningEndIndex).toBeGreaterThan(-1)
    expect(textStartIndex).toBeGreaterThan(-1)
    expect(reasoningEndIndex).toBeLessThan(textStartIndex)
    // Check reasoning deltas
    const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
    expect(reasoningDeltas).toHaveLength(2)
    expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Understanding the Query's Nature**")
    expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Framing the Response's Core**")
    // reasoning_opaque should be in reasoning-end even though it came with content
    const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
    expect(reasoningEnd).toMatchObject({
      type: "reasoning-end",
      id: "reasoning-0",
      providerMetadata: {
        copilot: {
          reasoningOpaque: "ExXaGwW7jBo39OXRe9EPoFGN1rOtLJBx",
        },
      },
    })
    // Check text deltas
    const textDeltas = parts.filter((p) => p.type === "text-delta")
    expect(textDeltas).toHaveLength(2)
    expect((textDeltas[0] as { delta: string }).delta).toContain("Of course. I'm thinking right now.")
    expect((textDeltas[1] as { delta: string }).delta).toContain("What's on your mind?")
    // Check finish
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: { unified: "stop" },
    })
  })
  test("should handle reasoning_opaque and content followed by tool calls", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueContentAndToolCalls)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    // Check that reasoning comes first, then text, then tool calls
    const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
    const textStartIndex = parts.findIndex((p) => p.type === "text-start")
    const toolStartIndex = parts.findIndex((p) => p.type === "tool-input-start")
    expect(reasoningEndIndex).toBeGreaterThan(-1)
    expect(textStartIndex).toBeGreaterThan(-1)
    expect(toolStartIndex).toBeGreaterThan(-1)
    expect(reasoningEndIndex).toBeLessThan(textStartIndex)
    expect(textStartIndex).toBeLessThan(toolStartIndex)
    // Check reasoning content
    const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
    expect(reasoningDeltas).toHaveLength(1)
    expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Analyzing the Structure**")
    // reasoning_opaque should be in reasoning-end (comes with content in same chunk)
    const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
    expect(reasoningEnd).toMatchObject({
      type: "reasoning-end",
      id: "reasoning-0",
      providerMetadata: {
        copilot: {
          reasoningOpaque: expect.stringContaining("WHOd3dYFnxEBOsKUXjbX6c2rJa0fS214"),
        },
      },
    })
    // Check text content
    const textDeltas = parts.filter((p) => p.type === "text-delta")
    expect(textDeltas).toHaveLength(1)
    expect((textDeltas[0] as { delta: string }).delta).toContain(
      "Okay, I need to check out the project's file structure.",
    )
    // Check tool call
    const toolParts = parts.filter(
      (p) => p.type === "tool-input-start" || p.type === "tool-call" || p.type === "tool-input-end",
    )
    expect(toolParts).toContainEqual({
      type: "tool-input-start",
      id: "call_MHxqRDd5WVo3NU8wUXRaMmc0MFE",
      toolName: "list_project_files",
    })
    expect(toolParts).toContainEqual(
      expect.objectContaining({
        type: "tool-call",
        toolCallId: "call_MHxqRDd5WVo3NU8wUXRaMmc0MFE",
        toolName: "list_project_files",
      }),
    )
    // Check finish
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: { unified: "tool-calls" },
      usage: {
        inputTokens: { total: 3767 },
        outputTokens: { total: 19 },
      },
    })
  })
  test("should emit reasoning-end before tool-input-start when reasoning goes directly to tool calls", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningDirectlyToToolCalls)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    // Critical check: reasoning-end MUST come before tool-input-start
    const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
    const toolStartIndex = parts.findIndex((p) => p.type === "tool-input-start")
    expect(reasoningEndIndex).toBeGreaterThan(-1)
    expect(toolStartIndex).toBeGreaterThan(-1)
    expect(reasoningEndIndex).toBeLessThan(toolStartIndex)
    // Check reasoning parts
    const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
    expect(reasoningDeltas).toHaveLength(2)
    expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Executing and Analyzing HTML**")
    expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Testing Project Contexts**")
    // reasoning_opaque should be in reasoning-end providerMetadata
    const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
    expect(reasoningEnd).toMatchObject({
      type: "reasoning-end",
      id: "reasoning-0",
      providerMetadata: {
        copilot: {
          reasoningOpaque: "ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6",
        },
      },
    })
    // No text parts should exist
    const textParts = parts.filter((p) => p.type === "text-start" || p.type === "text-delta" || p.type === "text-end")
    expect(textParts).toHaveLength(0)
    // Check tool call
    const toolCall = parts.find((p) => p.type === "tool-call")
    expect(toolCall).toMatchObject({
      type: "tool-call",
      toolCallId: "call_MHw3RDhmT1J5Z3B6WlhpVjlveTc",
      toolName: "project_eval",
    })
    // Check finish
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: { unified: "tool-calls" },
    })
  })
  test("should attach reasoning_opaque to tool calls without reasoning_text", async () => {
    const mockFetch = createMockFetch(FIXTURES.reasoningOpaqueWithToolCallsNoReasoningText)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    // No reasoning_text in the fixture, so no reasoning parts may be synthesized
    const reasoningParts = parts.filter(
      (p) => p.type === "reasoning-start" || p.type === "reasoning-delta" || p.type === "reasoning-end",
    )
    expect(reasoningParts).toHaveLength(0)
    // The opaque blob rides on the tool-call's providerMetadata instead
    const toolCall = parts.find((p) => p.type === "tool-call" && p.toolCallId === "call_reasoning_only")
    expect(toolCall).toMatchObject({
      type: "tool-call",
      toolCallId: "call_reasoning_only",
      toolName: "read_file",
      providerMetadata: {
        copilot: {
          reasoningOpaque: "opaque-xyz",
        },
      },
    })
  })
  test("should include response metadata from first chunk", async () => {
    const mockFetch = createMockFetch(FIXTURES.basicText)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    const metadata = parts.find((p) => p.type === "response-metadata")
    expect(metadata).toMatchObject({
      type: "response-metadata",
      id: "chatcmpl-123",
      modelId: "gemini-2.0-flash-001",
    })
  })
  test("should emit stream-start with warnings", async () => {
    const mockFetch = createMockFetch(FIXTURES.basicText)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    })
    const parts = await convertReadableStreamToArray(stream)
    const streamStart = parts.find((p) => p.type === "stream-start")
    expect(streamStart).toEqual({
      type: "stream-start",
      warnings: [],
    })
  })
  test("should include raw chunks when requested", async () => {
    const mockFetch = createMockFetch(FIXTURES.basicText)
    const model = createModel(mockFetch)
    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: true,
    })
    const parts = await convertReadableStreamToArray(stream)
    const rawChunks = parts.filter((p) => p.type === "raw")
    expect(rawChunks.length).toBeGreaterThan(0)
  })
})
// Outbound-request suite: captures the JSON body handed to fetch and asserts
// the wire format, independent of what the (stubbed) response stream contains.
describe("request body", () => {
  test("should send tools in OpenAI format", async () => {
    let capturedBody: unknown
    // Minimal fetch stub: records the request body, replies with an empty SSE stream
    const mockFetch = mock(async (_url: string, init?: RequestInit) => {
      capturedBody = JSON.parse(init?.body as string)
      return new Response(
        new ReadableStream({
          start(controller) {
            controller.enqueue(new TextEncoder().encode(`data: [DONE]\n\n`))
            controller.close()
          },
        }),
        { status: 200, headers: { "Content-Type": "text/event-stream" } },
      )
    })
    const model = createModel(mockFetch)
    await model.doStream({
      prompt: TEST_PROMPT,
      tools: [
        {
          type: "function",
          name: "get_weather",
          description: "Get the weather for a location",
          inputSchema: {
            type: "object",
            properties: {
              location: { type: "string" },
            },
            required: ["location"],
          },
        },
      ],
      includeRawChunks: false,
    })
    // OpenAI wire format nests the definition under `function` and renames
    // inputSchema -> parameters
    expect((capturedBody as { tools: unknown[] }).tools).toEqual([
      {
        type: "function",
        function: {
          name: "get_weather",
          description: "Get the weather for a location",
          parameters: {
            type: "object",
            properties: {
              location: { type: "string" },
            },
            required: ["location"],
          },
        },
      },
    ])
  })
})

View File

@@ -0,0 +1,188 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { GithubCopilotPlugin } from "@opencode-ai/core/plugin/provider/github-copilot"
import { fakeSelectorSdk, it, model } from "../v2/plugin/provider-helper"
// Plugin-hook suite for the GitHub Copilot provider plugin: verifies which SDK
// package it claims, how it picks the responses/chat/languageModel endpoint per
// model ID, and its model filtering behavior.
describe("GithubCopilotPlugin", () => {
  it.effect("creates the bundled Copilot SDK for the GitHub Copilot package", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(GithubCopilotPlugin)
      // A non-matching package name must be left untouched (sdk undefined)
      const ignored = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("github-copilot", "gpt-5"),
          package: "@ai-sdk/openai-compatible",
          options: { name: "github-copilot" },
        },
        {},
      )
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("github-copilot", "gpt-5"),
          package: "@ai-sdk/github-copilot",
          options: { name: "github-copilot" },
        },
        {},
      )
      expect(ignored.sdk).toBeUndefined()
      expect(result.sdk).toBeDefined()
    }),
  )
  it.effect("selects languageModel when responses and chat are absent", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(GithubCopilotPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("github-copilot", "claude-sonnet-4"),
          // SDK exposes only languageModel — no responses/chat fallbacks
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: {},
        },
        {},
      )
      expect(calls).toEqual(["languageModel:claude-sonnet-4"])
    }),
  )
  it.effect("selects languageModel with the API model ID when responses and chat are absent", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(GithubCopilotPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          // `alias` is the catalog ID; the wire ID is the apiID override
          model: model("github-copilot", "alias", { apiID: ModelV2.ID.make("claude-sonnet-4") }),
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: {},
        },
        {},
      )
      expect(calls).toEqual(["languageModel:claude-sonnet-4"])
    }),
  )
  it.effect("uses responses for gpt-5 models except gpt-5-mini", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(GithubCopilotPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("github-copilot", "gpt-5"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("github-copilot", "gpt-5.1-codex"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("github-copilot", "gpt-4o"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("github-copilot", "gpt-5-mini"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("github-copilot", "gpt-5-mini-2025-08-07"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      // gpt-5 family -> responses API, except the mini variants which use chat
      expect(calls).toEqual([
        "responses:gpt-5",
        "responses:gpt-5.1-codex",
        "chat:gpt-4o",
        "chat:gpt-5-mini",
        "chat:gpt-5-mini-2025-08-07",
      ])
    }),
  )
  it.effect("uses the API model ID when selecting responses or chat", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(GithubCopilotPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("github-copilot", "default", { apiID: ModelV2.ID.make("gpt-5") }),
          sdk: fakeSelectorSdk(calls),
          options: {},
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("github-copilot", "small", { apiID: ModelV2.ID.make("gpt-5-mini") }),
          sdk: fakeSelectorSdk(calls),
          options: {},
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("github-copilot", "sonnet", { apiID: ModelV2.ID.make("claude-sonnet-4") }),
          sdk: fakeSelectorSdk(calls),
          options: {},
        },
        {},
      )
      expect(calls).toEqual(["responses:gpt-5", "chat:gpt-5-mini", "chat:claude-sonnet-4"])
    }),
  )
  it.effect("filters gpt-5-chat-latest before Copilot language selection", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(GithubCopilotPlugin)
      const result = yield* plugin.trigger(
        "model.update",
        {},
        { model: model("github-copilot", "gpt-5-chat-latest"), cancel: false },
      )
      // cancel=true drops the model from the catalog
      expect(result.cancel).toBe(true)
    }),
  )
  it.effect("does not filter gpt-5-chat-latest for non-Copilot providers", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(GithubCopilotPlugin)
      const result = yield* plugin.trigger(
        "model.update",
        {},
        { model: model("custom-copilot", "gpt-5-chat-latest"), cancel: false },
      )
      expect(result.cancel).toBe(false)
    }),
  )
  it.effect("ignores non-Copilot providers", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(GithubCopilotPlugin)
      const result = yield* plugin.trigger(
        "aisdk.language",
        { model: model("openai", "gpt-5"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      // Neither an endpoint selection nor a language model may be produced
      expect(calls).toEqual([])
      expect(result.language).toBeUndefined()
    }),
  )
})

View File

@@ -0,0 +1,199 @@
import { describe, expect } from "bun:test"
import { DateTime, Effect, Layer, Option } from "effect"
import { Catalog } from "@opencode-ai/core/catalog"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { ProviderV2 } from "@opencode-ai/core/provider"
import { testEffect } from "../lib/effect"
// Effect-aware test harness: Catalog layer backed by the default plugin layer
const it = testEffect(Catalog.layer.pipe(Layer.provideMerge(PluginV2.defaultLayer)))
// Catalog normalization suite: baseURL -> endpoint.url folding, model/provider
// option merging, plugin hook ordering, and default/small model selection.
describe("CatalogV2", () => {
  it.effect("normalizes provider baseURL into endpoint url", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const providerID = ProviderV2.ID.make("test")
      yield* catalog.provider.update(providerID, (provider) => {
        provider.endpoint = {
          type: "aisdk",
          package: "@ai-sdk/openai-compatible",
          url: "https://default.example.com",
        }
        provider.options.aisdk.provider.baseURL = "https://override.example.com"
      })
      const provider = yield* catalog.provider.get(providerID)
      // baseURL option wins over the declared endpoint url and is consumed
      expect(provider.endpoint).toEqual({
        type: "aisdk",
        package: "@ai-sdk/openai-compatible",
        url: "https://override.example.com",
      })
      expect(provider.options.aisdk.provider.baseURL).toBeUndefined()
    }),
  )
  it.effect("normalizes model baseURL into endpoint url", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const providerID = ProviderV2.ID.make("test")
      const modelID = ModelV2.ID.make("model")
      yield* catalog.provider.update(providerID, (provider) => {
        provider.endpoint = {
          type: "aisdk",
          package: "@ai-sdk/openai-compatible",
          url: "https://provider.example.com",
        }
      })
      yield* catalog.model.update(providerID, modelID, (model) => {
        model.endpoint = {
          type: "aisdk",
          package: "@ai-sdk/openai-compatible",
          url: "https://model.example.com",
        }
        model.options.aisdk.provider.baseURL = "https://override.example.com"
      })
      const model = yield* catalog.model.get(providerID, modelID)
      // Same folding applies at the model level
      expect(model.endpoint).toEqual({
        type: "aisdk",
        package: "@ai-sdk/openai-compatible",
        url: "https://override.example.com",
      })
      expect(model.options.aisdk.provider.baseURL).toBeUndefined()
    }),
  )
  it.effect("resolves unknown model endpoint from provider endpoint", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const providerID = ProviderV2.ID.make("test")
      const modelID = ModelV2.ID.make("model")
      yield* catalog.provider.update(providerID, (provider) => {
        provider.endpoint = {
          type: "aisdk",
          package: "@ai-sdk/openai-compatible",
          url: "https://provider.example.com",
        }
      })
      // Model declares nothing — it must inherit the provider endpoint
      yield* catalog.model.update(providerID, modelID, () => {})
      const model = yield* catalog.model.get(providerID, modelID)
      expect(model.endpoint).toEqual({
        type: "aisdk",
        package: "@ai-sdk/openai-compatible",
        url: "https://provider.example.com",
      })
    }),
  )
  it.effect("runs provider hooks after baseURL is normalized", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const plugin = yield* PluginV2.Service
      const providerID = ProviderV2.ID.make("test")
      const seen: unknown[] = []
      yield* plugin.add({
        id: PluginV2.ID.make("test"),
        effect: Effect.succeed({
          "provider.update": (evt) =>
            Effect.sync(() => {
              seen.push(evt.provider.endpoint.type)
              if (evt.provider.endpoint.type === "aisdk") seen.push(evt.provider.endpoint.url)
              seen.push(evt.provider.options.aisdk.provider.baseURL)
            }),
        }),
      })
      yield* catalog.provider.update(providerID, (provider) => {
        provider.endpoint = {
          type: "aisdk",
          package: "@ai-sdk/openai-compatible",
        }
        provider.options.aisdk.provider.baseURL = "https://provider.example.com"
      })
      // Hook observes the already-normalized endpoint and a cleared baseURL
      expect(seen).toEqual(["aisdk", "https://provider.example.com", undefined])
    }),
  )
  it.effect("resolves provider and model option merges", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const providerID = ProviderV2.ID.make("test")
      const modelID = ModelV2.ID.make("model")
      yield* catalog.provider.update(providerID, (provider) => {
        provider.options.headers.provider = "provider"
        provider.options.headers.shared = "provider"
        provider.options.body.provider = true
        provider.options.aisdk.provider.provider = true
      })
      yield* catalog.model.update(providerID, modelID, (model) => {
        model.options.headers.model = "model"
        model.options.headers.shared = "model"
        model.options.body.model = true
        model.options.aisdk.provider.model = true
        model.options.aisdk.request.request = true
      })
      const model = yield* catalog.model.get(providerID, modelID)
      // Model options are layered over provider options; model wins on conflict
      expect(model.options.headers).toEqual({ provider: "provider", shared: "model", model: "model" })
      expect(model.options.body).toEqual({ provider: true, model: true })
      expect(model.options.aisdk.provider).toEqual({ provider: true, model: true })
      expect(model.options.aisdk.request).toEqual({ request: true })
    }),
  )
  it.effect("falls back to newest available model when no default is configured", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const providerID = ProviderV2.ID.make("test")
      yield* catalog.provider.update(providerID, (provider) => {
        provider.enabled = { via: "custom", data: {} }
      })
      yield* catalog.model.update(providerID, ModelV2.ID.make("old"), (model) => {
        model.time.released = DateTime.makeUnsafe(1000)
      })
      yield* catalog.model.update(providerID, ModelV2.ID.make("new"), (model) => {
        model.time.released = DateTime.makeUnsafe(2000)
      })
      const model = yield* catalog.model.default()
      // Most recent release date wins
      expect(Option.getOrUndefined(model)?.id).toMatch("new")
    }),
  )
  it.effect("small model prefers small keyword candidates before cost scoring", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const providerID = ProviderV2.ID.make("test")
      yield* catalog.provider.update(providerID, () => {})
      yield* catalog.model.update(providerID, ModelV2.ID.make("cheap-large"), (model) => {
        model.capabilities.input = ["text"]
        model.capabilities.output = ["text"]
        model.cost = [{ input: 1, output: 1, cache: { read: 0, write: 0 } }]
        model.time.released = DateTime.makeUnsafe(Date.now())
      })
      yield* catalog.model.update(providerID, ModelV2.ID.make("expensive-mini"), (model) => {
        model.capabilities.input = ["text"]
        model.capabilities.output = ["text"]
        model.cost = [{ input: 10, output: 10, cache: { read: 0, write: 0 } }]
        model.time.released = DateTime.makeUnsafe(Date.now())
      })
      const model = yield* catalog.model.small(providerID)
      // "-mini" name keyword trumps the cheaper non-mini candidate
      expect(Option.getOrUndefined(model)?.id).toMatch("expensive-mini")
    }),
  )
})

View File

@@ -0,0 +1,9 @@
/**
 * Creates a minimal AI SDK provider stand-in for tests.
 *
 * The returned value is callable (`provider(modelID)`) and also exposes a
 * `languageModel(modelID)` method; both resolve to `{ modelID, options }`.
 * `options` is a defensive shallow copy of the construction options, so
 * mutating the caller's object afterwards does not change what the fixture
 * reports as captured.
 *
 * @param options provider construction options to capture
 * @returns callable provider fixture with `options` and `languageModel`
 */
export function createFixtureProvider(options: Record<string, unknown>) {
  // Spread is the idiomatic shallow copy (equivalent to the previous
  // Object.fromEntries(Object.entries(...)) round-trip for string keys).
  const captured = { ...options }
  // Single construction point shared by the callable form and languageModel().
  const toModel = (modelID: string) => ({ modelID, options: captured })
  return Object.assign((modelID: string) => toModel(modelID), {
    options: captured,
    languageModel(modelID: string) {
      return toModel(modelID)
    },
  })
}

View File

@@ -0,0 +1,67 @@
import { describe, expect } from "bun:test"
import { createAlibaba } from "@ai-sdk/alibaba"
import { Effect } from "effect"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { AlibabaPlugin } from "@opencode-ai/core/plugin/provider/alibaba"
import { it, model } from "./provider-helper"
// Plugin-hook suite for the Alibaba provider plugin: SDK package matching and
// parity with the previously bundled createAlibaba behavior.
describe("AlibabaPlugin", () => {
  it.effect("creates an Alibaba SDK for @ai-sdk/alibaba", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(AlibabaPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("alibaba", "qwen"), package: "@ai-sdk/alibaba", options: { name: "alibaba" } },
        {},
      )
      expect(result.sdk).toBeDefined()
    }),
  )
  it.effect("ignores non-Alibaba SDK packages", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(AlibabaPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("alibaba", "qwen"), package: "@ai-sdk/openai-compatible", options: { name: "alibaba" } },
        {},
      )
      // Package mismatch: the hook must not claim the request
      expect(result.sdk).toBeUndefined()
    }),
  )
  it.effect("matches the old bundled Alibaba SDK provider naming", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(AlibabaPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-alibaba", "qwen"),
          package: "@ai-sdk/alibaba",
          options: { name: "custom-alibaba", apiKey: "test" },
        },
        {},
      )
      // Compare against the real createAlibaba output to pin naming parity
      const expected = createAlibaba({ apiKey: "test", ...{ name: "custom-alibaba" } }).languageModel("qwen")
      const actual = result.sdk?.languageModel("qwen")
      expect(actual?.provider).toBe(expected.provider)
      expect(actual?.modelId).toBe(expected.modelId)
    }),
  )
  it.effect("uses the old default languageModel(apiID) behavior", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(AlibabaPlugin)
      const item = model("alibaba", "alias", { apiID: ModelV2.ID.make("qwen-plus") })
      const result = yield* plugin.trigger("aisdk.sdk", { model: item, package: "@ai-sdk/alibaba", options: {} }, {})
      const language = result.sdk?.languageModel(item.apiID)
      expect(language?.modelId).toBe("qwen-plus")
      expect(language?.provider).toBe("alibaba.chat")
    }),
  )
})

View File

@@ -0,0 +1,464 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { AmazonBedrockPlugin } from "@opencode-ai/core/plugin/provider/amazon-bedrock"
import { fakeSelectorSdk, it, model, provider, withEnv } from "./provider-helper"
/**
 * Reads the base URL a Bedrock SDK instance would use for `modelID` by
 * structurally reaching into the language model's internal `config.baseUrl`
 * (test-only access; the SDK does not expose this publicly).
 */
function bedrockBaseURL(sdk: unknown, modelID = "anthropic.claude-sonnet-4-5") {
  type HasLanguageModel = { languageModel: (id: string) => unknown }
  type HasBaseUrl = { config: { baseUrl: () => string } }
  const languageModel = (sdk as HasLanguageModel).languageModel(modelID) as HasBaseUrl
  return languageModel.config.baseUrl()
}
/**
 * Extracts the fetch implementation wired into a Bedrock SDK language model
 * (test-only structural access to the model's internal `config.fetch`).
 */
function bedrockFetch(sdk: unknown, modelID = "anthropic.claude-sonnet-4-5") {
  type HasLanguageModel = { languageModel: (id: string) => unknown }
  type HasFetch = {
    config: { fetch: (input: Parameters<typeof fetch>[0], init?: RequestInit) => Promise<Response> }
  }
  const languageModel = (sdk as HasLanguageModel).languageModel(modelID) as HasFetch
  return languageModel.config.fetch
}
describe("AmazonBedrockPlugin", () => {
  // provider.update: a provider-level aisdk `endpoint` option is consumed by the
  // plugin and surfaced as the provider's endpoint URL rather than an SDK option.
  it.effect("moves endpoint option to endpoint URL", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(AmazonBedrockPlugin)
      const result = yield* plugin.trigger(
        "provider.update",
        {},
        {
          provider: provider("amazon-bedrock", {
            options: {
              headers: {},
              body: {},
              aisdk: { provider: { endpoint: "https://bedrock.example" }, request: {} },
            },
          }),
          cancel: false,
        },
      )
      expect(result.provider.endpoint).toEqual({
        type: "aisdk",
        package: "test-provider",
        url: "https://bedrock.example",
      })
      // The consumed option must not also leak through to the SDK factory.
      expect(result.provider.options.aisdk.provider.endpoint).toBeUndefined()
    }),
  )
  // Base-URL precedence: explicit `endpoint` wins over `baseURL`. Credential env
  // is cleared so the test cannot be influenced by the host's AWS configuration.
  it.effect("prefers endpoint over baseURL for SDK base URL", () =>
    withEnv({ AWS_BEARER_TOKEN_BEDROCK: undefined, AWS_PROFILE: undefined, AWS_ACCESS_KEY_ID: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AmazonBedrockPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            package: "@ai-sdk/amazon-bedrock",
            options: {
              name: "amazon-bedrock",
              bearerToken: "token",
              baseURL: "https://base.example",
              endpoint: "https://endpoint.example",
              region: "us-east-1",
            },
          },
          {},
        )
        expect(bedrockBaseURL(result.sdk)).toBe("https://endpoint.example")
      }),
    ),
  )
  // Without an `endpoint`, a configured `baseURL` is used directly.
  it.effect("uses baseURL as SDK base URL", () =>
    withEnv({ AWS_BEARER_TOKEN_BEDROCK: undefined, AWS_PROFILE: undefined, AWS_ACCESS_KEY_ID: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AmazonBedrockPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            package: "@ai-sdk/amazon-bedrock",
            options: {
              name: "amazon-bedrock",
              bearerToken: "token",
              baseURL: "https://base.example",
              region: "us-east-1",
            },
          },
          {},
        )
        expect(bedrockBaseURL(result.sdk)).toBe("https://base.example")
      }),
    ),
  )
  // With every credential-related env var cleared, SDK creation must still
  // succeed (so the default AWS credential chain can resolve lazily) and the
  // base URL falls back to the us-east-1 runtime endpoint.
  it.effect("creates SDK without explicit credential env so the default AWS chain can resolve credentials", () =>
    withEnv(
      {
        AWS_ACCESS_KEY_ID: undefined,
        AWS_BEARER_TOKEN_BEDROCK: undefined,
        AWS_CONTAINER_CREDENTIALS_FULL_URI: undefined,
        AWS_CONTAINER_CREDENTIALS_RELATIVE_URI: undefined,
        AWS_PROFILE: undefined,
        AWS_REGION: undefined,
        AWS_WEB_IDENTITY_TOKEN_FILE: undefined,
      },
      () =>
        Effect.gen(function* () {
          const plugin = yield* PluginV2.Service
          yield* plugin.add(AmazonBedrockPlugin)
          const result = yield* plugin.trigger(
            "aisdk.sdk",
            {
              model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
              package: "@ai-sdk/amazon-bedrock",
              options: { name: "amazon-bedrock" },
            },
            {},
          )
          expect(result.sdk).toBeDefined()
          expect(bedrockBaseURL(result.sdk)).toBe("https://bedrock-runtime.us-east-1.amazonaws.com")
        }),
    ),
  )
  // Region precedence for the runtime endpoint: config option > AWS_REGION.
  it.effect("uses config region over AWS_REGION for SDK base URL", () =>
    withEnv({ AWS_BEARER_TOKEN_BEDROCK: "token", AWS_REGION: "us-east-1" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AmazonBedrockPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            package: "@ai-sdk/amazon-bedrock",
            options: { name: "amazon-bedrock", region: "eu-west-1" },
          },
          {},
        )
        expect(bedrockBaseURL(result.sdk)).toBe("https://bedrock-runtime.eu-west-1.amazonaws.com")
      }),
    ),
  )
  // AWS_REGION is honored when no region option is configured.
  it.effect("uses AWS_REGION for SDK base URL when config region is absent", () =>
    withEnv({ AWS_BEARER_TOKEN_BEDROCK: "token", AWS_REGION: "eu-west-1" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AmazonBedrockPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            package: "@ai-sdk/amazon-bedrock",
            options: { name: "amazon-bedrock" },
          },
          {},
        )
        expect(bedrockBaseURL(result.sdk)).toBe("https://bedrock-runtime.eu-west-1.amazonaws.com")
      }),
    ),
  )
  // With neither a region option nor AWS_REGION, us-east-1 is the default.
  it.effect("defaults SDK region to us-east-1", () =>
    withEnv({ AWS_BEARER_TOKEN_BEDROCK: "token", AWS_REGION: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AmazonBedrockPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            package: "@ai-sdk/amazon-bedrock",
            options: { name: "amazon-bedrock" },
          },
          {},
        )
        expect(bedrockBaseURL(result.sdk)).toBe("https://bedrock-runtime.us-east-1.amazonaws.com")
      }),
    ),
  )
  // The `bearerToken` option is written into process.env (observable below) and
  // requests then carry a plain Bearer Authorization header. The injected fetch
  // records the Authorization header actually sent.
  it.effect("loads bearer token option into env and uses bearer auth", () =>
    withEnv({ AWS_ACCESS_KEY_ID: undefined, AWS_BEARER_TOKEN_BEDROCK: undefined, AWS_PROFILE: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        const headers: Array<string | null> = []
        yield* plugin.add(AmazonBedrockPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            package: "@ai-sdk/amazon-bedrock",
            options: {
              name: "amazon-bedrock",
              bearerToken: "option-token",
              fetch: async (_input: Parameters<typeof fetch>[0], init?: RequestInit) => {
                headers.push(new Headers(init?.headers).get("Authorization"))
                return new Response("{}")
              },
            },
          },
          {},
        )
        yield* Effect.promise(() => bedrockFetch(result.sdk)("https://bedrock.example", { method: "POST" }))
        // Side effect under test: the option is mirrored into the process env.
        expect(process.env.AWS_BEARER_TOKEN_BEDROCK).toBe("option-token")
        expect(headers).toEqual(["Bearer option-token"])
      }),
    ),
  )
  // An already-present AWS_BEARER_TOKEN_BEDROCK env value wins over the option.
  it.effect("prefers bearer token env over bearer token option", () =>
    withEnv({ AWS_BEARER_TOKEN_BEDROCK: "env-token" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        const headers: Array<string | null> = []
        yield* plugin.add(AmazonBedrockPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            package: "@ai-sdk/amazon-bedrock",
            options: {
              name: "amazon-bedrock",
              bearerToken: "option-token",
              fetch: async (_input: Parameters<typeof fetch>[0], init?: RequestInit) => {
                headers.push(new Headers(init?.headers).get("Authorization"))
                return new Response("{}")
              },
            },
          },
          {},
        )
        yield* Effect.promise(() => bedrockFetch(result.sdk)("https://bedrock.example", { method: "POST" }))
        expect(process.env.AWS_BEARER_TOKEN_BEDROCK).toBe("env-token")
        expect(headers).toEqual(["Bearer env-token"])
      }),
    ),
  )
  // With no bearer token but SigV4-style env credentials, requests are signed:
  // the Authorization header uses the AWS4-HMAC-SHA256 scheme.
  it.effect("uses SigV4 credential env when bearer token is absent", () =>
    withEnv(
      {
        AWS_ACCESS_KEY_ID: "test-access-key",
        AWS_BEARER_TOKEN_BEDROCK: undefined,
        AWS_REGION: "us-east-1",
        AWS_SECRET_ACCESS_KEY: "test-secret-key",
        AWS_SESSION_TOKEN: "test-session-token",
      },
      () =>
        Effect.gen(function* () {
          const plugin = yield* PluginV2.Service
          const headers: Array<string | null> = []
          yield* plugin.add(AmazonBedrockPlugin)
          const result = yield* plugin.trigger(
            "aisdk.sdk",
            {
              model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
              package: "@ai-sdk/amazon-bedrock",
              options: {
                name: "amazon-bedrock",
                fetch: async (_input: Parameters<typeof fetch>[0], init?: RequestInit) => {
                  headers.push(new Headers(init?.headers).get("Authorization"))
                  return new Response("{}")
                },
              },
            },
            {},
          )
          yield* Effect.promise(() =>
            bedrockFetch(result.sdk)("https://bedrock-runtime.us-east-1.amazonaws.com/model/test/invoke", {
              body: "{}",
              method: "POST",
            }),
          )
          expect(headers[0]?.startsWith("AWS4-HMAC-SHA256 ")).toBe(true)
        }),
      ),
  )
  // aisdk.language: bare model IDs get the legacy cross-region inference prefix
  // for the active region (us/eu/jp/au...); already-prefixed IDs pass through.
  // fakeSelectorSdk records which model ID is ultimately requested.
  it.effect("applies legacy cross-region inference prefixes", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AmazonBedrockPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: {},
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: { region: "eu-west-1" },
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("amazon-bedrock", "global.anthropic.claude-sonnet-4-5"),
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: { region: "eu-west-1" },
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: { region: "ap-northeast-1" },
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: { region: "ap-southeast-2" },
        },
        {},
      )
      expect(calls).toEqual([
        "languageModel:us.anthropic.claude-sonnet-4-5",
        "languageModel:eu.anthropic.claude-sonnet-4-5",
        "languageModel:global.anthropic.claude-sonnet-4-5",
        "languageModel:jp.anthropic.claude-sonnet-4-5",
        "languageModel:au.anthropic.claude-sonnet-4-5",
      ])
    }),
  )
  // AWS_REGION also drives prefix selection when no region option is supplied.
  it.effect("uses AWS_REGION for language prefixes when region option is absent", () =>
    withEnv({ AWS_REGION: "eu-west-1" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        const calls: string[] = []
        yield* plugin.add(AmazonBedrockPlugin)
        yield* plugin.trigger(
          "aisdk.language",
          {
            model: model("amazon-bedrock", "anthropic.claude-sonnet-4-5"),
            sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
            options: {},
          },
          {},
        )
        expect(calls).toEqual(["languageModel:eu.anthropic.claude-sonnet-4-5"])
      }),
    ),
  )
  // Exhaustive region x model matrix pinned from the legacy implementation.
  // Entries where `expected` equals `modelID` document models/regions that get
  // no prefix (gov regions, unsupported vendors, already-prefixed IDs).
  it.effect("applies the full legacy cross-region prefix matrix", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      const cases = [
        { region: "us-east-1", modelID: "amazon.nova-micro-v1:0", expected: "us.amazon.nova-micro-v1:0" },
        { region: "us-east-1", modelID: "amazon.nova-lite-v1:0", expected: "us.amazon.nova-lite-v1:0" },
        { region: "us-east-1", modelID: "amazon.nova-pro-v1:0", expected: "us.amazon.nova-pro-v1:0" },
        { region: "us-east-1", modelID: "amazon.nova-premier-v1:0", expected: "us.amazon.nova-premier-v1:0" },
        { region: "us-east-1", modelID: "amazon.nova-2-lite-v1:0", expected: "us.amazon.nova-2-lite-v1:0" },
        { region: "us-east-1", modelID: "anthropic.claude-sonnet-4-5", expected: "us.anthropic.claude-sonnet-4-5" },
        { region: "us-east-1", modelID: "deepseek.r1-v1:0", expected: "us.deepseek.r1-v1:0" },
        { region: "us-gov-west-1", modelID: "anthropic.claude-sonnet-4-5", expected: "anthropic.claude-sonnet-4-5" },
        { region: "us-east-1", modelID: "cohere.command-r-plus-v1:0", expected: "cohere.command-r-plus-v1:0" },
        { region: "eu-west-1", modelID: "anthropic.claude-sonnet-4-5", expected: "eu.anthropic.claude-sonnet-4-5" },
        { region: "eu-west-2", modelID: "amazon.nova-lite-v1:0", expected: "eu.amazon.nova-lite-v1:0" },
        { region: "eu-west-3", modelID: "amazon.nova-micro-v1:0", expected: "eu.amazon.nova-micro-v1:0" },
        {
          region: "eu-north-1",
          modelID: "meta.llama3-70b-instruct-v1:0",
          expected: "eu.meta.llama3-70b-instruct-v1:0",
        },
        { region: "eu-central-1", modelID: "mistral.pixtral-large-v1:0", expected: "eu.mistral.pixtral-large-v1:0" },
        { region: "eu-south-1", modelID: "anthropic.claude-sonnet-4-5", expected: "eu.anthropic.claude-sonnet-4-5" },
        { region: "eu-south-2", modelID: "anthropic.claude-sonnet-4-5", expected: "eu.anthropic.claude-sonnet-4-5" },
        { region: "eu-central-2", modelID: "anthropic.claude-sonnet-4-5", expected: "anthropic.claude-sonnet-4-5" },
        { region: "eu-west-1", modelID: "cohere.command-r-plus-v1:0", expected: "cohere.command-r-plus-v1:0" },
        {
          region: "ap-southeast-2",
          modelID: "anthropic.claude-sonnet-4-5",
          expected: "au.anthropic.claude-sonnet-4-5",
        },
        {
          region: "ap-southeast-4",
          modelID: "anthropic.claude-haiku-v1:0",
          expected: "au.anthropic.claude-haiku-v1:0",
        },
        { region: "ap-southeast-2", modelID: "anthropic.claude-opus-4", expected: "apac.anthropic.claude-opus-4" },
        {
          region: "ap-northeast-1",
          modelID: "anthropic.claude-sonnet-4-5",
          expected: "jp.anthropic.claude-sonnet-4-5",
        },
        { region: "ap-northeast-1", modelID: "amazon.nova-pro-v1:0", expected: "jp.amazon.nova-pro-v1:0" },
        { region: "ap-south-1", modelID: "anthropic.claude-sonnet-4-5", expected: "apac.anthropic.claude-sonnet-4-5" },
        { region: "ap-south-1", modelID: "amazon.nova-lite-v1:0", expected: "apac.amazon.nova-lite-v1:0" },
        { region: "ca-central-1", modelID: "anthropic.claude-sonnet-4-5", expected: "anthropic.claude-sonnet-4-5" },
        {
          region: "us-east-1",
          modelID: "global.anthropic.claude-sonnet-4-5",
          expected: "global.anthropic.claude-sonnet-4-5",
        },
        { region: "us-east-1", modelID: "us.anthropic.claude-sonnet-4-5", expected: "us.anthropic.claude-sonnet-4-5" },
        { region: "eu-west-1", modelID: "eu.anthropic.claude-sonnet-4-5", expected: "eu.anthropic.claude-sonnet-4-5" },
        {
          region: "ap-northeast-1",
          modelID: "jp.anthropic.claude-sonnet-4-5",
          expected: "jp.anthropic.claude-sonnet-4-5",
        },
        {
          region: "ap-south-1",
          modelID: "apac.anthropic.claude-sonnet-4-5",
          expected: "apac.anthropic.claude-sonnet-4-5",
        },
        {
          region: "ap-southeast-2",
          modelID: "au.anthropic.claude-sonnet-4-5",
          expected: "au.anthropic.claude-sonnet-4-5",
        },
      ]
      yield* plugin.add(AmazonBedrockPlugin)
      for (const item of cases) {
        yield* plugin.trigger(
          "aisdk.language",
          {
            model: model("amazon-bedrock", item.modelID),
            sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
            options: { region: item.region },
          },
          {},
        )
      }
      expect(calls).toEqual(cases.map((item) => `languageModel:${item.expected}`))
    }),
  )
  // Guard: a model from another provider is not touched — no selector call and
  // no language model in the result.
  it.effect("ignores non-Bedrock providers for language selection", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AmazonBedrockPlugin)
      const result = yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("openai", "anthropic.claude-sonnet-4-5"),
          sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
          options: { region: "eu-west-1" },
        },
        {},
      )
      expect(calls).toEqual([])
      expect(result.language).toBeUndefined()
    }),
  )
})

View File

@@ -0,0 +1,91 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { AnthropicPlugin } from "@opencode-ai/core/plugin/provider/anthropic"
import { it, model, provider } from "./provider-helper"
describe("AnthropicPlugin", () => {
  // provider.update: the plugin appends the legacy anthropic-beta header value
  // while leaving pre-existing headers intact.
  it.effect("applies legacy beta headers", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(AnthropicPlugin)
      const result = yield* plugin.trigger(
        "provider.update",
        {},
        {
          provider: provider("anthropic", {
            options: { headers: { Existing: "1" }, body: {}, aisdk: { provider: {}, request: {} } },
          }),
          cancel: false,
        },
      )
      expect(result.provider.options.headers["anthropic-beta"]).toBe(
        "interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14",
      )
      expect(result.provider.options.headers.Existing).toBe("1")
    }),
  )
  // Guard: providers other than "anthropic" are left untouched.
  it.effect("ignores non-Anthropic providers", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(AnthropicPlugin)
      const result = yield* plugin.trigger("provider.update", {}, { provider: provider("openai"), cancel: false })
      expect(result.provider.options.headers["anthropic-beta"]).toBeUndefined()
    }),
  )
  // aisdk.sdk: a second "inspector" plugin observes the SDK the Anthropic plugin
  // created; the SDK's provider string must equal the custom provider ID.
  it.effect("creates Anthropic SDKs with the model provider ID as the SDK name", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const providers: string[] = []
      yield* plugin.add(AnthropicPlugin)
      yield* plugin.add({
        id: PluginV2.ID.make("anthropic-sdk-inspector"),
        effect: Effect.succeed({
          "aisdk.sdk": (evt) =>
            Effect.sync(() => {
              providers.push(evt.sdk.languageModel("claude-sonnet-4-5").provider)
            }),
        }),
      })
      yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-anthropic", "claude-sonnet-4-5"),
          package: "@ai-sdk/anthropic",
          options: { name: "custom-anthropic", apiKey: "test" },
        },
        {},
      )
      expect(providers).toEqual(["custom-anthropic"])
    }),
  )
  // Same observation for the bundled "anthropic" provider ID itself.
  it.effect("uses the Anthropic provider ID as the SDK name for the bundled Anthropic provider", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const providers: string[] = []
      yield* plugin.add(AnthropicPlugin)
      yield* plugin.add({
        id: PluginV2.ID.make("anthropic-sdk-inspector"),
        effect: Effect.succeed({
          "aisdk.sdk": (evt) =>
            Effect.sync(() => {
              providers.push(evt.sdk.languageModel("claude-sonnet-4-5").provider)
            }),
        }),
      })
      yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("anthropic", "claude-sonnet-4-5"),
          package: "@ai-sdk/anthropic",
          options: { name: "anthropic", apiKey: "test" },
        },
        {},
      )
      expect(providers).toEqual(["anthropic"])
    }),
  )
})

View File

@@ -0,0 +1,127 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { AzureCognitiveServicesPlugin } from "@opencode-ai/core/plugin/provider/azure"
import { fakeSelectorSdk, it, model, provider, withEnv } from "./provider-helper"
describe("AzureCognitiveServicesPlugin", () => {
  // The resource-name env var is rewritten into a full cognitiveservices
  // baseURL; the raw resourceName must not be forwarded to the SDK.
  it.effect("maps the resource env var to the Azure SDK baseURL", () =>
    withEnv({ AZURE_COGNITIVE_SERVICES_RESOURCE_NAME: "cognitive" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzureCognitiveServicesPlugin)
        const result = yield* plugin.trigger(
          "provider.update",
          {},
          { provider: provider("azure-cognitive-services"), cancel: false },
        )
        expect(result.provider.endpoint).toEqual({
          type: "aisdk",
          package: "test-provider",
        })
        expect(result.provider.options.aisdk.provider.baseURL).toBe(
          "https://cognitive.cognitiveservices.azure.com/openai",
        )
        expect(result.provider.options.aisdk.provider.resourceName).toBeUndefined()
      }),
    ),
  )
  // No env var → no baseURL is synthesized; other providers stay untouched.
  it.effect("leaves baseURL unset without resource env and ignores other providers", () =>
    withEnv({ AZURE_COGNITIVE_SERVICES_RESOURCE_NAME: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzureCognitiveServicesPlugin)
        const azure = yield* plugin.trigger(
          "provider.update",
          {},
          { provider: provider("azure-cognitive-services"), cancel: false },
        )
        const other = yield* plugin.trigger("provider.update", {}, { provider: provider("openai"), cancel: false })
        expect(azure.provider.options.aisdk.provider.baseURL).toBeUndefined()
        expect(azure.provider.endpoint).toEqual({ type: "aisdk", package: "test-provider" })
        expect(other.provider.options.aisdk.provider.baseURL).toBeUndefined()
        expect(other.provider.endpoint).toEqual({ type: "aisdk", package: "test-provider" })
      }),
    ),
  )
  // With useCompletionUrls the chat selector is used instead of responses.
  it.effect("selects chat only for completion URLs", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AzureCognitiveServicesPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("azure-cognitive-services", "deployment"),
          sdk: fakeSelectorSdk(calls),
          options: { useCompletionUrls: true },
        },
        {},
      )
      expect(calls).toEqual(["chat:deployment"])
    }),
  )
  // Default selector is `responses`; other providers produce no selection.
  it.effect("uses the legacy Azure selector order and provider guard", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AzureCognitiveServicesPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("azure-cognitive-services", "deployment"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      const ignored = yield* plugin.trigger(
        "aisdk.language",
        { model: model("openai", "deployment"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      expect(calls).toEqual(["responses:deployment"])
      expect(ignored.language).toBeUndefined()
    }),
  )
  // Fallback chain when the SDK lacks selectors: responses → messages → chat →
  // languageModel. Each trigger passes an SDK missing the earlier selectors.
  it.effect("falls back from responses to messages, chat, then languageModel", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      const sdk = fakeSelectorSdk(calls)
      yield* plugin.add(AzureCognitiveServicesPlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("azure-cognitive-services", "messages-deployment"),
          sdk: { messages: sdk.messages, chat: sdk.chat, languageModel: sdk.languageModel },
          options: {},
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("azure-cognitive-services", "chat-deployment"),
          sdk: { chat: sdk.chat, languageModel: sdk.languageModel },
          options: {},
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("azure-cognitive-services", "language-deployment"),
          sdk: { languageModel: sdk.languageModel },
          options: {},
        },
        {},
      )
      expect(calls).toEqual([
        "messages:messages-deployment",
        "chat:chat-deployment",
        "languageModel:language-deployment",
      ])
    }),
  )
})

View File

@@ -0,0 +1,245 @@
import { describe, expect } from "bun:test"
import { Effect, Layer } from "effect"
import { AuthV2 } from "@opencode-ai/core/auth"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { AuthPlugin } from "@opencode-ai/core/plugin/auth"
import { AzurePlugin } from "@opencode-ai/core/plugin/provider/azure"
import { testEffect } from "../../lib/effect"
import { fakeSelectorSdk, it, model, npmLayer, provider, withEnv } from "./provider-helper"
// Test harness with the AuthV2 layer merged in, for cases that exercise
// credentials supplied through stored auth rather than env/config.
const itWithAuth = testEffect(Layer.mergeAll(PluginV2.defaultLayer, AuthV2.defaultLayer, npmLayer))
describe("AzurePlugin", () => {
  // resourceName resolution: AZURE_RESOURCE_NAME fills in when nothing is set.
  it.effect("resolves resourceName from env", () =>
    withEnv({ AZURE_RESOURCE_NAME: "from-env" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzurePlugin)
        const result = yield* plugin.trigger("provider.update", {}, { provider: provider("azure"), cancel: false })
        expect(result.provider.options.aisdk.provider.resourceName).toBe("from-env")
      }),
    ),
  )
  // Explicit config beats env; non-Azure providers are untouched.
  it.effect("keeps explicit resourceName over env and ignores other providers", () =>
    withEnv({ AZURE_RESOURCE_NAME: "from-env" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzurePlugin)
        const azure = yield* plugin.trigger(
          "provider.update",
          {},
          {
            provider: provider("azure", {
              options: { headers: {}, body: {}, aisdk: { provider: { resourceName: "from-config" }, request: {} } },
            }),
            cancel: false,
          },
        )
        const other = yield* plugin.trigger("provider.update", {}, { provider: provider("openai"), cancel: false })
        expect(azure.provider.options.aisdk.provider.resourceName).toBe("from-config")
        expect(other.provider.options.aisdk.provider.resourceName).toBeUndefined()
      }),
    ),
  )
  // A resourceName stored in auth metadata (via the AuthPlugin) beats env.
  itWithAuth.effect("prefers auth resourceName over env", () =>
    withEnv(
      {
        AZURE_RESOURCE_NAME: "from-env",
      },
      () =>
        Effect.gen(function* () {
          const plugin = yield* PluginV2.Service
          const auth = yield* AuthV2.Service
          yield* auth.create({
            serviceID: AuthV2.ServiceID.make("azure"),
            credential: new AuthV2.ApiKeyCredential({
              type: "api",
              key: "key",
              metadata: { resourceName: "from-auth" },
            }),
            active: true,
          })
          // Re-wire AuthPlugin to the auth service instance seeded above.
          yield* plugin.add({
            ...AuthPlugin,
            effect: AuthPlugin.effect.pipe(Effect.provideService(AuthV2.Service, auth)),
          })
          yield* plugin.add(AzurePlugin)
          const result = yield* plugin.trigger("provider.update", {}, { provider: provider("azure"), cancel: false })
          expect(result.provider.options.aisdk.provider.resourceName).toBe("from-auth")
        }),
    ),
  )
  // An empty configured resourceName is treated as unset → env fallback.
  it.effect("falls back to env when configured resourceName is blank", () =>
    withEnv({ AZURE_RESOURCE_NAME: "from-env" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzurePlugin)
        const result = yield* plugin.trigger(
          "provider.update",
          {},
          {
            provider: provider("azure", {
              options: { headers: {}, body: {}, aisdk: { provider: { resourceName: "" }, request: {} } },
            }),
            cancel: false,
          },
        )
        expect(result.provider.options.aisdk.provider.resourceName).toBe("from-env")
      }),
    ),
  )
  // Whitespace-only resourceName is likewise treated as unset.
  it.effect("falls back to env when configured resourceName is whitespace", () =>
    withEnv({ AZURE_RESOURCE_NAME: "from-env" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzurePlugin)
        const result = yield* plugin.trigger(
          "provider.update",
          {},
          {
            provider: provider("azure", {
              options: { headers: {}, body: {}, aisdk: { provider: { resourceName: " " }, request: {} } },
            }),
            cancel: false,
          },
        )
        expect(result.provider.options.aisdk.provider.resourceName).toBe("from-env")
      }),
    ),
  )
  // A baseURL alone is sufficient — resourceName is not required then.
  it.effect("allows configured baseURL without resourceName", () =>
    withEnv({ AZURE_RESOURCE_NAME: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzurePlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("azure", "deployment"),
            package: "@ai-sdk/azure",
            options: { name: "azure", baseURL: "https://proxy.example.com/openai" },
          },
          {},
        )
        expect(result.sdk).toBeDefined()
      }),
    ),
  )
  // With neither resourceName nor baseURL, SDK creation fails.
  it.effect("rejects missing resourceName when baseURL is not configured", () =>
    withEnv({ AZURE_RESOURCE_NAME: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(AzurePlugin)
        const exit = yield* plugin
          .trigger(
            "aisdk.sdk",
            { model: model("azure", "deployment"), package: "@ai-sdk/azure", options: { name: "azure" } },
            {},
          )
          .pipe(Effect.exit)
        expect(exit._tag).toBe("Failure")
      }),
    ),
  )
  // useCompletionUrls switches selection from responses to chat.
  it.effect("selects chat only for completion URLs", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AzurePlugin)
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("azure", "deployment"), sdk: fakeSelectorSdk(calls), options: { useCompletionUrls: true } },
        {},
      )
      expect(calls).toEqual(["chat:deployment"])
    }),
  )
  // NOTE(review): this test body is byte-identical to the previous one; the
  // title suggests it was meant to exercise a different source of the
  // useCompletionUrls flag (cf. the model-options test below) — confirm intent.
  it.effect("selects chat from per-call useCompletionUrls", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AzurePlugin)
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("azure", "deployment"), sdk: fakeSelectorSdk(calls), options: { useCompletionUrls: true } },
        {},
      )
      expect(calls).toEqual(["chat:deployment"])
    }),
  )
  // A model-level aisdk.request.useCompletionUrls flag does NOT switch the
  // selector when the per-call option is absent — responses is still used.
  it.effect("ignores model useCompletionUrls when per-call option is unset", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AzurePlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("azure", "deployment", {
            options: { headers: {}, body: {}, aisdk: { provider: {}, request: { useCompletionUrls: true } } },
          }),
          sdk: fakeSelectorSdk(calls),
          options: {},
        },
        {},
      )
      expect(calls).toEqual(["responses:deployment"])
    }),
  )
  // Default selector is `responses`; other providers produce no selection.
  it.effect("uses the legacy Azure selector order and provider guard", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(AzurePlugin)
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("azure", "deployment"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      const ignored = yield* plugin.trigger(
        "aisdk.language",
        { model: model("openai", "deployment"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      expect(calls).toEqual(["responses:deployment"])
      expect(ignored.language).toBeUndefined()
    }),
  )
  // Fallback chain when selectors are missing from the SDK:
  // responses → messages → … → languageModel as the last resort.
  it.effect("falls back through the legacy Azure selector order", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      // Local factory for selector stubs that record which selector ran.
      const make = (method: string) => (id: string) => {
        calls.push(`${method}:${id}`)
        return { modelId: id, provider: method, specificationVersion: "v3" }
      }
      yield* plugin.add(AzurePlugin)
      yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("azure", "messages-deployment"),
          sdk: { messages: make("messages"), chat: make("chat"), languageModel: make("languageModel") },
          options: {},
        },
        {},
      )
      yield* plugin.trigger(
        "aisdk.language",
        { model: model("azure", "language-deployment"), sdk: { languageModel: make("languageModel") }, options: {} },
        {},
      )
      expect(calls).toEqual(["messages:messages-deployment", "languageModel:language-deployment"])
    }),
  )
})

View File

@@ -0,0 +1,102 @@
import { describe, expect, mock } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { CerebrasPlugin } from "@opencode-ai/core/plugin/provider/cerebras"
import { it, model, provider } from "./provider-helper"
// Options captured from every createCerebras call made through the plugin.
const cerebrasOptions: Record<string, unknown>[] = []
// Mock the bundled Cerebras SDK so tests can inspect factory options; the fake
// language model echoes the configured `name` back as its `provider`.
void mock.module("@ai-sdk/cerebras", () => ({
  createCerebras: (options: Record<string, unknown>) => {
    const snapshot = { ...options }
    cerebrasOptions.push(snapshot)
    return {
      languageModel: (modelID: string) => ({ modelID, provider: snapshot.name, specificationVersion: "v3" }),
    }
  },
}))
describe("CerebrasPlugin", () => {
  // provider.update: the integration header is added alongside existing ones.
  it.effect("applies the legacy integration header", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(CerebrasPlugin)
      const result = yield* plugin.trigger(
        "provider.update",
        {},
        {
          provider: provider("cerebras", {
            options: { headers: { Existing: "1" }, body: {}, aisdk: { provider: {}, request: {} } },
          }),
          cancel: false,
        },
      )
      expect(result.provider.options.headers).toEqual({ Existing: "1", "X-Cerebras-3rd-Party-Integration": "opencode" })
    }),
  )
  // Guard: providers other than "cerebras" keep their headers unchanged.
  it.effect("ignores non-Cerebras providers", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(CerebrasPlugin)
      const result = yield* plugin.trigger("provider.update", {}, { provider: provider("groq"), cancel: false })
      expect(result.provider.options.headers).toEqual({})
    }),
  )
  // aisdk.sdk: the mocked createCerebras receives the options verbatim and the
  // resulting SDK reports the custom provider ID as its provider name.
  it.effect("creates a bundled Cerebras SDK with the model provider ID as the SDK name", () =>
    Effect.gen(function* () {
      cerebrasOptions.length = 0
      const plugin = yield* PluginV2.Service
      yield* plugin.add(CerebrasPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-cerebras", "llama-4-scout-17b-16e-instruct"),
          package: "@ai-sdk/cerebras",
          options: { name: "custom-cerebras", apiKey: "test" },
        },
        {},
      )
      expect(cerebrasOptions).toEqual([{ name: "custom-cerebras", apiKey: "test" }])
      expect(result.sdk.languageModel("llama-4-scout-17b-16e-instruct").provider).toBe("custom-cerebras")
    }),
  )
  // An explicitly configured `name` is passed through unmodified even when it
  // differs from the model's provider ID.
  it.effect("preserves an explicit bundled Cerebras SDK name option", () =>
    Effect.gen(function* () {
      cerebrasOptions.length = 0
      const plugin = yield* PluginV2.Service
      yield* plugin.add(CerebrasPlugin)
      yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-cerebras", "llama-4-scout-17b-16e-instruct"),
          package: "@ai-sdk/cerebras",
          options: { name: "configured-cerebras", apiKey: "test" },
        },
        {},
      )
      expect(cerebrasOptions).toEqual([{ name: "configured-cerebras", apiKey: "test" }])
    }),
  )
  // Guard: a different SDK package never reaches the Cerebras factory.
  it.effect("ignores non-Cerebras SDK packages", () =>
    Effect.gen(function* () {
      cerebrasOptions.length = 0
      const plugin = yield* PluginV2.Service
      yield* plugin.add(CerebrasPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-cerebras", "llama-4-scout-17b-16e-instruct"),
          package: "@ai-sdk/groq",
          options: { name: "custom-cerebras", apiKey: "test" },
        },
        {},
      )
      expect(cerebrasOptions).toEqual([])
      expect(result.sdk).toBeUndefined()
    }),
  )
})

View File

@@ -0,0 +1,384 @@
import { describe, expect, mock } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { CloudflareAIGatewayPlugin } from "@opencode-ai/core/plugin/provider/cloudflare-ai-gateway"
import { it, model, withEnv } from "./provider-helper"
// Arguments captured by the mocked `ai-gateway-provider` modules; reset between
// tests via resetCalls().
const aiGatewayCalls: Record<string, unknown>[] = []
const unifiedCalls: string[] = []
const gatewayModelCalls: unknown[] = []
/**
 * Snapshots an options object for later assertions: shallow-copies the top
 * level, the nested `options` object (when present), and its `headers`, so
 * later mutations by the code under test cannot alter the recorded values.
 */
function captureAiGatewayOptions(options: Record<string, unknown>) {
  const snapshot: Record<string, unknown> = { ...options }
  const nested = options.options
  if (!nested || typeof nested !== "object") return snapshot
  const nestedCopy = { ...(nested as Record<string, unknown>) }
  const headers = nestedCopy.headers
  if (headers && typeof headers === "object") {
    nestedCopy.headers = { ...(headers as Record<string, unknown>) }
  }
  snapshot.options = nestedCopy
  return snapshot
}
/** Empties every capture array so each test starts from a clean slate. */
function resetCalls() {
  for (const log of [aiGatewayCalls, unifiedCalls, gatewayModelCalls]) {
    log.length = 0
  }
}
/**
 * Build the Cloudflare environment map used with `withEnv`.
 *
 * Defaults set account, gateway, and API-token variables and explicitly clear
 * CF_AIG_TOKEN; `overrides` replaces or extends any of those entries.
 */
function cloudflareEnv(overrides: Record<string, string | undefined> = {}) {
  const defaults: Record<string, string | undefined> = {
    CLOUDFLARE_ACCOUNT_ID: "env-account",
    CLOUDFLARE_GATEWAY_ID: "env-gateway",
    CLOUDFLARE_API_TOKEN: "env-token",
    CF_AIG_TOKEN: undefined,
  }
  return { ...defaults, ...overrides }
}
// Replace ai-gateway-provider with a stub: record the factory options via
// captureAiGatewayOptions, and return a gateway function that records its
// input and yields a minimal fake language-model object.
mock.module("ai-gateway-provider", () => ({
  createAiGateway(options: Record<string, unknown>) {
    aiGatewayCalls.push(captureAiGatewayOptions(options))
    return (input: unknown) => {
      gatewayModelCalls.push(input)
      return {
        modelId: input,
        provider: "cloudflare-ai-gateway",
        specificationVersion: "v3",
      }
    }
  },
}))
// Stub the unified-provider entrypoint: record each requested model ID and
// return a marker object that the gateway stub above receives as its input.
mock.module("ai-gateway-provider/providers/unified", () => ({
  createUnified() {
    return (modelID: string) => {
      unifiedCalls.push(modelID)
      return { unifiedModelID: modelID }
    }
  },
}))
// Covers the Cloudflare AI Gateway plugin: when an SDK is (or is not) created,
// how env/auth/config credentials are merged, how legacy options and headers
// are mapped onto the ai-gateway-provider factory, and how model IDs are
// routed through the unified provider.
describe("CloudflareAIGatewayPlugin", () => {
  // Happy path: with account, gateway, and token all present, the unified SDK exists.
  it.effect("requires account, gateway, and token before creating the unified SDK", () =>
    withEnv(
      {
        CLOUDFLARE_ACCOUNT_ID: "acct",
        CLOUDFLARE_GATEWAY_ID: "gateway",
        CLOUDFLARE_API_TOKEN: "token",
        CF_AIG_TOKEN: undefined,
      },
      () =>
        Effect.gen(function* () {
          const plugin = yield* PluginV2.Service
          yield* plugin.add(CloudflareAIGatewayPlugin)
          const result = yield* plugin.trigger(
            "aisdk.sdk",
            {
              model: model("cloudflare-ai-gateway", "openai/gpt-5"),
              package: "ai-gateway-provider",
              options: { name: "cloudflare-ai-gateway" },
            },
            {},
          )
          expect(result.sdk.languageModel("openai/gpt-5")).toBeDefined()
        }),
    ),
  )
  // Legacy cache/log/metadata options must land under the factory's nested
  // `options` key, together with an opencode User-Agent header.
  it.effect("passes legacy metadata, cache, log, and User-Agent values under the AI Gateway options key", () =>
    withEnv(cloudflareEnv(), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "openai/gpt-5"),
            package: "ai-gateway-provider",
            options: {
              name: "cloudflare-ai-gateway",
              metadata: { invoked_by: "test", project: "opencode" },
              cacheTtl: 300,
              cacheKey: "cache-key",
              skipCache: true,
              collectLog: false,
            },
          },
          {},
        )
        expect(aiGatewayCalls).toHaveLength(1)
        expect(aiGatewayCalls[0]).toEqual({
          accountId: "env-account",
          gateway: "env-gateway",
          apiKey: "env-token",
          options: {
            metadata: { invoked_by: "test", project: "opencode" },
            cacheTtl: 300,
            cacheKey: "cache-key",
            skipCache: true,
            collectLog: false,
            headers: {
              "User-Agent": expect.stringContaining("opencode/"),
            },
          },
        })
      }),
    ),
  )
  // A JSON-encoded cf-aig-metadata header is promoted to the metadata option
  // when no explicit metadata option is given.
  it.effect("parses legacy cf-aig-metadata header when metadata option is absent", () =>
    withEnv(cloudflareEnv(), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "openai/gpt-5"),
            package: "ai-gateway-provider",
            options: {
              name: "cloudflare-ai-gateway",
              headers: {
                "cf-aig-metadata": JSON.stringify({ invoked_by: "header", project: "opencode" }),
              },
            },
          },
          {},
        )
        expect(aiGatewayCalls[0]?.options).toMatchObject({
          metadata: { invoked_by: "header", project: "opencode" },
        })
      }),
    ),
  )
  // Environment credentials take precedence over the auth/config-derived options.
  it.effect("prefers Cloudflare env values over auth/config-derived options", () =>
    withEnv(cloudflareEnv(), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "openai/gpt-5"),
            package: "ai-gateway-provider",
            options: {
              name: "cloudflare-ai-gateway",
              accountId: "auth-account",
              gateway: "auth-gateway",
              apiKey: "auth-token",
            },
          },
          {},
        )
        expect(aiGatewayCalls[0]).toMatchObject({
          accountId: "env-account",
          gateway: "env-gateway",
          apiKey: "env-token",
        })
      }),
    ),
  )
  // With no Cloudflare env vars set, a `gatewayId` option (as copied from auth
  // metadata) is accepted and mapped to the factory's `gateway` field.
  it.effect("accepts gatewayId metadata copied from auth into provider options", () =>
    withEnv(
      cloudflareEnv({
        CLOUDFLARE_ACCOUNT_ID: undefined,
        CLOUDFLARE_GATEWAY_ID: undefined,
        CLOUDFLARE_API_TOKEN: undefined,
      }),
      () =>
        Effect.gen(function* () {
          resetCalls()
          const plugin = yield* PluginV2.Service
          yield* plugin.add(CloudflareAIGatewayPlugin)
          yield* plugin.trigger(
            "aisdk.sdk",
            {
              model: model("cloudflare-ai-gateway", "openai/gpt-5"),
              package: "ai-gateway-provider",
              options: {
                name: "cloudflare-ai-gateway",
                accountId: "auth-account",
                gatewayId: "auth-gateway",
                apiKey: "auth-token",
              },
            },
            {},
          )
          expect(aiGatewayCalls[0]).toMatchObject({
            accountId: "auth-account",
            gateway: "auth-gateway",
            apiKey: "auth-token",
          })
        }),
    ),
  )
  // CF_AIG_TOKEN serves as the API key when CLOUDFLARE_API_TOKEN is absent.
  it.effect("falls back to CF_AIG_TOKEN when CLOUDFLARE_API_TOKEN is unset", () =>
    withEnv(cloudflareEnv({ CLOUDFLARE_API_TOKEN: undefined, CF_AIG_TOKEN: "cf-aig-token" }), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "openai/gpt-5"),
            package: "ai-gateway-provider",
            options: { name: "cloudflare-ai-gateway" },
          },
          {},
        )
        expect(aiGatewayCalls[0]).toMatchObject({ apiKey: "cf-aig-token" })
      }),
    ),
  )
  // Missing account/gateway IDs means no SDK and no factory call at all.
  it.effect("does not create an SDK when account and gateway IDs are missing", () =>
    withEnv(cloudflareEnv({ CLOUDFLARE_ACCOUNT_ID: undefined, CLOUDFLARE_GATEWAY_ID: undefined }), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "openai/gpt-5"),
            package: "ai-gateway-provider",
            options: { name: "cloudflare-ai-gateway" },
          },
          {},
        )
        expect(result.sdk).toBeUndefined()
        expect(aiGatewayCalls).toHaveLength(0)
      }),
    ),
  )
  // A missing token likewise short-circuits SDK creation.
  it.effect("does not create an SDK when the token is missing", () =>
    withEnv(cloudflareEnv({ CLOUDFLARE_API_TOKEN: undefined, CF_AIG_TOKEN: undefined }), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "openai/gpt-5"),
            package: "ai-gateway-provider",
            options: { name: "cloudflare-ai-gateway" },
          },
          {},
        )
        expect(result.sdk).toBeUndefined()
        expect(aiGatewayCalls).toHaveLength(0)
      }),
    ),
  )
  // An explicitly configured baseURL opts the provider out of the gateway SDK.
  it.effect("does not replace a configured baseURL with the Cloudflare AI Gateway SDK", () =>
    withEnv(
      cloudflareEnv({
        CLOUDFLARE_ACCOUNT_ID: undefined,
        CLOUDFLARE_GATEWAY_ID: undefined,
        CLOUDFLARE_API_TOKEN: undefined,
      }),
      () =>
        Effect.gen(function* () {
          resetCalls()
          const plugin = yield* PluginV2.Service
          yield* plugin.add(CloudflareAIGatewayPlugin)
          const result = yield* plugin.trigger(
            "aisdk.sdk",
            {
              model: model("cloudflare-ai-gateway", "openai/gpt-5"),
              package: "ai-gateway-provider",
              options: { name: "cloudflare-ai-gateway", baseURL: "https://proxy.example/v1" },
            },
            {},
          )
          expect(result.sdk).toBeUndefined()
          expect(aiGatewayCalls).toHaveLength(0)
        }),
    ),
  )
  // Model IDs must flow through the unified provider and then into the gateway
  // factory without being rewritten along the way.
  it.effect("maps provider/model IDs through the unified Cloudflare provider unchanged", () =>
    withEnv(cloudflareEnv(), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "anthropic/claude-sonnet-4-5"),
            package: "ai-gateway-provider",
            options: { name: "cloudflare-ai-gateway" },
          },
          {},
        )
        expect(result.sdk.languageModel("anthropic/claude-sonnet-4-5")).toEqual({
          modelId: { unifiedModelID: "anthropic/claude-sonnet-4-5" },
          provider: "cloudflare-ai-gateway",
          specificationVersion: "v3",
        })
        expect(unifiedCalls).toEqual(["anthropic/claude-sonnet-4-5"])
        expect(gatewayModelCalls).toEqual([{ unifiedModelID: "anthropic/claude-sonnet-4-5" }])
      }),
    ),
  )
  // Other packages are left alone even with full credentials available.
  it.effect("ignores non Cloudflare AI Gateway packages", () =>
    withEnv(cloudflareEnv(), () =>
      Effect.gen(function* () {
        resetCalls()
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareAIGatewayPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-ai-gateway", "openai/gpt-5"),
            package: "@ai-sdk/openai-compatible",
            options: { name: "cloudflare-ai-gateway" },
          },
          {},
        )
        expect(result.sdk).toBeUndefined()
        expect(aiGatewayCalls).toHaveLength(0)
      }),
    ),
  )
})

View File

@@ -0,0 +1,267 @@
import { describe, expect } from "bun:test"
import { Effect, Layer } from "effect"
import { AuthV2 } from "@opencode-ai/core/auth"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { AuthPlugin } from "@opencode-ai/core/plugin/auth"
import { CloudflareWorkersAIPlugin } from "@opencode-ai/core/plugin/provider/cloudflare-workers-ai"
import { testEffect } from "../../lib/effect"
import { fakeSelectorSdk, it, model, npmLayer, provider, withEnv } from "./provider-helper"
// Test-runner variant whose layer also provides AuthV2, for auth-backed cases.
const itWithAuth = testEffect(Layer.mergeAll(PluginV2.defaultLayer, AuthV2.defaultLayer, npmLayer))
/** Shape of the config object exposed by the OpenAI-compatible model stub. */
type CloudflareConfig = {
  url: (input: { path: string; modelId: string }) => string
  headers: () => Record<string, string> | Promise<Record<string, string>>
}
/** Narrow an untyped SDK and select the language model for `modelID`. */
function cloudflareLanguage(sdk: unknown, modelID = "@cf/model") {
  const typed = sdk as { languageModel: (id: string) => { config: CloudflareConfig; provider: string } }
  return typed.languageModel(modelID)
}
/** Resolve the chat-completions URL the selected model would request. */
function cloudflareURL(sdk: unknown, modelID = "@cf/model") {
  const { config } = cloudflareLanguage(sdk, modelID)
  return config.url({ path: "/chat/completions", modelId: modelID })
}
/** Resolve the request headers (possibly a Promise) for the selected model. */
function cloudflareHeaders(sdk: unknown, modelID = "@cf/model") {
  const { config } = cloudflareLanguage(sdk, modelID)
  return config.headers()
}
// Covers the Cloudflare Workers AI plugin: endpoint derivation from the
// account ID, precedence of env vs auth vs configured values, header and
// User-Agent handling, env-var expansion in URLs, and language-model selection.
describe("CloudflareWorkersAIPlugin", () => {
  // Account ID becomes the accounts/<id>/ai/v1 endpoint and an
  // OpenAI-compatible SDK is produced for that endpoint.
  it.effect("maps account ID to endpoint URL and creates an OpenAI-compatible SDK", () =>
    withEnv({ CLOUDFLARE_ACCOUNT_ID: "acct", CLOUDFLARE_API_KEY: "key" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareWorkersAIPlugin)
        const updated = yield* plugin.trigger(
          "provider.update",
          {},
          { provider: provider("cloudflare-workers-ai"), cancel: false },
        )
        const sdk = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-workers-ai", "@cf/model", { endpoint: updated.provider.endpoint }),
            package: "@ai-sdk/openai-compatible",
            options: { name: "cloudflare-workers-ai", headers: { custom: "header" } },
          },
          {},
        )
        expect(updated.provider.endpoint).toEqual({
          type: "aisdk",
          package: "test-provider",
          url: "https://api.cloudflare.com/client/v4/accounts/acct/ai/v1",
        })
        expect(sdk.sdk).toBeDefined()
      }),
    ),
  )
  // An already-configured endpoint URL must survive provider.update untouched.
  it.effect("preserves a configured endpoint URL instead of deriving one from account ID", () =>
    withEnv({ CLOUDFLARE_ACCOUNT_ID: "acct" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareWorkersAIPlugin)
        const result = yield* plugin.trigger(
          "provider.update",
          {},
          {
            provider: provider("cloudflare-workers-ai", {
              endpoint: { type: "aisdk", package: "test-provider", url: "https://proxy.example/v1" },
            }),
            cancel: false,
          },
        )
        expect(result.provider.endpoint).toEqual({
          type: "aisdk",
          package: "test-provider",
          url: "https://proxy.example/v1",
        })
      }),
    ),
  )
  // With no account ID, a configured baseURL alone is enough to build the SDK.
  it.effect("allows a configured baseURL without account ID", () =>
    withEnv({ CLOUDFLARE_ACCOUNT_ID: undefined, CLOUDFLARE_API_KEY: "key" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareWorkersAIPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-workers-ai", "@cf/model", {
              endpoint: { type: "aisdk", package: "@ai-sdk/openai-compatible", url: "https://proxy.example/v1" },
            }),
            package: "@ai-sdk/openai-compatible",
            options: { name: "cloudflare-workers-ai", baseURL: "https://proxy.example/v1" },
          },
          {},
        )
        expect(cloudflareURL(result.sdk)).toBe("https://proxy.example/v1/chat/completions")
      }),
    ),
  )
  // When env vars are absent, the account ID stored in auth credential
  // metadata (via the AuthPlugin) drives endpoint derivation.
  itWithAuth.effect("falls back to auth account metadata when account env is absent", () =>
    withEnv(
      {
        CLOUDFLARE_ACCOUNT_ID: undefined,
        CLOUDFLARE_API_KEY: undefined,
      },
      () =>
        Effect.gen(function* () {
          const plugin = yield* PluginV2.Service
          const auth = yield* AuthV2.Service
          yield* auth.create({
            serviceID: AuthV2.ServiceID.make("cloudflare-workers-ai"),
            credential: new AuthV2.ApiKeyCredential({
              type: "api",
              key: "auth-key",
              metadata: { accountId: "auth-acct" },
            }),
            active: true,
          })
          // Re-provide the AuthV2 service so AuthPlugin sees the credential created above.
          yield* plugin.add({
            ...AuthPlugin,
            effect: AuthPlugin.effect.pipe(Effect.provideService(AuthV2.Service, auth)),
          })
          yield* plugin.add(CloudflareWorkersAIPlugin)
          const updated = yield* plugin.trigger(
            "provider.update",
            {},
            { provider: provider("cloudflare-workers-ai"), cancel: false },
          )
          expect(updated.provider.endpoint).toEqual({
            type: "aisdk",
            package: "test-provider",
            url: "https://api.cloudflare.com/client/v4/accounts/auth-acct/ai/v1",
          })
        }),
    ),
  )
  // Env account ID wins over an account ID nested in provider options.
  it.effect("uses env account ID over configured account ID", () =>
    withEnv({ CLOUDFLARE_ACCOUNT_ID: "env-acct" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareWorkersAIPlugin)
        const result = yield* plugin.trigger(
          "provider.update",
          {},
          {
            provider: provider("cloudflare-workers-ai", {
              options: { headers: {}, body: {}, aisdk: { provider: { accountId: "configured-acct" }, request: {} } },
            }),
            cancel: false,
          },
        )
        expect(result.provider.endpoint).toEqual({
          type: "aisdk",
          package: "test-provider",
          url: "https://api.cloudflare.com/client/v4/accounts/env-acct/ai/v1",
        })
      }),
    ),
  )
  // Env API key overrides any other key, custom headers are kept, and the
  // user-agent advertises opencode + cloudflare-workers-ai + the SDK package.
  it.effect("uses env API key over auth or configured API key and keeps the Cloudflare User-Agent", () =>
    withEnv({ CLOUDFLARE_ACCOUNT_ID: "acct", CLOUDFLARE_API_KEY: "env-key" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareWorkersAIPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-workers-ai", "@cf/model", {
              endpoint: { type: "aisdk", package: "@ai-sdk/openai-compatible", url: "https://proxy.example/v1" },
            }),
            package: "@ai-sdk/openai-compatible",
            options: {
              name: "cloudflare-workers-ai",
              apiKey: "auth-key",
              baseURL: "https://proxy.example/v1",
              headers: { custom: "header" },
            },
          },
          {},
        )
        // headers() may be sync or async; normalize through a resolved promise.
        const headers = yield* Effect.promise(() => Promise.resolve(cloudflareHeaders(result.sdk)))
        expect(headers.authorization).toBe("Bearer env-key")
        expect(headers.custom).toBe("header")
        expect(headers["user-agent"]).toMatch(/^opencode\/.* cloudflare-workers-ai \(.+\) ai-sdk\/openai-compatible\//)
      }),
    ),
  )
  // ${CLOUDFLARE_ACCOUNT_ID} placeholders in endpoint/base URLs are expanded.
  it.effect("expands account ID vars in endpoint URLs", () =>
    withEnv({ CLOUDFLARE_ACCOUNT_ID: "acct", CLOUDFLARE_API_KEY: "key" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareWorkersAIPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-workers-ai", "@cf/model", {
              endpoint: {
                type: "aisdk",
                package: "@ai-sdk/openai-compatible",
                url: "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/ai/v1",
              },
            }),
            package: "@ai-sdk/openai-compatible",
            options: {
              name: "cloudflare-workers-ai",
              baseURL: "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/ai/v1",
            },
          },
          {},
        )
        expect(cloudflareURL(result.sdk)).toBe(
          "https://api.cloudflare.com/client/v4/accounts/acct/ai/v1/chat/completions",
        )
      }),
    ),
  )
  // The API-facing model ID (apiID), not the alias, is passed to languageModel.
  it.effect("selects languageModel with the API model ID", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(CloudflareWorkersAIPlugin)
      const result = yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("cloudflare-workers-ai", "alias", { apiID: ModelV2.ID.make("@cf/api-model") }),
          sdk: fakeSelectorSdk(calls),
          options: {},
        },
        {},
      )
      expect(result.language).toBeDefined()
      expect(calls).toEqual(["languageModel:@cf/api-model"])
    }),
  )
  // The plugin only builds SDKs for the OpenAI-compatible package.
  it.effect("does not create an SDK for non OpenAI-compatible packages", () =>
    withEnv({ CLOUDFLARE_ACCOUNT_ID: "acct", CLOUDFLARE_API_KEY: "key" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(CloudflareWorkersAIPlugin)
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          {
            model: model("cloudflare-workers-ai", "@cf/model", {
              endpoint: { type: "aisdk", package: "@ai-sdk/anthropic", url: "https://proxy.example/v1" },
            }),
            package: "@ai-sdk/anthropic",
            options: { name: "cloudflare-workers-ai" },
          },
          {},
        )
        expect(result.sdk).toBeUndefined()
      }),
    ),
  )
})

View File

@@ -0,0 +1,86 @@
import { describe, expect, mock } from "bun:test"
import { Effect } from "effect"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { CoherePlugin } from "@opencode-ai/core/plugin/provider/cohere"
import { fakeSelectorSdk, it, model } from "./provider-helper"
// Option objects captured from each call to the mocked createCohere factory.
const cohereOptions: Record<string, any>[] = []
// Stub @ai-sdk/cohere: record factory options and return an SDK whose
// languageModel reflects the configured provider name as "<name>.chat".
void mock.module("@ai-sdk/cohere", () => ({
  createCohere: (options: Record<string, any>) => {
    cohereOptions.push({ ...options })
    return {
      languageModel: (modelID: string) => ({
        modelID,
        provider: `${options.name ?? "cohere"}.chat`,
        specificationVersion: "v3",
      }),
    }
  },
}))
// Covers the Cohere plugin: SDK creation gated on the @ai-sdk/cohere package,
// the provider ID used as the SDK name, and deferral of language selection
// to the default languageModel fallback.
describe("CoherePlugin", () => {
  // Only @ai-sdk/cohere triggers SDK creation; other packages are ignored.
  it.effect("creates a Cohere SDK only for @ai-sdk/cohere", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(CoherePlugin)
      const ignored = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("cohere", "command"), package: "@ai-sdk/openai-compatible", options: { name: "cohere" } },
        {},
      )
      expect(ignored.sdk).toBeUndefined()
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("cohere", "command"), package: "@ai-sdk/cohere", options: { name: "cohere" } },
        {},
      )
      expect(result.sdk).toBeDefined()
    }),
  )
  // The provider ID from the model flows through as the factory's name option.
  it.effect("uses the model provider ID as the bundled SDK name", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(CoherePlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-cohere", "command-r-plus"),
          package: "@ai-sdk/cohere",
          options: { name: "custom-cohere", apiKey: "test", baseURL: "https://cohere.example" },
        },
        {},
      )
      expect(cohereOptions.at(-1)).toEqual({
        name: "custom-cohere",
        apiKey: "test",
        baseURL: "https://cohere.example",
      })
      expect(result.sdk?.languageModel("command-r-plus").provider).toBe("custom-cohere.chat")
    }),
  )
  // The plugin supplies no aisdk.language handler, so the default
  // languageModel fallback must perform the selection.
  it.effect("leaves language selection to the default languageModel fallback", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      const sdk = fakeSelectorSdk(calls)
      yield* plugin.add(CoherePlugin)
      const result = yield* plugin.trigger(
        "aisdk.language",
        { model: model("cohere", "alias", { apiID: ModelV2.ID.make("command-r-plus") }), sdk, options: {} },
        {},
      )
      expect(result.language).toBeUndefined()
      expect(calls).toEqual([])
      // Simulate the fallback: selecting via the SDK records the apiID call.
      expect(result.language ?? sdk.languageModel("command-r-plus")).toBeDefined()
      expect(calls).toEqual(["languageModel:command-r-plus"])
    }),
  )
})

View File

@@ -0,0 +1,129 @@
import { describe, expect, mock } from "bun:test"
import { Effect, Layer } from "effect"
import { AISDK } from "@opencode-ai/core/aisdk"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { DeepInfraPlugin } from "@opencode-ai/core/plugin/provider/deepinfra"
import { testEffect } from "../../lib/effect"
import { it, model } from "./provider-helper"
// Test-runner variant that also provides the AISDK service over the plugin layer.
const itAISDK = testEffect(Layer.provideMerge(AISDK.layer, PluginV2.defaultLayer))
// Option objects captured from each call to the mocked createDeepInfra factory.
const deepinfraOptions: Record<string, any>[] = []
// Model IDs requested from the mocked DeepInfra SDK's languageModel.
const deepinfraLanguageModels: string[] = []
// Stub @ai-sdk/deepinfra: record factory options and languageModel calls,
// returning models whose provider is "<name>.chat".
void mock.module("@ai-sdk/deepinfra", () => ({
  createDeepInfra: (options: Record<string, any>) => {
    const captured = { ...options }
    deepinfraOptions.push(captured)
    return {
      languageModel: (modelID: string) => {
        deepinfraLanguageModels.push(modelID)
        return { modelID, provider: `${captured.name ?? "deepinfra"}.chat`, specificationVersion: "v3" }
      },
    }
  },
}))
/** Clear every DeepInfra mock-capture array so each test starts clean. */
function resetDeepInfraMock() {
  for (const captured of [deepinfraOptions, deepinfraLanguageModels]) {
    captured.length = 0
  }
}
// Covers the DeepInfra plugin: exact package matching, SDK naming from the
// provider ID, and default languageModel selection through the AISDK service.
describe("DeepInfraPlugin", () => {
  // Basic SDK creation for the bundled @ai-sdk/deepinfra package.
  it.effect("creates a DeepInfra SDK for @ai-sdk/deepinfra", () =>
    Effect.gen(function* () {
      resetDeepInfraMock()
      const plugin = yield* PluginV2.Service
      yield* plugin.add(DeepInfraPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("deepinfra", "model"), package: "@ai-sdk/deepinfra", options: { name: "deepinfra" } },
        {},
      )
      expect(result.sdk).toBeDefined()
    }),
  )
  // A custom provider ID becomes the factory's name option.
  it.effect("passes the model provider ID as the bundled DeepInfra SDK name", () =>
    Effect.gen(function* () {
      resetDeepInfraMock()
      const plugin = yield* PluginV2.Service
      yield* plugin.add(DeepInfraPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-deepinfra", "model"),
          package: "@ai-sdk/deepinfra",
          options: { name: "custom-deepinfra", apiKey: "test" },
        },
        {},
      )
      expect(result.sdk.languageModel("model").provider).toBe("custom-deepinfra.chat")
      expect(deepinfraOptions).toEqual([{ name: "custom-deepinfra", apiKey: "test" }])
    }),
  )
  // The canonical "deepinfra" ID is also passed through unchanged.
  it.effect("uses the canonical provider ID as the bundled DeepInfra SDK name", () =>
    Effect.gen(function* () {
      resetDeepInfraMock()
      const plugin = yield* PluginV2.Service
      yield* plugin.add(DeepInfraPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("deepinfra", "model"),
          package: "@ai-sdk/deepinfra",
          options: { name: "deepinfra", apiKey: "test" },
        },
        {},
      )
      expect(result.sdk.languageModel("model").provider).toBe("deepinfra.chat")
      expect(deepinfraOptions).toEqual([{ name: "deepinfra", apiKey: "test" }])
    }),
  )
  // Package matching is exact: near-miss names and file URLs are ignored.
  it.effect("matches only the exact bundled DeepInfra package", () =>
    Effect.gen(function* () {
      resetDeepInfraMock()
      const plugin = yield* PluginV2.Service
      yield* plugin.add(DeepInfraPlugin)
      const packages = [
        "unmatched-package",
        "@ai-sdk/deepinfra-compatible",
        "file:///tmp/@ai-sdk/deepinfra-provider.js",
      ]
      yield* Effect.forEach(packages, (item) =>
        Effect.gen(function* () {
          const ignored = yield* plugin.trigger(
            "aisdk.sdk",
            { model: model("deepinfra", "model"), package: item, options: { name: "deepinfra" } },
            {},
          )
          expect(ignored.sdk).toBeUndefined()
        }),
      )
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("deepinfra", "model"), package: "@ai-sdk/deepinfra", options: { name: "deepinfra" } },
        {},
      )
      expect(result.sdk).toBeDefined()
      expect(deepinfraOptions).toEqual([{ name: "deepinfra" }])
    }),
  )
  // End-to-end through the AISDK service: the model ID reaches languageModel.
  itAISDK.effect("uses the default languageModel selection for DeepInfra models", () =>
    Effect.gen(function* () {
      resetDeepInfraMock()
      const plugin = yield* PluginV2.Service
      const aisdk = yield* AISDK.Service
      yield* plugin.add(DeepInfraPlugin)
      const language = yield* aisdk.language(
        model("deepinfra", "meta-llama/Llama-3.3-70B-Instruct", {
          endpoint: { type: "aisdk", package: "@ai-sdk/deepinfra" },
        }),
      )
      expect(language.provider).toBe("deepinfra.chat")
      expect(deepinfraLanguageModels).toEqual(["meta-llama/Llama-3.3-70B-Instruct"])
    }),
  )
})

View File

@@ -0,0 +1,172 @@
import { Npm } from "@opencode-ai/core/npm"
import { describe, expect } from "bun:test"
import { Cause, Effect, Layer, Option } from "effect"
import fs from "fs/promises"
import os from "os"
import path from "path"
import { fileURLToPath } from "url"
import { AISDK } from "@opencode-ai/core/aisdk"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { DynamicProviderPlugin } from "@opencode-ai/core/plugin/provider/dynamic"
import { testEffect } from "../../lib/effect"
import { fixtureProvider, it, model, npmLayer } from "./provider-helper"
// Filesystem path of the fixture provider module, derived from its file:// URL.
const fixtureProviderPath = fileURLToPath(fixtureProvider)
// Test-runner variant that also provides the AISDK service over the plugin layer.
const itWithAISDK = testEffect(AISDK.layer.pipe(Layer.provideMerge(PluginV2.defaultLayer)))
/**
 * Build an Npm service layer whose `add` resolves to the given entrypoint,
 * with `install` and `which` stubbed, for exercising entrypoint handling.
 */
function npmEntrypointLayer(entrypoint: Option.Option<string>) {
  const service = Npm.Service.of({
    add: () => Effect.succeed({ directory: "", entrypoint }),
    install: () => Effect.void,
    which: () => Effect.succeed(Option.none<string>()),
  })
  return Layer.succeed(Npm.Service, service)
}
function dynamicPlugin(layer = npmLayer) {
return { id: DynamicProviderPlugin.id, effect: DynamicProviderPlugin.effect.pipe(Effect.provide(layer)) }
}
/**
 * Acquire a throwaway directory containing a `provider.mjs` file with the
 * given source; the release action deletes the directory recursively when
 * the surrounding scope closes.
 */
function tempEntrypoint(source: string) {
  const acquire = Effect.promise(async () => {
    const directory = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-provider-dynamic-"))
    const entrypoint = path.join(directory, "provider.mjs")
    await Bun.write(entrypoint, source)
    return { directory, entrypoint }
  })
  const release = (tmp: { directory: string }) =>
    Effect.promise(() => fs.rm(tmp.directory, { recursive: true, force: true }))
  return Effect.acquireRelease(acquire, release)
}
// Covers the dynamic provider plugin: loading SDK factories from file URLs and
// npm entrypoints, name injection, precedence of pre-existing SDKs, and
// wrapping of load failures as AISDK init errors.
describe("DynamicProviderPlugin", () => {
  // The fixture module's factory export is called with name + options.
  it.effect("creates an SDK from a provider factory export", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(dynamicPlugin())
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom", "test-model"),
          package: fixtureProvider,
          options: { name: "custom", marker: "dynamic" },
        },
        {},
      )
      expect(result.sdk.options).toEqual({ marker: "dynamic", name: "custom" })
      expect(result.sdk.languageModel("x")).toEqual({ modelID: "x", options: { marker: "dynamic", name: "custom" } })
    }),
  )
  // An SDK placed in the trigger output by an earlier plugin is left intact.
  it.effect("does not override an SDK already supplied by an earlier plugin", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const sdk = { marker: "existing" }
      yield* plugin.add(dynamicPlugin())
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom", "test-model"),
          package: fixtureProvider,
          options: { name: "custom", marker: "dynamic" },
        },
        { sdk },
      )
      expect(result.sdk).toBe(sdk)
    }),
  )
  // The provider ID is injected as the factory's `name` option.
  it.effect("injects the provider ID as the SDK factory name", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(dynamicPlugin())
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-provider", "test-model"),
          package: fixtureProvider,
          options: { name: "custom-provider", marker: "dynamic" },
        },
        {},
      )
      expect(result.sdk.options).toEqual({ marker: "dynamic", name: "custom-provider" })
    }),
  )
  // npm packages load through the entrypoint resolved by the Npm service.
  it.effect("loads npm packages through their resolved import entrypoint", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(dynamicPlugin(npmEntrypointLayer(Option.some(fixtureProviderPath))))
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("npm-provider", "test-model"),
          package: "fixture-provider",
          options: { name: "npm-provider", marker: "npm" },
        },
        {},
      )
      expect(result.sdk.languageModel("x")).toEqual({ modelID: "x", options: { marker: "npm", name: "npm-provider" } })
    }),
  )
  // A package whose entrypoint cannot be resolved fails with AISDK.InitError.
  itWithAISDK.effect("wraps missing npm entrypoint failures as AISDK init errors", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const aisdk = yield* AISDK.Service
      yield* plugin.add(dynamicPlugin(npmEntrypointLayer(Option.none<string>())))
      const exit = yield* aisdk
        .language(model("missing-entrypoint", "alias", { endpoint: { type: "aisdk", package: "fixture-provider" } }))
        .pipe(Effect.exit)
      expect(exit._tag).toBe("Failure")
      if (exit._tag === "Failure") expect(Cause.prettyErrors(exit.cause).join("\n")).toContain("AISDK.InitError")
    }),
  )
  // A file URL that cannot be imported also surfaces as AISDK.InitError.
  itWithAISDK.effect("wraps dynamic import failures as AISDK init errors", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const aisdk = yield* AISDK.Service
      yield* plugin.add(dynamicPlugin())
      const exit = yield* aisdk
        .language(
          model("bad-import", "alias", { endpoint: { type: "aisdk", package: "file:///missing/provider-factory.js" } }),
        )
        .pipe(Effect.exit)
      expect(exit._tag).toBe("Failure")
      if (exit._tag === "Failure") expect(Cause.prettyErrors(exit.cause).join("\n")).toContain("AISDK.InitError")
    }),
  )
  // A module that imports cleanly but exports no factory fails the same way.
  // (.live: uses a real temp directory on disk, so no test-clock environment.)
  itWithAISDK.live("wraps missing provider factory exports as AISDK init errors", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const aisdk = yield* AISDK.Service
      const tmp = yield* tempEntrypoint("export const notAProviderFactory = true\n")
      yield* plugin.add(dynamicPlugin(npmEntrypointLayer(Option.some(tmp.entrypoint))))
      const exit = yield* aisdk
        .language(model("missing-factory", "alias", { endpoint: { type: "aisdk", package: "fixture-provider" } }))
        .pipe(Effect.exit)
      expect(exit._tag).toBe("Failure")
      if (exit._tag === "Failure") expect(Cause.prettyErrors(exit.cause).join("\n")).toContain("AISDK.InitError")
    }),
  )
  // The apiID, not the alias, is the model ID handed to the loaded factory.
  itWithAISDK.effect("uses the model apiID for the default language model", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const aisdk = yield* AISDK.Service
      yield* plugin.add(dynamicPlugin())
      const language = yield* aisdk.language(
        model("custom", "alias", {
          apiID: ModelV2.ID.make("test-model-api"),
          endpoint: { type: "aisdk", package: fixtureProvider },
        }),
      )
      expect(language).toMatchObject({ modelID: "test-model-api", options: { name: "custom" } })
    }),
  )
})

View File

@@ -0,0 +1,87 @@
import { describe, expect, mock } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { GatewayPlugin } from "@opencode-ai/core/plugin/provider/gateway"
import { it, model } from "./provider-helper"
// Option objects captured from each call to the mocked createGateway factory.
const gatewayCalls: Record<string, unknown>[] = []
// Representative Vercel AI Gateway model IDs used in the package-match test.
const vercelGatewayModels = ["anthropic/claude-sonnet-4", "openai/gpt-5", "google/gemini-2.5-pro"]
// Stub @ai-sdk/gateway: record factory options and return an SDK whose
// languageModel echoes the configured name as the provider.
mock.module("@ai-sdk/gateway", () => ({
  createGateway(options: Record<string, unknown>) {
    gatewayCalls.push({ ...options })
    return {
      languageModel(modelID: string) {
        return {
          modelId: modelID,
          provider: options.name,
          specificationVersion: "v3",
        }
      },
    }
  },
}))
// Covers the Vercel AI Gateway plugin: SDK creation gated on the
// @ai-sdk/gateway package and provider-ID-based SDK naming.
describe("GatewayPlugin", () => {
  // Basic SDK creation for the bundled @ai-sdk/gateway package.
  it.effect("creates a Gateway SDK for @ai-sdk/gateway", () =>
    Effect.gen(function* () {
      gatewayCalls.length = 0
      const plugin = yield* PluginV2.Service
      yield* plugin.add(GatewayPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("gateway", "model"), package: "@ai-sdk/gateway", options: { name: "gateway" } },
        {},
      )
      expect(result.sdk).toBeDefined()
      expect(gatewayCalls).toHaveLength(1)
    }),
  )
  // The model's providerID flows through as the factory's name option.
  it.effect("passes the model providerID as the Gateway SDK name", () =>
    Effect.gen(function* () {
      gatewayCalls.length = 0
      const plugin = yield* PluginV2.Service
      yield* plugin.add(GatewayPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("vercel", "anthropic/claude-sonnet-4"),
          package: "@ai-sdk/gateway",
          options: { name: "vercel", apiKey: "test-key" },
        },
        {},
      )
      expect(gatewayCalls).toEqual([{ name: "vercel", apiKey: "test-key" }])
      expect(result.sdk.languageModel("anthropic/claude-sonnet-4").provider).toBe("vercel")
    }),
  )
  // Each Vercel gateway model matches only via @ai-sdk/gateway, never @ai-sdk/vercel.
  it.effect("matches Vercel AI Gateway models by their @ai-sdk/gateway package", () =>
    Effect.gen(function* () {
      gatewayCalls.length = 0
      const plugin = yield* PluginV2.Service
      yield* plugin.add(GatewayPlugin)
      for (const modelID of vercelGatewayModels) {
        const ignored = yield* plugin.trigger(
          "aisdk.sdk",
          { model: model("vercel", modelID), package: "@ai-sdk/vercel", options: { name: "vercel" } },
          {},
        )
        expect(ignored.sdk).toBeUndefined()
        const result = yield* plugin.trigger(
          "aisdk.sdk",
          { model: model("vercel", modelID), package: "@ai-sdk/gateway", options: { name: "vercel" } },
          {},
        )
        expect(result.sdk).toBeDefined()
      }
      expect(gatewayCalls).toHaveLength(3)
    }),
  )
})

View File

@@ -0,0 +1,346 @@
import { describe, expect, mock } from "bun:test"
import { Effect, Layer } from "effect"
import { AuthV2 } from "@opencode-ai/core/auth"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { AuthPlugin } from "@opencode-ai/core/plugin/auth"
import { GitLabPlugin } from "@opencode-ai/core/plugin/provider/gitlab"
import { testEffect } from "../../lib/effect"
import { it, model, npmLayer, provider, withEnv } from "./provider-helper"
// Option objects captured from each call to the mocked createGitLab factory.
const gitlabSDKOptions: Record<string, unknown>[] = []
// Stub gitlab-ai-provider: record factory options, expose agentic/workflow
// chat factories, and provide deterministic workflow-model discovery helpers.
void mock.module("gitlab-ai-provider", () => ({
  VERSION: "test-version",
  createGitLab: (options: Record<string, unknown>) => {
    gitlabSDKOptions.push(options)
    return {
      agenticChat: (id: string, options: unknown) => ({ id, options, type: "agentic" }),
      workflowChat: (id: string, options: unknown) => ({ id, options, type: "workflow" }),
    }
  },
  discoverWorkflowModels: async () => ({ models: [], project: undefined }),
  isWorkflowModel: (id: string) => id === "duo-workflow" || id === "duo-workflow-exact",
}))
// Test-runner variant whose layer also provides AuthV2 for auth-backed cases.
const itWithAuth = testEffect(Layer.mergeAll(PluginV2.defaultLayer, AuthV2.defaultLayer, npmLayer))
describe("GitLabPlugin", () => {
// Default instance URL, env token, gateway headers, and feature flags must all
// match the legacy (pre-plugin) GitLab provider behavior.
it.effect("creates SDKs with legacy default instance URL, token env, headers, and feature flags", () =>
withEnv(
{
GITLAB_INSTANCE_URL: undefined,
GITLAB_TOKEN: "env-token",
},
() =>
Effect.gen(function* () {
gitlabSDKOptions.length = 0 // reset shared mock capture
const plugin = yield* PluginV2.Service
yield* plugin.add(GitLabPlugin)
yield* plugin.trigger(
"aisdk.sdk",
{ model: model("gitlab", "claude"), package: "gitlab-ai-provider", options: { name: "gitlab" } },
{},
)
expect(gitlabSDKOptions).toHaveLength(1)
expect(gitlabSDKOptions[0].instanceUrl).toBe("https://gitlab.com")
expect(gitlabSDKOptions[0].apiKey).toBe("env-token")
expect(gitlabSDKOptions[0].aiGatewayHeaders).toMatchObject({
"anthropic-beta": "context-1m-2025-08-07",
})
// User-Agent should embed the mocked provider VERSION
expect(String((gitlabSDKOptions[0].aiGatewayHeaders as Record<string, string>)["User-Agent"])).toContain(
"gitlab-ai-provider/test-version",
)
expect(gitlabSDKOptions[0].featureFlags).toEqual({
duo_agent_platform_agentic_chat: true,
duo_agent_platform: true,
})
}),
),
)
// Env var fallback applies only when no instanceUrl is configured.
it.effect("uses GITLAB_INSTANCE_URL when instanceUrl is not configured", () =>
withEnv(
{
GITLAB_INSTANCE_URL: "https://env.gitlab.example",
GITLAB_TOKEN: undefined,
},
() =>
Effect.gen(function* () {
gitlabSDKOptions.length = 0
const plugin = yield* PluginV2.Service
yield* plugin.add(GitLabPlugin)
yield* plugin.trigger(
"aisdk.sdk",
{ model: model("gitlab", "claude"), package: "gitlab-ai-provider", options: { name: "gitlab" } },
{},
)
expect(gitlabSDKOptions[0].instanceUrl).toBe("https://env.gitlab.example")
}),
),
)
// Explicitly configured provider options always win over env vars and defaults;
// headers are merged, featureFlags keep the always-on agentic-chat flag.
it.effect("keeps configured instance URL, apiKey, aiGatewayHeaders, and featureFlags over env/defaults", () =>
withEnv(
{
GITLAB_INSTANCE_URL: "https://env.gitlab.example",
GITLAB_TOKEN: "env-token",
},
() =>
Effect.gen(function* () {
gitlabSDKOptions.length = 0
const plugin = yield* PluginV2.Service
yield* plugin.add(GitLabPlugin)
yield* plugin.trigger(
"aisdk.sdk",
{
model: model("gitlab", "claude"),
package: "gitlab-ai-provider",
options: {
name: "gitlab",
instanceUrl: "https://configured.gitlab.example",
apiKey: "configured-token",
aiGatewayHeaders: {
"anthropic-beta": "configured-beta",
"x-gitlab-test": "1",
},
featureFlags: {
duo_agent_platform: false,
custom_flag: true,
},
},
},
{},
)
expect(gitlabSDKOptions[0].instanceUrl).toBe("https://configured.gitlab.example")
expect(gitlabSDKOptions[0].apiKey).toBe("configured-token")
expect(gitlabSDKOptions[0].aiGatewayHeaders).toMatchObject({
"anthropic-beta": "configured-beta",
"x-gitlab-test": "1",
})
expect(gitlabSDKOptions[0].featureFlags).toEqual({
duo_agent_platform_agentic_chat: true,
duo_agent_platform: false,
custom_flag: true,
})
}),
),
)
// The plugin must not respond to aisdk.sdk events for other packages.
it.effect("ignores non-GitLab SDK packages", () =>
Effect.gen(function* () {
gitlabSDKOptions.length = 0
const plugin = yield* PluginV2.Service
yield* plugin.add(GitLabPlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{ model: model("gitlab", "claude"), package: "@ai-sdk/openai", options: { name: "gitlab" } },
{},
)
expect(result.sdk).toBeUndefined()
expect(gitlabSDKOptions).toHaveLength(0)
}),
)
// An active API-key credential beats the GITLAB_TOKEN env var.
itWithAuth.effect("uses active API auth token over GITLAB_TOKEN", () =>
withEnv(
{
GITLAB_TOKEN: "env-token",
},
() =>
Effect.gen(function* () {
gitlabSDKOptions.length = 0
const plugin = yield* PluginV2.Service
const auth = yield* AuthV2.Service
yield* auth.create({
serviceID: AuthV2.ServiceID.make("gitlab"),
credential: new AuthV2.ApiKeyCredential({ type: "api", key: "auth-token" }),
active: true,
})
// re-provide the auth service so AuthPlugin sees the credential created above
yield* plugin.add({
...AuthPlugin,
effect: AuthPlugin.effect.pipe(Effect.provideService(AuthV2.Service, auth)),
})
yield* plugin.add(GitLabPlugin)
const updated = yield* plugin.trigger("provider.update", {}, { provider: provider("gitlab"), cancel: false })
yield* plugin.trigger(
"aisdk.sdk",
{
model: model("gitlab", "claude"),
package: "gitlab-ai-provider",
options: updated.provider.options.aisdk.provider,
},
{},
)
expect(gitlabSDKOptions[0].apiKey).toBe("auth-token")
}),
),
)
// OAuth access token is used when no API-key credential exists.
itWithAuth.effect("uses active OAuth access token when no API auth exists", () =>
withEnv(
{
GITLAB_TOKEN: undefined,
},
() =>
Effect.gen(function* () {
gitlabSDKOptions.length = 0
const plugin = yield* PluginV2.Service
const auth = yield* AuthV2.Service
yield* auth.create({
serviceID: AuthV2.ServiceID.make("gitlab"),
credential: new AuthV2.OAuthCredential({
type: "oauth",
refresh: "refresh-token",
access: "oauth-token",
expires: 9999999999999,
}),
active: true,
})
yield* plugin.add({
...AuthPlugin,
effect: AuthPlugin.effect.pipe(Effect.provideService(AuthV2.Service, auth)),
})
yield* plugin.add(GitLabPlugin)
const updated = yield* plugin.trigger("provider.update", {}, { provider: provider("gitlab"), cancel: false })
yield* plugin.trigger(
"aisdk.sdk",
{
model: model("gitlab", "claude"),
package: "gitlab-ai-provider",
options: updated.provider.options.aisdk.provider,
},
{},
)
expect(gitlabSDKOptions[0].apiKey).toBe("oauth-token")
}),
),
)
// Custom workflow ids map to the static "duo-workflow" model; the per-request
// workflowRef is surfaced as selectedModelRef on the language model.
it.effect("uses workflowChat for duo workflow models and preserves selectedModelRef", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const calls: [string, unknown][] = []
yield* plugin.add(GitLabPlugin)
const result = yield* plugin.trigger(
"aisdk.language",
{
model: model("gitlab", "duo-workflow-custom", {
options: {
headers: {},
body: {},
aisdk: { provider: {}, request: { workflowRef: "ref", workflowDefinition: "definition" } },
},
}),
sdk: {
workflowChat: (id: string, options: unknown) => {
calls.push([id, options])
return { id, options }
},
agenticChat: () => undefined,
},
options: { featureFlags: { configured: true } },
},
{},
)
expect(calls).toEqual([
["duo-workflow", { featureFlags: { configured: true }, workflowDefinition: "definition" }],
])
expect(result.language as unknown).toEqual({
id: "duo-workflow",
options: calls[0]?.[1],
selectedModelRef: "ref",
})
}),
)
// Ids the (mocked) provider itself recognizes as workflow models pass through unchanged.
it.effect("uses exact static workflow model ids when the provider recognizes them", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const calls: [string, unknown][] = []
yield* plugin.add(GitLabPlugin)
const result = yield* plugin.trigger(
"aisdk.language",
{
model: model("gitlab", "duo-workflow-exact"),
sdk: {
workflowChat: (id: string, options: unknown) => {
calls.push([id, options])
return { id, options }
},
agenticChat: () => undefined,
},
options: { featureFlags: { configured: true } },
},
{},
)
expect(calls).toEqual([
["duo-workflow-exact", { featureFlags: { configured: true }, workflowDefinition: undefined }],
])
expect(result.language as unknown).toEqual({ id: "duo-workflow-exact", options: calls[0]?.[1] })
}),
)
// featureFlags must come from provider-level options, not per-request aisdk options.
it.effect("uses provider feature flags instead of request feature flags", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const calls: [string, unknown][] = []
yield* plugin.add(GitLabPlugin)
yield* plugin.trigger(
"aisdk.language",
{
model: model("gitlab", "duo-workflow-custom", {
options: {
headers: {},
body: {},
aisdk: { provider: {}, request: { featureFlags: { request_flag: true } } },
},
}),
sdk: {
workflowChat: (id: string, options: unknown) => {
calls.push([id, options])
return { id, options }
},
agenticChat: () => undefined,
},
options: { featureFlags: { configured: true } },
},
{},
)
expect(calls).toEqual([["duo-workflow", { featureFlags: { configured: true }, workflowDefinition: undefined }]])
}),
)
// Non-workflow models go through agenticChat with the provider-level gateway
// headers and feature flags.
it.effect("uses agenticChat with provider aiGatewayHeaders and feature flags for normal models", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const calls: [string, unknown][] = []
yield* plugin.add(GitLabPlugin)
yield* plugin.trigger(
"aisdk.language",
{
model: model("gitlab", "claude", {
options: { headers: { h: "v" }, body: {}, aisdk: { provider: {}, request: {} } },
}),
sdk: {
workflowChat: () => undefined,
agenticChat: (id: string, options: unknown) => {
const selected = options as {
aiGatewayHeaders?: Record<string, string>
featureFlags?: Record<string, boolean>
}
calls.push([
id,
{ aiGatewayHeaders: { ...selected.aiGatewayHeaders }, featureFlags: { ...selected.featureFlags } },
])
},
},
options: { aiGatewayHeaders: { fallback: "header" }, featureFlags: { duo_agent_platform: true } },
},
{},
)
expect(calls).toEqual([
["claude", { aiGatewayHeaders: { fallback: "header" }, featureFlags: { duo_agent_platform: true } }],
])
}),
)
})

View File

@@ -0,0 +1,147 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { GoogleVertexAnthropicPlugin } from "@opencode-ai/core/plugin/provider/google-vertex"
import { fakeSelectorSdk, it, model, provider, withEnv } from "./provider-helper"
describe("GoogleVertexAnthropicPlugin", () => {
// GOOGLE_CLOUD_PROJECT / GOOGLE_CLOUD_LOCATION take precedence over the other
// legacy env aliases when resolving project/location on provider update.
it.effect("resolves legacy project and location env on provider update", () =>
withEnv(
{
GOOGLE_CLOUD_PROJECT: "cloud-project",
GCP_PROJECT: "gcp-project",
GCLOUD_PROJECT: "gcloud-project",
GOOGLE_CLOUD_LOCATION: "cloud-location",
VERTEX_LOCATION: "vertex-location",
GOOGLE_VERTEX_LOCATION: "google-vertex-location",
},
() =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexAnthropicPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{ provider: provider("google-vertex-anthropic"), cancel: false },
)
expect(result.provider.options.aisdk.provider.project).toBe("cloud-project")
expect(result.provider.options.aisdk.provider.location).toBe("cloud-location")
}),
),
)
// Configured aisdk.provider values beat env fallbacks.
it.effect("keeps configured project and location over env fallback", () =>
withEnv({ GOOGLE_CLOUD_PROJECT: "env-project", GOOGLE_CLOUD_LOCATION: "env-location" }, () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexAnthropicPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("google-vertex-anthropic", {
options: {
headers: {},
body: {},
aisdk: { provider: { project: "configured-project", location: "configured-location" }, request: {} },
},
}),
cancel: false,
},
)
expect(result.provider.options.aisdk.provider.project).toBe("configured-project")
expect(result.provider.options.aisdk.provider.location).toBe("configured-location")
}),
),
)
// With no location env set, falls back to GCP_PROJECT and the "global"
// location (non-regional aiplatform host) when constructing the SDK baseURL.
it.effect("creates SDKs from legacy env fallback and default location", () =>
withEnv(
{
GOOGLE_CLOUD_PROJECT: undefined,
GCP_PROJECT: "gcp-project",
GCLOUD_PROJECT: "gcloud-project",
GOOGLE_CLOUD_LOCATION: undefined,
VERTEX_LOCATION: undefined,
GOOGLE_VERTEX_LOCATION: "ignored-location",
},
() =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexAnthropicPlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{
model: model("google-vertex-anthropic", "claude-sonnet-4-5"),
package: "@ai-sdk/google-vertex/anthropic",
options: { name: "google-vertex-anthropic" },
},
{},
)
expect(result.sdk.languageModel("claude-sonnet-4-5").config.baseURL).toBe(
"https://aiplatform.googleapis.com/v1/projects/gcp-project/locations/global/publishers/anthropic/models",
)
}),
),
)
// GOOGLE_CLOUD_LOCATION outranks VERTEX_LOCATION and is embedded in the
// regional endpoint hostname.
it.effect("uses GOOGLE_CLOUD_LOCATION before VERTEX_LOCATION when creating SDKs", () =>
withEnv(
{ GOOGLE_CLOUD_PROJECT: "project", GOOGLE_CLOUD_LOCATION: "cloud-location", VERTEX_LOCATION: "vertex-location" },
() =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexAnthropicPlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{
model: model("google-vertex-anthropic", "claude-sonnet-4-5"),
package: "@ai-sdk/google-vertex/anthropic",
options: { name: "google-vertex-anthropic" },
},
{},
)
expect(result.sdk.languageModel("claude-sonnet-4-5").config.baseURL).toBe(
"https://cloud-location-aiplatform.googleapis.com/v1/projects/project/locations/cloud-location/publishers/anthropic/models",
)
}),
),
)
// Whitespace around model ids is stripped before SDK selection.
it.effect("trims model IDs before selecting language models", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const calls: string[] = []
yield* plugin.add(GoogleVertexAnthropicPlugin)
yield* plugin.trigger(
"aisdk.language",
{
model: model("google-vertex-anthropic", " claude-sonnet-4-5 "),
sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
options: {},
},
{},
)
expect(calls).toEqual(["languageModel:claude-sonnet-4-5"])
}),
)
// Only the google-vertex-anthropic provider id is handled for language selection.
it.effect("ignores non Vertex Anthropic providers for language selection", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const calls: string[] = []
yield* plugin.add(GoogleVertexAnthropicPlugin)
const result = yield* plugin.trigger(
"aisdk.language",
{
model: model("google-vertex", "claude-sonnet-4-5"),
sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
options: {},
},
{},
)
expect(calls).toEqual([])
expect(result.language).toBeUndefined()
}),
)
})

View File

@@ -0,0 +1,300 @@
import { describe, expect, mock } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { GoogleVertexPlugin } from "@opencode-ai/core/plugin/provider/google-vertex"
import { fakeSelectorSdk, it, model, provider, withEnv } from "./provider-helper"
// Records every options bag passed to the mocked createVertex factory.
const vertexOptions: Record<string, any>[] = []
void mock.module("@ai-sdk/google-vertex", () => ({
createVertex: (options: Record<string, any>) => {
vertexOptions.push(options)
return {
languageModel: (modelID: string) => ({ modelID, provider: "google-vertex", specificationVersion: "v3" }),
}
},
}))
// Stub Google application-default credentials so auth always yields a fixed
// bearer token without touching real credentials.
void mock.module("google-auth-library", () => ({
GoogleAuth: class {
async getApplicationDefault() {
return {
credential: {
async getAccessToken() {
return { token: "vertex-token" }
},
},
}
}
},
}))
describe("GoogleVertexPlugin", () => {
// Legacy precedence: GOOGLE_CLOUD_PROJECT first for project,
// GOOGLE_VERTEX_LOCATION first for location; the ${…} placeholders in the
// endpoint URL template are resolved from the same values.
it.effect("resolves project and location from env using legacy precedence", () =>
withEnv(
{
GOOGLE_CLOUD_PROJECT: "google-cloud-project",
GCP_PROJECT: "gcp-project",
GCLOUD_PROJECT: "gcloud-project",
GOOGLE_VERTEX_LOCATION: "google-vertex-location",
GOOGLE_CLOUD_LOCATION: "google-cloud-location",
VERTEX_LOCATION: "vertex-location",
},
() =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("google-vertex", {
endpoint: {
type: "aisdk",
package: "@ai-sdk/openai-compatible",
url: "https://${GOOGLE_VERTEX_ENDPOINT}/v1/projects/${GOOGLE_VERTEX_PROJECT}/locations/${GOOGLE_VERTEX_LOCATION}",
},
}),
cancel: false,
},
)
expect(result.provider.options.aisdk.provider.project).toBe("google-cloud-project")
expect(result.provider.options.aisdk.provider.location).toBe("google-vertex-location")
expect(result.provider.endpoint).toEqual({
type: "aisdk",
package: "@ai-sdk/openai-compatible",
url: "https://google-vertex-location-aiplatform.googleapis.com/v1/projects/google-cloud-project/locations/google-vertex-location",
})
}),
),
)
// GOOGLE_VERTEX_PROJECT (the env var advertised in docs) is honored both for
// provider updates and when constructing the native SDK.
it.effect("resolves the advertised GOOGLE_VERTEX_PROJECT env for provider updates and SDKs", () =>
withEnv(
{
GOOGLE_VERTEX_PROJECT: "vertex-project",
GOOGLE_CLOUD_PROJECT: undefined,
GCP_PROJECT: undefined,
GCLOUD_PROJECT: undefined,
GOOGLE_VERTEX_LOCATION: "europe-west4",
GOOGLE_CLOUD_LOCATION: undefined,
VERTEX_LOCATION: undefined,
},
() =>
Effect.gen(function* () {
vertexOptions.length = 0 // reset shared mock capture
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexPlugin)
const updated = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("google-vertex", {
endpoint: {
type: "aisdk",
package: "@ai-sdk/openai-compatible",
url: "https://${GOOGLE_VERTEX_ENDPOINT}/v1/projects/${GOOGLE_VERTEX_PROJECT}/locations/${GOOGLE_VERTEX_LOCATION}",
},
}),
cancel: false,
},
)
yield* plugin.trigger(
"aisdk.sdk",
{
model: model("google-vertex", "gemini", {
endpoint: { type: "aisdk", package: "@ai-sdk/google-vertex" },
}),
package: "@ai-sdk/google-vertex",
options: { name: "google-vertex" },
},
{},
)
expect(updated.provider.options.aisdk.provider.project).toBe("vertex-project")
expect(updated.provider.endpoint).toEqual({
type: "aisdk",
package: "@ai-sdk/openai-compatible",
url: "https://europe-west4-aiplatform.googleapis.com/v1/projects/vertex-project/locations/europe-west4",
})
expect(vertexOptions[0].project).toBe("vertex-project")
expect(vertexOptions[0].location).toBe("europe-west4")
}),
),
)
// Configured project/location win over env; the "global" location maps to the
// non-regional aiplatform endpoint host.
it.effect("keeps configured project and location over env and uses global endpoint", () =>
withEnv(
{
GOOGLE_CLOUD_PROJECT: "env-project",
GCP_PROJECT: "env-gcp-project",
GCLOUD_PROJECT: "env-gcloud-project",
GOOGLE_VERTEX_LOCATION: "env-location",
GOOGLE_CLOUD_LOCATION: "env-google-cloud-location",
VERTEX_LOCATION: "env-vertex-location",
},
() =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("google-vertex", {
endpoint: {
type: "aisdk",
package: "@ai-sdk/openai-compatible",
url: "https://${GOOGLE_VERTEX_ENDPOINT}/v1/projects/${GOOGLE_VERTEX_PROJECT}/locations/${GOOGLE_VERTEX_LOCATION}",
},
options: {
headers: {},
body: {},
aisdk: { provider: { project: "config-project", location: "global" }, request: {} },
},
}),
cancel: false,
},
)
expect(result.provider.options.aisdk.provider.project).toBe("config-project")
expect(result.provider.options.aisdk.provider.location).toBe("global")
expect(result.provider.endpoint).toEqual({
type: "aisdk",
package: "@ai-sdk/openai-compatible",
url: "https://aiplatform.googleapis.com/v1/projects/config-project/locations/global",
})
}),
),
)
// Location defaults to us-central1 when only a project is configured and no
// location env var is set.
it.effect("defaults location to us-central1 when only project is configured", () =>
withEnv(
{
GOOGLE_CLOUD_PROJECT: undefined,
GCP_PROJECT: undefined,
GCLOUD_PROJECT: undefined,
GOOGLE_VERTEX_LOCATION: undefined,
GOOGLE_CLOUD_LOCATION: undefined,
VERTEX_LOCATION: undefined,
},
() =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("google-vertex", {
options: { headers: {}, body: {}, aisdk: { provider: { project: "config-project" }, request: {} } },
}),
cancel: false,
},
)
expect(result.provider.options.aisdk.provider.project).toBe("config-project")
expect(result.provider.options.aisdk.provider.location).toBe("us-central1")
}),
),
)
// The native Vertex SDK handles its own auth, so no custom fetch wrapper may
// be injected into its options.
it.effect("does not pass Google auth fetch to the native Vertex SDK", () =>
withEnv(
{
GOOGLE_CLOUD_PROJECT: "env-project",
GOOGLE_VERTEX_LOCATION: "env-location",
},
() =>
Effect.gen(function* () {
vertexOptions.length = 0
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexPlugin)
yield* plugin.trigger(
"aisdk.sdk",
{
model: model("google-vertex", "gemini", {
endpoint: { type: "aisdk", package: "@ai-sdk/google-vertex" },
}),
package: "@ai-sdk/google-vertex",
options: { name: "google-vertex" },
},
{},
)
expect(vertexOptions).toHaveLength(1)
expect(vertexOptions[0].project).toBe("env-project")
expect(vertexOptions[0].location).toBe("env-location")
expect(vertexOptions[0].fetch).toBeUndefined()
}),
),
)
// OpenAI-compatible Vertex endpoints DO get a fetch wrapper that injects the
// (mocked) ADC bearer token; globalThis.fetch is swapped and restored via
// acquireUseRelease so a failing trigger cannot leak the stub.
it.effect("keeps Google auth fetch for OpenAI-compatible Vertex endpoints", () =>
Effect.gen(function* () {
const fetchCalls: { input: Parameters<typeof fetch>[0]; init?: RequestInit }[] = []
const plugin = yield* PluginV2.Service
yield* plugin.add(GoogleVertexPlugin)
yield* plugin.add({
id: PluginV2.ID.make("capture-openai-compatible"),
effect: Effect.succeed({
"aisdk.sdk": (evt) =>
Effect.promise(async () => {
if (evt.model.providerID !== "google-vertex") return
if (evt.package !== "@ai-sdk/openai-compatible") return
expect(typeof evt.options.fetch).toBe("function")
await evt.options.fetch("https://vertex.example", {
headers: { "x-test": "1" },
})
}),
}),
})
const originalFetch = fetch
;(globalThis as typeof globalThis & { fetch: typeof fetch }).fetch = (async (
input: Parameters<typeof fetch>[0],
init?: RequestInit,
) => {
fetchCalls.push({ input, init })
return new Response("ok")
}) as typeof fetch
yield* Effect.acquireUseRelease(
Effect.void,
() =>
plugin.trigger(
"aisdk.sdk",
{
model: model("google-vertex", "gemini", {
endpoint: { type: "aisdk", package: "@ai-sdk/openai-compatible" },
}),
package: "@ai-sdk/openai-compatible",
options: { name: "google-vertex" },
},
{},
),
() =>
Effect.sync(() => {
;(globalThis as typeof globalThis & { fetch: typeof fetch }).fetch = originalFetch
}),
)
expect(fetchCalls).toHaveLength(1)
expect(fetchCalls[0].input).toBe("https://vertex.example")
// wrapper must add the auth header while preserving caller headers
expect(new Headers(fetchCalls[0].init?.headers).get("authorization")).toBe("Bearer vertex-token")
expect(new Headers(fetchCalls[0].init?.headers).get("x-test")).toBe("1")
}),
)
// Whitespace around model ids is stripped before language-model selection.
it.effect("trims model IDs before selecting language models", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const calls: string[] = []
yield* plugin.add(GoogleVertexPlugin)
yield* plugin.trigger(
"aisdk.language",
{
model: model("google-vertex", " gemini-2.5-pro "),
sdk: { languageModel: fakeSelectorSdk(calls).languageModel },
options: {},
},
{},
)
expect(calls).toEqual(["languageModel:gemini-2.5-pro"])
}),
)
})

View File

@@ -0,0 +1,70 @@
import { describe, expect } from "bun:test"
import { Effect, Layer } from "effect"
import { AISDK } from "@opencode-ai/core/aisdk"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { GooglePlugin } from "@opencode-ai/core/plugin/provider/google"
import { testEffect } from "../../lib/effect"
import { it, model } from "./provider-helper"
// Harness that also provides the real AISDK service on top of the plugin layer.
const itWithAISDK = testEffect(AISDK.layer.pipe(Layer.provideMerge(PluginV2.defaultLayer)))
describe("GooglePlugin", () => {
// The SDK is named after the provider id, so custom google-backed providers
// keep their own id in the resulting language models.
it.effect("creates a Google Generative AI SDK for @ai-sdk/google using the provider ID as SDK name", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GooglePlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{
model: model("custom-google", "gemini"),
package: "@ai-sdk/google",
options: { name: "custom-google", apiKey: "test" },
},
{},
)
expect(result.sdk).toBeDefined()
expect(result.sdk?.languageModel("gemini").provider).toBe("custom-google")
}),
)
// Other Google packages (e.g. vertex) must not be handled by this plugin.
it.effect("ignores non-Google SDK packages", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GooglePlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{ model: model("google", "gemini"), package: "@ai-sdk/google-vertex", options: { name: "google" } },
{},
)
expect(result.sdk).toBeUndefined()
}),
)
// Default language loading resolves the model by apiID (not its alias) while
// preserving the provider id.
itWithAISDK.effect("uses default languageModel loading with provider ID parity", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const aisdk = yield* AISDK.Service
yield* plugin.add(GooglePlugin)
const language = yield* aisdk.language(
model("custom-google", "alias", {
apiID: ModelV2.ID.make("gemini-api"),
endpoint: {
type: "aisdk",
package: "@ai-sdk/google",
},
options: {
headers: {},
body: {},
aisdk: {
provider: { apiKey: "test" },
request: {},
},
},
}),
)
expect(language.modelId).toBe("gemini-api")
expect(language.provider).toBe("custom-google")
}),
)
})

View File

@@ -0,0 +1,101 @@
import { describe, expect } from "bun:test"
import { createGroq } from "@ai-sdk/groq"
import { Effect, Layer } from "effect"
import { AISDK } from "@opencode-ai/core/aisdk"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { GroqPlugin } from "@opencode-ai/core/plugin/provider/groq"
import { it, model } from "./provider-helper"
import { testEffect } from "../../lib/effect"
// Harness that also provides the real AISDK service on top of the plugin layer.
const aisdkIt = testEffect(AISDK.layer.pipe(Layer.provideMerge(PluginV2.defaultLayer)))
describe("GroqPlugin", () => {
// The plugin handles exactly the bundled @ai-sdk/groq package.
it.effect("creates a Groq SDK for @ai-sdk/groq", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GroqPlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{ model: model("groq", "llama"), package: "@ai-sdk/groq", options: { name: "groq" } },
{},
)
expect(result.sdk).toBeDefined()
}),
)
// openai-compatible requests must not be claimed by the Groq plugin.
it.effect("ignores non-Groq SDK packages", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GroqPlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{ model: model("groq", "llama"), package: "@ai-sdk/openai-compatible", options: { name: "groq" } },
{},
)
expect(result.sdk).toBeUndefined()
}),
)
// Subpath package names like @ai-sdk/groq/compat must not match either —
// matching is exact, not prefix-based.
it.effect("only matches the bundled @ai-sdk/groq package exactly", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GroqPlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{ model: model("groq", "llama"), package: "@ai-sdk/groq/compat", options: { name: "groq" } },
{},
)
expect(result.sdk).toBeUndefined()
}),
)
// Provider/model naming must match what createGroq itself would produce for
// the same options (legacy bundled-SDK parity).
it.effect("matches the old bundled Groq SDK provider naming", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(GroqPlugin)
const result = yield* plugin.trigger(
"aisdk.sdk",
{
model: model("custom-groq", "llama"),
package: "@ai-sdk/groq",
options: { name: "custom-groq", apiKey: "test" },
},
{},
)
const expected = createGroq({ name: "custom-groq", apiKey: "test" } as Parameters<typeof createGroq>[0] & {
name: string
}).languageModel("llama")
const actual = result.sdk?.languageModel("llama")
expect(actual?.provider).toBe(expected.provider)
expect(actual?.modelId).toBe(expected.modelId)
}),
)
// Default language loading resolves the model by apiID and yields the
// groq.chat provider name.
aisdkIt.effect("uses the default languageModel(apiID) behavior", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
const aisdk = yield* AISDK.Service
yield* plugin.add(GroqPlugin)
const result = yield* aisdk.language(
model("groq", "alias", {
apiID: ModelV2.ID.make("llama-api"),
endpoint: {
type: "aisdk",
package: "@ai-sdk/groq",
},
options: {
headers: {},
body: {},
aisdk: {
provider: { apiKey: "test" },
request: {},
},
},
}),
)
expect(result.modelId).toBe("llama-api")
expect(result.provider).toBe("groq.chat")
}),
)
})

View File

@@ -0,0 +1,100 @@
import { Npm } from "@opencode-ai/core/npm"
import type { LanguageModelV3 } from "@ai-sdk/provider"
import { expect } from "bun:test"
import { Effect, Layer, Option } from "effect"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { ProviderV2 } from "@opencode-ai/core/provider"
import { testEffect } from "../../lib/effect"
// Entry-point URL for the provider factory fixture used by dynamic-load tests.
export const fixtureProvider = new URL("./fixtures/provider-factory.ts", import.meta.url).href
// No-op Npm service: never installs anything and resolves no binaries, so
// plugins that consult Npm can run without touching the network or disk.
export const npmLayer = Layer.succeed(
Npm.Service,
Npm.Service.of({
add: () => Effect.succeed({ directory: "", entrypoint: Option.none<string>() }),
install: () => Effect.void,
which: () => Effect.succeed(Option.none<string>()),
}),
)
// Default test harness: plugin service plus the stub Npm layer.
export const it = testEffect(Layer.mergeAll(PluginV2.defaultLayer, npmLayer))
/**
 * Builds a ProviderV2.Info test fixture: starts from the empty provider for the
 * given id, points it at a dummy "test-provider" aisdk endpoint, then layers any
 * caller overrides on top. Caller-supplied `options.options` keys are merged
 * over empty headers/body/aisdk defaults.
 */
export function provider(providerID: string, options?: Partial<ProviderV2.Info>) {
  const base = {
    ...ProviderV2.Info.empty(ProviderV2.ID.make(providerID)),
    endpoint: { type: "aisdk" as const, package: "test-provider" },
  }
  const mergedOptions = {
    headers: {},
    body: {},
    aisdk: { provider: {}, request: {} },
    ...options?.options,
  }
  return new ProviderV2.Info({ ...base, ...options, options: mergedOptions })
}
/**
 * Builds a ModelV2.Info test fixture for providerID/modelID. apiID defaults to
 * the model id and the endpoint to the dummy "test-provider" package; caller
 * overrides are layered on top, with `options.options` merged over empty
 * headers/body/aisdk defaults.
 */
export function model(providerID: string, modelID: string, options?: Partial<ModelV2.Info>) {
  const id = ModelV2.ID.make(modelID)
  const base = {
    ...ModelV2.Info.empty(ProviderV2.ID.make(providerID), id),
    apiID: id,
    endpoint: { type: "aisdk" as const, package: "test-provider" },
  }
  const mergedOptions = {
    headers: {},
    body: {},
    aisdk: { provider: {}, request: {} },
    ...options?.options,
  }
  return new ModelV2.Info({ ...base, ...options, options: mergedOptions })
}
/**
 * Runs `fx` with the given environment variables applied to process.env and
 * restores the previous values afterwards — even when `fx` fails — via
 * Effect.acquireUseRelease. A value of `undefined` removes the variable.
 */
export function withEnv<A, E, R>(vars: Record<string, string | undefined>, fx: () => Effect.Effect<A, E, R>) {
  const apply = (entries: Record<string, string | undefined>) => {
    for (const [name, value] of Object.entries(entries)) {
      if (value === undefined) delete process.env[name]
      else process.env[name] = value
    }
  }
  return Effect.acquireUseRelease(
    Effect.sync(() => {
      // snapshot only the keys we are about to touch
      const saved: Record<string, string | undefined> = {}
      for (const name of Object.keys(vars)) saved[name] = process.env[name]
      apply(vars)
      return saved
    }),
    () => fx(),
    (saved) => Effect.sync(() => apply(saved)),
  )
}
/**
 * Creates a stub AI SDK whose four selector methods (responses, messages, chat,
 * languageModel) record "<method>:<id>" into `calls` and return a minimal
 * LanguageModelV3-shaped object echoing the id, so tests can assert which
 * selector a plugin invoked.
 */
export function fakeSelectorSdk(calls: string[]) {
  const record = (method: string) => (id: string) => {
    calls.push(method + ":" + id)
    return { modelId: id, provider: method, specificationVersion: "v3" } as unknown as LanguageModelV3
  }
  return {
    responses: record("responses"),
    messages: record("messages"),
    chat: record("chat"),
    languageModel: record("languageModel"),
  }
}
/** Asserts that a plugin with the given raw id is present among `ids`. */
export function expectPluginRegistered(ids: string[], id: string) {
  const wanted = PluginV2.ID.make(id)
  expect(ids).toContain(wanted)
}

View File

@@ -0,0 +1,90 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { ProviderPlugins } from "@opencode-ai/core/plugin/provider"
import { KiloPlugin } from "@opencode-ai/core/plugin/provider/kilo"
import { expectPluginRegistered, it, provider } from "./provider-helper"
describe("KiloPlugin", () => {
// The plugin must be present in the default registry so its headers apply.
it.effect("is registered so legacy referer headers can be applied", () =>
Effect.sync(() =>
expectPluginRegistered(
ProviderPlugins.map((item) => item.id),
"kilo",
),
),
)
// Referer headers are added for kilo only, preserving any existing headers;
// other providers are left untouched.
it.effect("applies legacy referer headers only to kilo", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(KiloPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("kilo", {
options: { headers: { Existing: "value" }, body: {}, aisdk: { provider: {}, request: {} } },
}),
cancel: false,
},
)
const ignored = yield* plugin.trigger("provider.update", {}, { provider: provider("openrouter"), cancel: false })
expect(result.provider.options.headers).toEqual({
Existing: "value",
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "opencode",
})
expect(ignored.provider.options.headers).toEqual({})
}),
)
// Exact legacy header casing and set: no lowercase duplicates, and no
// X-Source header for kilo (unlike llmgateway).
it.effect("uses the exact legacy Kilo header casing and set", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(KiloPlugin)
const result = yield* plugin.trigger("provider.update", {}, { provider: provider("kilo"), cancel: false })
expect(result.provider.options.headers).toEqual({
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "opencode",
})
expect(result.provider.options.headers).not.toHaveProperty("http-referer")
expect(result.provider.options.headers).not.toHaveProperty("x-title")
expect(result.provider.options.headers).not.toHaveProperty("X-Source")
}),
)
// Matching is by provider id ("kilo"), not by the endpoint package name.
it.effect("uses the legacy provider-id guard instead of endpoint package matching", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(KiloPlugin)
const matchingID = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("kilo", {
endpoint: { type: "aisdk", package: "not-kilo" },
}),
cancel: false,
},
)
const matchingPackage = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("custom-kilo", {
endpoint: { type: "aisdk", package: "kilo" },
}),
cancel: false,
},
)
expect(matchingID.provider.options.headers).toEqual({
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "opencode",
})
expect(matchingPackage.provider.options.headers).toEqual({})
}),
)
})

View File

@@ -0,0 +1,63 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { ProviderPlugins } from "@opencode-ai/core/plugin/provider"
import { LLMGatewayPlugin } from "@opencode-ai/core/plugin/provider/llmgateway"
import { expectPluginRegistered, it, provider } from "./provider-helper"
describe("LLMGatewayPlugin", () => {
// The plugin must be present in the default registry so its headers apply.
it.effect("is registered so legacy referer headers can be applied", () =>
Effect.sync(() =>
expectPluginRegistered(
ProviderPlugins.map((item) => item.id),
"llmgateway",
),
),
)
// Headers (including X-Source) are added only for the enabled llmgateway
// provider, preserving existing headers; other providers are untouched.
it.effect("applies legacy referer headers only to enabled llmgateway", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(LLMGatewayPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("llmgateway", {
enabled: { via: "env", name: "LLMGATEWAY_API_KEY" },
options: { headers: { Existing: "value" }, body: {}, aisdk: { provider: {}, request: {} } },
}),
cancel: false,
},
)
const ignored = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("openrouter", {
enabled: { via: "env", name: "OPENROUTER_API_KEY" },
}),
cancel: false,
},
)
expect(result.provider.options.headers).toEqual({
Existing: "value",
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "opencode",
"X-Source": "opencode",
})
expect(ignored.provider.options.headers).toEqual({})
}),
)
// A disabled provider (no enabling env key) gets no headers at all.
it.effect("does not apply legacy headers to a disabled llmgateway provider", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(LLMGatewayPlugin)
const result = yield* plugin.trigger("provider.update", {}, { provider: provider("llmgateway"), cancel: false })
expect(result.provider.enabled).toBe(false)
expect(result.provider.options.headers).toEqual({})
}),
)
})

View File

@@ -0,0 +1,106 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { MistralPlugin } from "@opencode-ai/core/plugin/provider/mistral"
import { fakeSelectorSdk, it, model } from "./provider-helper"
describe("MistralPlugin", () => {
  // aisdk.sdk: the plugin builds an SDK only when the configured package is
  // "@ai-sdk/mistral".
  it.effect("creates a Mistral SDK for @ai-sdk/mistral", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(MistralPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("mistral", "mistral-large"), package: "@ai-sdk/mistral", options: { name: "mistral" } },
        {},
      )
      expect(result.sdk).toBeDefined()
    }),
  )
  // Other SDK packages must be left untouched so their own plugins can claim them.
  it.effect("ignores non-Mistral SDK packages", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(MistralPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("mistral", "mistral-large"),
          package: "@ai-sdk/openai-compatible",
          options: { name: "mistral" },
        },
        {},
      )
      expect(result.sdk).toBeUndefined()
    }),
  )
  // A second "inspector" plugin records the provider name reported by the SDK
  // that MistralPlugin created, verifying backward compatibility with the old
  // bundled SDK's "mistral.chat" name.
  it.effect("matches the old bundled Mistral SDK provider name for the bundled provider ID", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const providers: string[] = []
      yield* plugin.add(MistralPlugin)
      yield* plugin.add({
        id: PluginV2.ID.make("mistral-sdk-inspector"),
        effect: Effect.succeed({
          "aisdk.sdk": (evt) =>
            Effect.sync(() => {
              providers.push(evt.sdk.languageModel("mistral-large").provider)
            }),
        }),
      })
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("mistral", "mistral-large"), package: "@ai-sdk/mistral", options: { name: "mistral" } },
        {},
      )
      expect(result.sdk).toBeDefined()
      expect(providers).toEqual(["mistral.chat"])
    }),
  )
  // The legacy "mistral.chat" name should hold even when the provider ID is
  // user-defined rather than the bundled "mistral" ID.
  it.effect("matches the old bundled Mistral SDK provider name for custom provider IDs", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const providers: string[] = []
      yield* plugin.add(MistralPlugin)
      yield* plugin.add({
        id: PluginV2.ID.make("mistral-sdk-inspector"),
        effect: Effect.succeed({
          "aisdk.sdk": (evt) =>
            Effect.sync(() => {
              providers.push(evt.sdk.languageModel("mistral-large").provider)
            }),
        }),
      })
      yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-mistral", "mistral-large"),
          package: "@ai-sdk/mistral",
          options: { name: "custom-mistral" },
        },
        {},
      )
      expect(providers).toEqual(["mistral.chat"])
    }),
  )
  // aisdk.language: the plugin sets no custom language model, so the caller's
  // fallback path (sdk.languageModel(apiID)) is used — the fake SDK records it.
  it.effect("leaves Mistral language selection on the default sdk.languageModel(apiID) path", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      const sdk = fakeSelectorSdk(calls)
      yield* plugin.add(MistralPlugin)
      const result = yield* plugin.trigger(
        "aisdk.language",
        { model: model("mistral", "alias", { apiID: ModelV2.ID.make("mistral-large") }), sdk, options: {} },
        {},
      )
      const language = result.language ?? sdk.languageModel(result.model.apiID)
      expect(calls).toEqual(["languageModel:mistral-large"])
      expect(language).toBeDefined()
    }),
  )
})

View File

@@ -0,0 +1,41 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { ProviderPlugins } from "@opencode-ai/core/plugin/provider"
import { NvidiaPlugin } from "@opencode-ai/core/plugin/provider/nvidia"
import { expectPluginRegistered, it, provider } from "./provider-helper"
describe("NvidiaPlugin", () => {
it.effect("is registered so legacy referer headers can be applied", () =>
Effect.sync(() =>
expectPluginRegistered(
ProviderPlugins.map((item) => item.id),
"nvidia",
),
),
)
it.effect("applies legacy referer headers only to nvidia", () =>
Effect.gen(function* () {
const plugin = yield* PluginV2.Service
yield* plugin.add(NvidiaPlugin)
const result = yield* plugin.trigger(
"provider.update",
{},
{
provider: provider("nvidia", {
options: { headers: { Existing: "value" }, body: {}, aisdk: { provider: {}, request: {} } },
}),
cancel: false,
},
)
const ignored = yield* plugin.trigger("provider.update", {}, { provider: provider("openrouter"), cancel: false })
expect(result.provider.options.headers).toEqual({
Existing: "value",
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "opencode",
})
expect(ignored.provider.options.headers).toEqual({})
}),
)
})

View File

@@ -0,0 +1,101 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { OpenAICompatiblePlugin } from "@opencode-ai/core/plugin/provider/openai-compatible"
import { it, model } from "./provider-helper"
describe("OpenAICompatiblePlugin", () => {
  // includeUsage defaults to true when unset, but an explicit false from the
  // user's options must survive the plugin.
  it.effect("preserves explicit includeUsage false and defaults it to true", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(OpenAICompatiblePlugin)
      const defaulted = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("custom", "model"), package: "@ai-sdk/openai-compatible", options: { name: "custom" } },
        {},
      )
      const disabled = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom", "model"),
          package: "@ai-sdk/openai-compatible",
          options: { name: "custom", includeUsage: false },
        },
        {},
      )
      expect(defaulted.options.includeUsage).toBe(true)
      expect(disabled.options.includeUsage).toBe(false)
    }),
  )
  // Package matching is not an exact string compare: a file URL that resolves
  // to the openai-compatible provider still gets the includeUsage default.
  it.effect("defaults includeUsage for OpenAI-compatible package matches", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(OpenAICompatiblePlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom", "model"),
          package: "file:///tmp/@ai-sdk/openai-compatible-provider.js",
          options: { name: "custom" },
        },
        {},
      )
      expect(result.options.includeUsage).toBe(true)
    }),
  )
  // An inspector plugin reads the provider name the created SDK reports; it
  // should be derived from the provider ID (here "custom-provider.chat").
  it.effect("uses the provider ID as the OpenAI-compatible provider name", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const observed: string[] = []
      yield* plugin.add(OpenAICompatiblePlugin)
      yield* plugin.add({
        id: PluginV2.ID.make("inspector"),
        effect: Effect.succeed({
          "aisdk.sdk": (evt) =>
            Effect.sync(() => {
              observed.push(evt.sdk.languageModel("model").provider)
            }),
        }),
      })
      yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-provider", "model"),
          package: "@ai-sdk/openai-compatible",
          options: { name: "custom-provider", baseURL: "https://example.com/v1" },
        },
        {},
      )
      expect(observed).toEqual(["custom-provider.chat"])
    }),
  )
  // The plugin acts as a fallback: if an earlier plugin already assigned
  // evt.sdk (the sentinel here), it must not be replaced.
  it.effect("does not overwrite an SDK created by an earlier provider-specific plugin", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const sentinel = { languageModel: (modelID: string) => ({ modelID }) }
      yield* plugin.add({
        id: PluginV2.ID.make("sentinel"),
        effect: Effect.succeed({
          "aisdk.sdk": (evt) =>
            Effect.sync(() => {
              evt.sdk = sentinel
            }),
        }),
      })
      yield* plugin.add(OpenAICompatiblePlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("cloudflare-workers-ai", "model"),
          package: "@ai-sdk/openai-compatible",
          options: { name: "cloudflare-workers-ai" },
        },
        {},
      )
      expect(result.sdk).toBe(sentinel)
    }),
  )
})

View File

@@ -0,0 +1,100 @@
import { describe, expect } from "bun:test"
import { Effect } from "effect"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { OpenAIPlugin } from "@opencode-ai/core/plugin/provider/openai"
import { fakeSelectorSdk, it, model } from "./provider-helper"
describe("OpenAIPlugin", () => {
  // aisdk.sdk: the plugin builds an OpenAI SDK for "@ai-sdk/openai" and names
  // it after the provider ID, so a custom ID yields "custom-openai.responses".
  it.effect("creates an OpenAI SDK for @ai-sdk/openai using the provider ID as SDK name", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(OpenAIPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        {
          model: model("custom-openai", "gpt-5"),
          package: "@ai-sdk/openai",
          options: { name: "custom-openai", apiKey: "test" },
        },
        {},
      )
      expect(result.sdk?.responses("gpt-5").provider).toBe("custom-openai.responses")
    }),
  )
  // Other SDK packages must be left untouched so their own plugins can claim them.
  it.effect("ignores non-OpenAI SDK packages", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(OpenAIPlugin)
      const result = yield* plugin.trigger(
        "aisdk.sdk",
        { model: model("openai", "gpt-5"), package: "@ai-sdk/openai-compatible", options: { name: "openai" } },
        {},
      )
      expect(result.sdk).toBeUndefined()
    }),
  )
  // aisdk.language: for the "openai" provider the plugin selects via the
  // Responses API (sdk.responses(apiID)), not the generic languageModel path.
  it.effect("uses the Responses API for language models", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(OpenAIPlugin)
      const result = yield* plugin.trigger(
        "aisdk.language",
        {
          model: model("openai", "alias", { apiID: ModelV2.ID.make("gpt-5") }),
          sdk: fakeSelectorSdk(calls),
          options: {},
        },
        {},
      )
      expect(calls).toEqual(["responses:gpt-5"])
      expect(result.language).toBeDefined()
    }),
  )
  // Language selection is keyed on the provider ID, not the model ID, so an
  // anthropic provider with a "gpt-5" model must be ignored entirely.
  it.effect("ignores non-OpenAI providers", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      const calls: string[] = []
      yield* plugin.add(OpenAIPlugin)
      const result = yield* plugin.trigger(
        "aisdk.language",
        { model: model("anthropic", "gpt-5"), sdk: fakeSelectorSdk(calls), options: {} },
        {},
      )
      expect(calls).toEqual([])
      expect(result.language).toBeUndefined()
    }),
  )
  // model.update: the plugin filters out openai/gpt-5-chat-latest by setting
  // cancel, while normal models pass through unchanged.
  it.effect("cancels gpt-5-chat-latest during model updates", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(OpenAIPlugin)
      const normal = yield* plugin.trigger("model.update", {}, { model: model("openai", "gpt-5"), cancel: false })
      const filtered = yield* plugin.trigger(
        "model.update",
        {},
        { model: model("openai", "gpt-5-chat-latest"), cancel: false },
      )
      expect(normal.cancel).toBe(false)
      expect(filtered.cancel).toBe(true)
    }),
  )
  // The gpt-5-chat-latest filter is scoped to the "openai" provider ID only.
  it.effect("does not cancel gpt-5-chat-latest for non-OpenAI providers", () =>
    Effect.gen(function* () {
      const plugin = yield* PluginV2.Service
      yield* plugin.add(OpenAIPlugin)
      const result = yield* plugin.trigger(
        "model.update",
        {},
        { model: model("custom-openai", "gpt-5-chat-latest"), cancel: false },
      )
      expect(result.cancel).toBe(false)
    }),
  )
})

View File

@@ -0,0 +1,195 @@
import { describe, expect } from "bun:test"
import { DateTime, Effect, Option } from "effect"
import { Catalog } from "@opencode-ai/core/catalog"
import { ModelV2 } from "@opencode-ai/core/model"
import { PluginV2 } from "@opencode-ai/core/plugin"
import { OpencodePlugin } from "@opencode-ai/core/plugin/provider/opencode"
import { ProviderV2 } from "@opencode-ai/core/provider"
import { it, model, provider, withEnv } from "./provider-helper"
// Builds a single-entry cost table for test models; cache pricing is zeroed
// and output pricing defaults to free.
const cost = (input: number, output = 0) => {
  const entry = { input, output, cache: { read: 0, write: 0 } }
  return [entry]
}
describe("OpencodePlugin", () => {
  // With no credentials at all, the plugin falls back to the "public" API key
  // and cancels any model with a non-zero input cost.
  it.effect("uses a public key and cancels paid models without credentials", () =>
    withEnv({ OPENCODE_API_KEY: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        const updated = yield* plugin.trigger("provider.update", {}, { provider: provider("opencode"), cancel: false })
        const paid = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("opencode", "paid", { cost: cost(1) }), cancel: false },
        )
        expect(updated.provider.options.aisdk.provider.apiKey).toBe("public")
        expect(paid.cancel).toBe(true)
      }),
    ),
  )
  // Free models (zero input cost) remain available on the public key.
  it.effect("keeps free models without credentials", () =>
    withEnv({ OPENCODE_API_KEY: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        yield* plugin.trigger("provider.update", {}, { provider: provider("opencode"), cancel: false })
        const free = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("opencode", "free", { cost: cost(0) }), cancel: false },
        )
        expect(free.cancel).toBe(false)
      }),
    ),
  )
  // Only input cost drives the paid/free split: a model priced on output alone
  // is still treated as free.
  it.effect("treats output-only cost as free without credentials", () =>
    withEnv({ OPENCODE_API_KEY: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        yield* plugin.trigger("provider.update", {}, { provider: provider("opencode"), cancel: false })
        const outputOnly = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("opencode", "output-only", { cost: cost(0, 1) }), cancel: false },
        )
        expect(outputOnly.cancel).toBe(false)
      }),
    ),
  )
  // A credential source means no public-key fallback (apiKey left undefined)
  // and paid models are no longer cancelled. Source 1: the OPENCODE_API_KEY env var.
  it.effect("uses OPENCODE_API_KEY as credentials", () =>
    withEnv({ OPENCODE_API_KEY: "secret" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        const updated = yield* plugin.trigger("provider.update", {}, { provider: provider("opencode"), cancel: false })
        const paid = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("opencode", "paid", { cost: cost(1) }), cancel: false },
        )
        expect(updated.provider.options.aisdk.provider.apiKey).toBeUndefined()
        expect(paid.cancel).toBe(false)
      }),
    ),
  )
  // Source 2: an env var listed in the provider's own `env` configuration.
  it.effect("uses configured provider env vars as credentials", () =>
    withEnv({ OPENCODE_API_KEY: undefined, CUSTOM_OPENCODE_API_KEY: "secret" }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        const updated = yield* plugin.trigger(
          "provider.update",
          {},
          { provider: provider("opencode", { env: ["CUSTOM_OPENCODE_API_KEY"] }), cancel: false },
        )
        const paid = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("opencode", "paid", { cost: cost(1) }), cancel: false },
        )
        expect(updated.provider.options.aisdk.provider.apiKey).toBeUndefined()
        expect(paid.cancel).toBe(false)
      }),
    ),
  )
  // Source 3: an apiKey set directly in provider options is kept as-is.
  it.effect("uses configured apiKey as credentials", () =>
    withEnv({ OPENCODE_API_KEY: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        const updated = yield* plugin.trigger(
          "provider.update",
          {},
          {
            provider: provider("opencode", {
              options: {
                headers: {},
                body: {},
                aisdk: {
                  provider: { apiKey: "configured" },
                  request: {},
                },
              },
            }),
            cancel: false,
          },
        )
        const paid = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("opencode", "paid", { cost: cost(1) }), cancel: false },
        )
        expect(updated.provider.options.aisdk.provider.apiKey).toBe("configured")
        expect(paid.cancel).toBe(false)
      }),
    ),
  )
  // Source 4: a provider enabled via the "auth" service counts as credentialed.
  it.effect("uses auth-enabled providers as credentials", () =>
    withEnv({ OPENCODE_API_KEY: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        const updated = yield* plugin.trigger(
          "provider.update",
          {},
          { provider: provider("opencode", { enabled: { via: "auth", service: "opencode" } }), cancel: false },
        )
        const paid = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("opencode", "paid", { cost: cost(1) }), cancel: false },
        )
        expect(updated.provider.options.aisdk.provider.apiKey).toBeUndefined()
        expect(paid.cancel).toBe(false)
      }),
    ),
  )
  // Scoping: other provider IDs are untouched by both hooks.
  it.effect("ignores non-opencode providers and models", () =>
    withEnv({ OPENCODE_API_KEY: undefined }, () =>
      Effect.gen(function* () {
        const plugin = yield* PluginV2.Service
        yield* plugin.add(OpencodePlugin)
        const updated = yield* plugin.trigger("provider.update", {}, { provider: provider("openai"), cancel: false })
        const paid = yield* plugin.trigger(
          "model.update",
          {},
          { model: model("openai", "paid", { cost: cost(1) }), cancel: false },
        )
        expect(updated.provider.options.aisdk.provider.apiKey).toBeUndefined()
        expect(paid.cancel).toBe(false)
      }),
    ),
  )
  // Small-model selection goes through the Catalog layer. gpt-5-nano wins here
  // even though it is priced higher than cheap-mini, showing the preference is
  // by model ID rather than by cost.
  it.effect("prefers gpt-5-nano as the opencode small model", () =>
    Effect.gen(function* () {
      const catalog = yield* Catalog.Service
      const providerID = ProviderV2.ID.opencode
      yield* catalog.provider.update(providerID, () => {})
      yield* catalog.model.update(providerID, ModelV2.ID.make("cheap-mini"), (model) => {
        model.capabilities.input = ["text"]
        model.capabilities.output = ["text"]
        model.cost = cost(1, 1)
        model.time.released = DateTime.makeUnsafe(Date.now())
      })
      yield* catalog.model.update(providerID, ModelV2.ID.make("gpt-5-nano"), (model) => {
        model.capabilities.input = ["text"]
        model.capabilities.output = ["text"]
        model.cost = cost(10, 10)
        model.time.released = DateTime.makeUnsafe(Date.now())
      })
      const selected = yield* catalog.model.small(providerID)
      expect(Option.getOrUndefined(selected)?.id).toBe(ModelV2.ID.make("gpt-5-nano"))
    }).pipe(Effect.provide(Catalog.defaultLayer)),
  )
})

Some files were not shown because too many files have changed in this diff Show More