Mirror of https://github.com/anomalyco/opencode.git (synced 2026-05-13 15:44:56 +00:00)
feat(session): execute tools in native LLM runtime
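
Threads the originating tool call through the session dispatch path so execute handlers receive an optional context ({ id, name }), widens the ToolExecute type and the dynamic make() overload to match, and teaches the native LLM runtime to run tools: AI SDK tool definitions are bridged via nativeTools/nativeSchema and handed to LLMClient.stream, replacing the previous "does not support tools yet" guard. A new session.llm.stream test drives an OpenAI function call end to end through the native runtime.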
@@ -200,17 +200,17 @@ const dispatch = (tools: Tools, call: ToolCallPart): Effect.Effect<ToolResultVal
   if (!tool.execute)
     return Effect.succeed({ type: "error" as const, value: `Tool has no execute handler: ${call.name}` })
 
-  return decodeAndExecute(tool, call.input).pipe(
+  return decodeAndExecute(tool, call).pipe(
     Effect.catchTag("LLM.ToolFailure", (failure) =>
       Effect.succeed({ type: "error" as const, value: failure.message } satisfies ToolResultValue),
     ),
   )
 }
 
-const decodeAndExecute = (tool: AnyTool, input: unknown): Effect.Effect<ToolResultValue, ToolFailure> =>
-  tool._decode(input).pipe(
+const decodeAndExecute = (tool: AnyTool, call: ToolCallPart): Effect.Effect<ToolResultValue, ToolFailure> =>
+  tool._decode(call.input).pipe(
     Effect.mapError((error) => new ToolFailure({ message: `Invalid tool input: ${error.message}` })),
-    Effect.flatMap((decoded) => tool.execute!(decoded)),
+    Effect.flatMap((decoded) => tool.execute!(decoded, { id: call.id, name: call.name })),
     Effect.flatMap((value) =>
       tool._encode(value).pipe(
         Effect.mapError(
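
The dispatch change above means a handler can now learn which call it is servicing, while the failure channel is unchanged: anything raised as LLM.ToolFailure is still converted into an error-typed ToolResultValue. A minimal sketch of a handler against the new signature (the validation logic is illustrative, not from this commit):

import { Effect } from "effect"
import { ToolFailure } from "@opencode-ai/llm"

// Handlers now accept an optional second argument carrying the call's
// id and name, supplied by dispatch via decodeAndExecute.
// A ToolFailure here surfaces to the model as { type: "error", value: message }.
const execute = (
  params: { query: string },
  context?: { readonly id: string; readonly name: string },
) =>
  params.query.length === 0
    ? Effect.fail(new ToolFailure({ message: "query must not be empty" }))
    : Effect.succeed({ output: `handled ${context?.id ?? "unknown call"}` })
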
@@ -11,6 +11,7 @@ export type ToolSchema<T> = Schema.Codec<T, any, never, never>
 
 export type ToolExecute<Parameters extends ToolSchema<any>, Success extends ToolSchema<any>> = (
   params: Schema.Schema.Type<Parameters>,
+  context?: { readonly id: string; readonly name: string },
 ) => Effect.Effect<Schema.Schema.Type<Success>, ToolFailure>
 
 /**
@@ -61,7 +62,10 @@ type TypedToolConfig = {
 type DynamicToolConfig = {
   readonly description: string
   readonly jsonSchema: JsonSchema.JsonSchema
-  readonly execute?: (params: unknown) => Effect.Effect<unknown, ToolFailure>
+  readonly execute?: (
+    params: unknown,
+    context?: { readonly id: string; readonly name: string },
+  ) => Effect.Effect<unknown, ToolFailure>
 }
 
 /**
@@ -110,7 +114,10 @@ export function make<Parameters extends ToolSchema<any>, Success extends ToolSch
 export function make(config: {
   readonly description: string
   readonly jsonSchema: JsonSchema.JsonSchema
-  readonly execute: (params: unknown) => Effect.Effect<unknown, ToolFailure>
+  readonly execute: (
+    params: unknown,
+    context?: { readonly id: string; readonly name: string },
+  ) => Effect.Effect<unknown, ToolFailure>
 }): AnyExecutableTool
 export function make(config: {
   readonly description: string
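
For dynamic tools the new parameter is optional, so existing one-argument handlers keep compiling. A sketch of the widened overload in use, mirroring only shapes visible in this diff (the lookup tool itself is illustrative):

import { Effect } from "effect"
import { tool } from "@opencode-ai/llm"

// Dynamic tool built from a raw JSON schema; execute may now inspect
// the optional context to see the originating tool call's id and name.
const lookup = tool({
  description: "Lookup data",
  jsonSchema: { type: "object", properties: { query: { type: "string" } } },
  execute: (params: unknown, context?: { readonly id: string; readonly name: string }) =>
    Effect.succeed({ params, callId: context?.id, calledAs: context?.name }),
})
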
@@ -2,8 +2,8 @@ import { Provider } from "@/provider/provider"
 import * as Log from "@opencode-ai/core/util/log"
 import { Context, Effect, Layer, Record } from "effect"
 import * as Stream from "effect/Stream"
-import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
-import type { LLMEvent } from "@opencode-ai/llm"
+import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool as aiTool, jsonSchema, asSchema } from "ai"
+import { tool as nativeTool, ToolFailure, type JsonSchema, type LLMEvent } from "@opencode-ai/llm"
 import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
 import { mergeDeep } from "remeda"
 import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
@@ -18,6 +18,7 @@ import { Flag } from "@opencode-ai/core/flag/flag"
 import { Permission } from "@/permission"
 import { PermissionID } from "@/permission/schema"
 import { Bus } from "@/bus"
+import { errorMessage } from "@/util/error"
 import { Wildcard } from "@/util/wildcard"
 import { SessionID } from "@/session/schema"
 import { Auth } from "@/auth"
@@ -216,7 +217,7 @@ const live: Layer.Layer<
         Object.keys(tools).length === 0 &&
         hasToolCalls(input.messages)
       ) {
-        tools["_noop"] = tool({
+        tools["_noop"] = aiTool({
           description: "Do not call this tool. It exists only for API compatibility and must never be invoked.",
           inputSchema: jsonSchema({
             type: "object",
@@ -358,31 +359,31 @@ const live: Layer.Layer<
     if (input.model.providerID !== "openai" || input.model.api.npm !== "@ai-sdk/openai") {
       return yield* Effect.fail(new Error("Native LLM runtime currently only supports OpenAI models"))
     }
-    if (Object.keys(sortedTools).length > 0) {
-      return yield* Effect.fail(new Error("Native LLM runtime does not support tools yet"))
-    }
     const apiKey =
       info?.type === "api" ? info.key : typeof item.options.apiKey === "string" ? item.options.apiKey : undefined
     if (!apiKey) return yield* Effect.fail(new Error("Native LLM runtime requires API key auth for OpenAI"))
     const baseURL = typeof item.options.baseURL === "string" ? item.options.baseURL : undefined
+    const request = LLMNative.request({
+      model: input.model,
+      apiKey,
+      baseURL,
+      system: isOpenaiOauth ? system : [],
+      messages: ProviderTransform.message(messages, input.model, options),
+      tools: sortedTools,
+      toolChoice: input.toolChoice,
+      temperature: params.temperature,
+      topP: params.topP,
+      topK: params.topK,
+      maxOutputTokens: params.maxOutputTokens,
+      providerOptions: ProviderTransform.providerOptions(input.model, params.options),
+      headers: requestHeaders,
+    })
     return {
       type: "native" as const,
-      stream: LLMClient.stream(
-        LLMNative.request({
-          model: input.model,
-          apiKey,
-          baseURL,
-          system: isOpenaiOauth ? system : [],
-          messages: ProviderTransform.message(messages, input.model, options),
-          toolChoice: input.toolChoice,
-          temperature: params.temperature,
-          topP: params.topP,
-          topK: params.topK,
-          maxOutputTokens: params.maxOutputTokens,
-          providerOptions: ProviderTransform.providerOptions(input.model, params.options),
-          headers: requestHeaders,
-        }),
-      ).pipe(Stream.provide(LLMClient.layer), Stream.provide(RequestExecutor.defaultLayer)),
+      stream: LLMClient.stream({ request, tools: nativeTools(sortedTools, input) }).pipe(
+        Stream.provide(LLMClient.layer),
+        Stream.provide(RequestExecutor.defaultLayer),
+      ),
     }
   }
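
With the tools guard removed, the hoisted request now always carries sortedTools, and the stream call hands the bridged executables to the client. Opting into this path goes through an environment flag, a sketch assuming only the flag name observed in the test below:

// Observed in the test fixture: selecting the native runtime.
// API-key auth for OpenAI is still required by the guard above.
process.env.OPENCODE_LLM_RUNTIME = "native"
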
@@ -502,6 +503,37 @@ function resolveTools(input: Pick<StreamInput, "tools" | "agent" | "permission"
   return Record.filter(input.tools, (_, k) => input.user.tools?.[k] !== false && !disabled.has(k))
 }
 
+function nativeSchema(value: unknown): JsonSchema {
+  if (!value || typeof value !== "object") return { type: "object", properties: {} }
+  if ("jsonSchema" in value && value.jsonSchema && typeof value.jsonSchema === "object")
+    return value.jsonSchema as JsonSchema
+  return asSchema(value as Parameters<typeof asSchema>[0]).jsonSchema as JsonSchema
+}
+
+function nativeTools(tools: Record<string, Tool>, input: StreamRequest) {
+  return Object.fromEntries(
+    Object.entries(tools).map(([name, item]) => [
+      name,
+      nativeTool({
+        description: item.description ?? "",
+        jsonSchema: nativeSchema(item.inputSchema),
+        execute: (args: unknown, ctx?: { readonly id: string; readonly name: string }) =>
+          Effect.tryPromise({
+            try: () => {
+              if (!item.execute) throw new Error(`Tool has no execute handler: ${name}`)
+              return item.execute(args, {
+                toolCallId: ctx?.id ?? name,
+                messages: input.messages,
+                abortSignal: input.abort,
+              })
+            },
+            catch: (error) => new ToolFailure({ message: errorMessage(error) }),
+          }),
+      }),
+    ]),
+  )
+}
+
 // Check if messages contain any tool-call content
 // Used to determine if a dummy tool should be added for LiteLLM proxy compatibility
 export function hasToolCalls(messages: ModelMessage[]): boolean {
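
nativeSchema covers the three shapes an AI SDK inputSchema can take: absent or non-object values fall back to an empty object schema, jsonSchema(...) wrappers already expose their JSON schema and pass through, and anything else (for example a zod schema) is converted via the SDK's asSchema. A sketch of that fallback branch; the derived shape matches the parameters the test below asserts:

import { asSchema } from "ai"
import { z } from "zod"

// What nativeSchema's final branch does for a zod inputSchema:
// asSchema normalizes it and exposes the derived JSON schema, which
// the native runtime then sends as the tool's parameters.
const derived = asSchema(z.object({ query: z.string() })).jsonSchema
// -> { type: "object", properties: { query: { type: "string" } },
//      required: ["query"], additionalProperties: false,
//      $schema: "http://json-schema.org/draft-07/schema#" }
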
@@ -802,6 +802,134 @@ describe("session.llm.stream", () => {
     })
   })
 
+  test("executes OpenAI tool calls through native runtime", async () => {
+    const server = state.server
+    if (!server) {
+      throw new Error("Server not initialized")
+    }
+
+    const source = await loadFixture("openai", "gpt-5.2")
+    const model = source.model
+    const chunks = [
+      {
+        type: "response.output_item.added",
+        item: { type: "function_call", id: "item-native-tool", call_id: "call-native-tool", name: "lookup" },
+      },
+      {
+        type: "response.function_call_arguments.delta",
+        item_id: "item-native-tool",
+        delta: '{"query":"weather"}',
+      },
+      {
+        type: "response.output_item.done",
+        item: {
+          type: "function_call",
+          id: "item-native-tool",
+          call_id: "call-native-tool",
+          name: "lookup",
+          arguments: '{"query":"weather"}',
+        },
+      },
+      {
+        type: "response.completed",
+        response: { incomplete_details: null, usage: { input_tokens: 1, output_tokens: 1 } },
+      },
+    ]
+    const request = waitRequest("/responses", createEventResponse(chunks, true))
+    let executed: unknown
+
+    await using tmp = await tmpdir({
+      init: async (dir) => {
+        await Bun.write(
+          path.join(dir, "opencode.json"),
+          JSON.stringify({
+            $schema: "https://opencode.ai/config.json",
+            enabled_providers: ["openai"],
+            provider: {
+              openai: {
+                name: "OpenAI",
+                env: ["OPENAI_API_KEY"],
+                npm: "@ai-sdk/openai",
+                api: "https://api.openai.com/v1",
+                models: {
+                  [model.id]: model,
+                },
+                options: {
+                  apiKey: "test-openai-key",
+                  baseURL: `${server.url.origin}/v1`,
+                },
+              },
+            },
+          }),
+        )
+      },
+    })
+
+    await WithInstance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        const previous = process.env.OPENCODE_LLM_RUNTIME
+        process.env.OPENCODE_LLM_RUNTIME = "native"
+        try {
+          const resolved = await getModel(ProviderID.openai, ModelID.make(model.id))
+          const sessionID = SessionID.make("session-test-native-tool")
+          const agent = {
+            name: "test",
+            mode: "primary",
+            options: {},
+            permission: [{ permission: "*", pattern: "*", action: "allow" }],
+          } satisfies Agent.Info
+
+          await drain({
+            user: {
+              id: MessageID.make("msg_user-native-tool"),
+              sessionID,
+              role: "user",
+              time: { created: Date.now() },
+              agent: agent.name,
+              model: { providerID: ProviderID.make("openai"), modelID: resolved.id },
+            } satisfies MessageV2.User,
+            sessionID,
+            model: resolved,
+            agent,
+            system: [],
+            messages: [{ role: "user", content: "Use lookup" }],
+            tools: {
+              lookup: tool({
+                description: "Lookup data",
+                inputSchema: z.object({ query: z.string() }),
+                execute: async (args, options) => {
+                  executed = { args, toolCallId: options.toolCallId }
+                  return { output: "looked up" }
+                },
+              }),
+            },
+          })
+        } finally {
+          if (previous === undefined) delete process.env.OPENCODE_LLM_RUNTIME
+          else process.env.OPENCODE_LLM_RUNTIME = previous
+        }
+
+        const capture = await request
+        expect(capture.body.tools).toEqual([
+          {
+            type: "function",
+            name: "lookup",
+            description: "Lookup data",
+            parameters: {
+              type: "object",
+              properties: { query: { type: "string" } },
+              required: ["query"],
+              additionalProperties: false,
+              $schema: "http://json-schema.org/draft-07/schema#",
+            },
+          },
+        ])
+        expect(executed).toEqual({ args: { query: "weather" }, toolCallId: "call-native-tool" })
+      },
+    })
+  })
+
   test("accepts user image attachments as data URLs for OpenAI models", async () => {
     const server = state.server
     if (!server) {