From d37bc3e71fd6083a7fc9cd2991f16851c7ff186f Mon Sep 17 00:00:00 2001
From: Kit Langton
Date: Mon, 11 May 2026 22:24:09 -0400
Subject: [PATCH] refactor(session): inject native LLM client

---
 packages/opencode/src/session/llm.ts       |  10 +-
 packages/opencode/test/session/llm.test.ts | 211 +++++++++++++++------
 2 files changed, 162 insertions(+), 59 deletions(-)

diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 32fb4cf880..e7cdf8d18b 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -5,6 +5,7 @@ import * as Stream from "effect/Stream"
 import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool as aiTool, jsonSchema, asSchema } from "ai"
 import { tool as nativeTool, ToolFailure, type JsonSchema, type LLMEvent } from "@opencode-ai/llm"
 import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
+import type { LLMClientService } from "@opencode-ai/llm/route"
 import { mergeDeep } from "remeda"
 import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
 import { ProviderTransform } from "@/provider/transform"
@@ -66,7 +67,7 @@ export class Service extends Context.Service()("@opencode/LL
 const live: Layer.Layer<
   Service,
   never,
-  Auth.Service | Config.Service | Provider.Service | Plugin.Service | Permission.Service
+  Auth.Service | Config.Service | Provider.Service | Plugin.Service | Permission.Service | LLMClientService
 > = Layer.effect(
   Service,
   Effect.gen(function* () {
@@ -75,6 +76,7 @@
     const auth = yield* Auth.Service
     const config = yield* Config.Service
     const provider = yield* Provider.Service
     const plugin = yield* Plugin.Service
     const perm = yield* Permission.Service
+    const llmClient = yield* LLMClient.Service
 
     const run = Effect.fn("LLM.run")(function* (input: StreamRequest) {
       const l = log
@@ -380,10 +382,7 @@
         })
         return {
           type: "native" as const,
-          stream: LLMClient.stream({ request, tools: nativeTools(sortedTools, input) }).pipe(
-            Stream.provide(LLMClient.layer),
-            Stream.provide(RequestExecutor.defaultLayer),
-          ),
+          stream: llmClient.stream({ request, tools: nativeTools(sortedTools, input) }),
         }
       }
 
@@ -492,6 +491,7 @@ export const defaultLayer = Layer.suspend(() =>
     Layer.provide(Config.defaultLayer),
     Layer.provide(Provider.defaultLayer),
     Layer.provide(Plugin.defaultLayer),
+    Layer.provide(LLMClient.layer.pipe(Layer.provide(RequestExecutor.defaultLayer))),
   ),
 )
 
diff --git a/packages/opencode/test/session/llm.test.ts b/packages/opencode/test/session/llm.test.ts
index 2054d05343..1ca282bb06 100644
--- a/packages/opencode/test/session/llm.test.ts
+++ b/packages/opencode/test/session/llm.test.ts
@@ -1,14 +1,19 @@
 import { afterAll, beforeAll, beforeEach, describe, expect, test } from "bun:test"
 import path from "path"
 import { tool, type ModelMessage } from "ai"
-import { Cause, Effect, Exit, Stream } from "effect"
+import { Cause, Effect, Exit, Layer, Stream } from "effect"
+import { HttpClientRequest, HttpClientResponse } from "effect/unstable/http"
 import z from "zod"
-import { makeRuntime } from "../../src/effect/run-service"
+import { attach, makeRuntime } from "../../src/effect/run-service"
 import { LLM } from "../../src/session/llm"
+import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
 import { WithInstance } from "../../src/project/with-instance"
+import { Auth } from "@/auth"
+import { Config } from "@/config/config"
 import { Provider } from "@/provider/provider"
 import { ProviderTransform } from "@/provider/transform"
 import { ModelsDev } from "@/provider/models"
 import { Plugin } from "@/plugin"
Plugin } from "@/plugin" import { ProviderID, ModelID } from "../../src/provider/schema" import { Filesystem } from "@/util/filesystem" import { tmpdir } from "../fixture/fixture" @@ -17,6 +22,29 @@ import { MessageV2 } from "../../src/session/message-v2" import { SessionID, MessageID } from "../../src/session/schema" import { AppRuntime } from "../../src/effect/app-runtime" +const openAIConfig = (model: ModelsDev.Provider["models"][string], baseURL: string): Partial => { + const { experimental: _experimental, ...configModel } = model + type ConfigModel = NonNullable[string]["models"]>[string] + return { + enabled_providers: ["openai"], + provider: { + openai: { + name: "OpenAI", + env: ["OPENAI_API_KEY"], + npm: "@ai-sdk/openai", + api: "https://api.openai.com/v1", + models: { + [model.id]: JSON.parse(JSON.stringify(configModel)) as ConfigModel, + }, + options: { + apiKey: "test-openai-key", + baseURL, + }, + }, + }, + } +} + async function getModel(providerID: ProviderID, modelID: ModelID) { return AppRuntime.runPromise( Effect.gen(function* () { @@ -32,6 +60,22 @@ async function drain(input: LLM.StreamInput) { return llm.runPromise((svc) => svc.stream(input).pipe(Stream.runDrain)) } +async function drainWith(layer: Layer.Layer, input: LLM.StreamInput) { + return Effect.runPromise( + attach(LLM.Service.use((svc) => svc.stream(input).pipe(Stream.runDrain))).pipe(Effect.provide(layer)), + ) +} + +function llmLayerWithExecutor(executor: Layer.Layer) { + return LLM.layer.pipe( + Layer.provide(Auth.defaultLayer), + Layer.provide(Config.defaultLayer), + Layer.provide(Provider.defaultLayer), + Layer.provide(Plugin.defaultLayer), + Layer.provide(LLMClient.layer.pipe(Layer.provide(executor))), + ) +} + describe("session.llm.hasToolCalls", () => { test("returns false for empty messages array", () => { expect(LLM.hasToolCalls([])).toBe(false) @@ -614,32 +658,7 @@ describe("session.llm.stream", () => { ] const request = waitRequest("/responses", createEventResponse(responseChunks, true)) - await using tmp = await tmpdir({ - init: async (dir) => { - await Bun.write( - path.join(dir, "opencode.json"), - JSON.stringify({ - $schema: "https://opencode.ai/config.json", - enabled_providers: ["openai"], - provider: { - openai: { - name: "OpenAI", - env: ["OPENAI_API_KEY"], - npm: "@ai-sdk/openai", - api: "https://api.openai.com/v1", - models: { - [model.id]: model, - }, - options: { - apiKey: "test-openai-key", - baseURL: `${server.url.origin}/v1`, - }, - }, - }, - }), - ) - }, - }) + await using tmp = await tmpdir({ config: openAIConfig(model, `${server.url.origin}/v1`) }) await WithInstance.provide({ directory: tmp.path, @@ -726,32 +745,7 @@ describe("session.llm.stream", () => { ] const request = waitRequest("/responses", createEventResponse(chunks, true)) - await using tmp = await tmpdir({ - init: async (dir) => { - await Bun.write( - path.join(dir, "opencode.json"), - JSON.stringify({ - $schema: "https://opencode.ai/config.json", - enabled_providers: ["openai"], - provider: { - openai: { - name: "OpenAI", - env: ["OPENAI_API_KEY"], - npm: "@ai-sdk/openai", - api: "https://api.openai.com/v1", - models: { - [model.id]: model, - }, - options: { - apiKey: "test-openai-key", - baseURL: `${server.url.origin}/v1`, - }, - }, - }, - }), - ) - }, - }) + await using tmp = await tmpdir({ config: openAIConfig(model, `${server.url.origin}/v1`) }) await WithInstance.provide({ directory: tmp.path, @@ -802,6 +796,115 @@ describe("session.llm.stream", () => { }) }) + test("uses injected native request executor for tool 
calls", async () => { + const source = await loadFixture("openai", "gpt-5.2") + const model = source.model + const chunks = [ + { + type: "response.output_item.added", + item: { type: "function_call", id: "item-injected-tool", call_id: "call-injected-tool", name: "lookup" }, + }, + { + type: "response.function_call_arguments.delta", + item_id: "item-injected-tool", + delta: '{"query":"weather"}', + }, + { + type: "response.output_item.done", + item: { + type: "function_call", + id: "item-injected-tool", + call_id: "call-injected-tool", + name: "lookup", + arguments: '{"query":"weather"}', + }, + }, + { + type: "response.completed", + response: { incomplete_details: null, usage: { input_tokens: 1, output_tokens: 1 } }, + }, + ] + let captured: Record | undefined + let executed: unknown + const executor = Layer.succeed( + RequestExecutor.Service, + RequestExecutor.Service.of({ + execute: (request) => + Effect.gen(function* () { + const web = yield* HttpClientRequest.toWeb(request).pipe(Effect.orDie) + captured = (yield* Effect.promise(() => web.json())) as Record + return HttpClientResponse.fromWeb(request, createEventResponse(chunks, true)) + }), + }), + ) + + await using tmp = await tmpdir({ config: openAIConfig(model, "https://injected-openai.test/v1") }) + + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + const previous = process.env.OPENCODE_LLM_RUNTIME + process.env.OPENCODE_LLM_RUNTIME = "native" + try { + const resolved = await getModel(ProviderID.openai, ModelID.make(model.id)) + const sessionID = SessionID.make("session-test-native-injected-tool") + const agent = { + name: "test", + mode: "primary", + options: {}, + permission: [{ permission: "*", pattern: "*", action: "allow" }], + } satisfies Agent.Info + + await drainWith(llmLayerWithExecutor(executor), { + user: { + id: MessageID.make("msg_user-native-injected-tool"), + sessionID, + role: "user", + time: { created: Date.now() }, + agent: agent.name, + model: { providerID: ProviderID.make("openai"), modelID: resolved.id }, + } satisfies MessageV2.User, + sessionID, + model: resolved, + agent, + system: [], + messages: [{ role: "user", content: "Use lookup" }], + tools: { + lookup: tool({ + description: "Lookup data", + inputSchema: z.object({ query: z.string() }), + execute: async (args, options) => { + executed = { args, toolCallId: options.toolCallId } + return { output: "looked up" } + }, + }), + }, + }) + } finally { + if (previous === undefined) delete process.env.OPENCODE_LLM_RUNTIME + else process.env.OPENCODE_LLM_RUNTIME = previous + } + + expect(captured?.model).toBe(model.id) + expect(captured?.tools).toEqual([ + { + type: "function", + name: "lookup", + description: "Lookup data", + parameters: { + type: "object", + properties: { query: { type: "string" } }, + required: ["query"], + additionalProperties: false, + $schema: "http://json-schema.org/draft-07/schema#", + }, + }, + ]) + expect(executed).toEqual({ args: { query: "weather" }, toolCallId: "call-injected-tool" }) + }, + }) + }) + test("executes OpenAI tool calls through native runtime", async () => { const server = state.server if (!server) {