Mirror of https://github.com/anomalyco/opencode.git (synced 2026-05-16 17:25:22 +00:00)

Compare commits: server-dis...llm-native — 6 commits

Commits:
- 4d7d5a87ef
- d441e931f9
- ad79ad9ea8
- d6b23fd8f6
- 5911bd532d
- 2385123f03

bun.lock (32 changed lines)
@@ -421,6 +421,7 @@
"@octokit/graphql": "9.0.2",
"@octokit/rest": "catalog:",
"@openauthjs/openauth": "catalog:",
"@opencode-ai/llm": "workspace:*",
"@opencode-ai/plugin": "workspace:*",
"@opencode-ai/script": "workspace:*",
"@opencode-ai/sdk": "workspace:*",

@@ -489,6 +490,7 @@
"@babel/core": "7.28.4",
"@octokit/webhooks-types": "7.6.1",
"@opencode-ai/core": "workspace:*",
"@opencode-ai/http-recorder": "workspace:*",
"@opencode-ai/script": "workspace:*",
"@parcel/watcher-darwin-arm64": "2.5.1",
"@parcel/watcher-darwin-x64": "2.5.1",

@@ -536,9 +538,9 @@
"typescript": "catalog:",
},
"peerDependencies": {
"@opentui/core": ">=0.2.10",
"@opentui/keymap": ">=0.2.10",
"@opentui/solid": ">=0.2.10",
"@opentui/core": ">=0.2.11",
"@opentui/keymap": ">=0.2.11",
"@opentui/solid": ">=0.2.11",
},
"optionalPeers": [
"@opentui/core",

@@ -721,9 +723,9 @@
"@npmcli/arborist": "9.4.0",
"@octokit/rest": "22.0.0",
"@openauthjs/openauth": "0.0.0-20250322224806",
"@opentui/core": "0.2.10",
"@opentui/keymap": "0.2.10",
"@opentui/solid": "0.2.10",
"@opentui/core": "0.2.11",
"@opentui/keymap": "0.2.11",
"@opentui/solid": "0.2.11",
"@pierre/diffs": "1.1.0-beta.18",
"@playwright/test": "1.59.1",
"@sentry/solid": "10.36.0",
@@ -1590,23 +1592,23 @@
|
||||
|
||||
"@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.40.0", "", {}, "sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw=="],
|
||||
|
||||
"@opentui/core": ["@opentui/core@0.2.10", "", { "dependencies": { "bun-ffi-structs": "0.2.2", "diff": "9.0.0", "marked": "17.0.1", "string-width": "7.2.0", "strip-ansi": "7.1.2", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@opentui/core-darwin-arm64": "0.2.10", "@opentui/core-darwin-x64": "0.2.10", "@opentui/core-linux-arm64": "0.2.10", "@opentui/core-linux-x64": "0.2.10", "@opentui/core-win32-arm64": "0.2.10", "@opentui/core-win32-x64": "0.2.10" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, "sha512-oviCtx0jYjc7F8X2b8+0IkQLg6WH47Nwl6CFeZo5dU0k6OpSbTbi07ZleObaiECAp+S1YLhAtVdgzHU7hBZlaw=="],
|
||||
"@opentui/core": ["@opentui/core@0.2.11", "", { "dependencies": { "bun-ffi-structs": "0.2.2", "diff": "9.0.0", "marked": "17.0.1", "string-width": "7.2.0", "strip-ansi": "7.1.2", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@opentui/core-darwin-arm64": "0.2.11", "@opentui/core-darwin-x64": "0.2.11", "@opentui/core-linux-arm64": "0.2.11", "@opentui/core-linux-x64": "0.2.11", "@opentui/core-win32-arm64": "0.2.11", "@opentui/core-win32-x64": "0.2.11" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, "sha512-X0zLmcDEvMrPzWYp769I7VEVb+og38vaete9tGZXu9HnJgu/paPUUplUT+6denBQccr2qx1rBYV6EtgbBpLEyw=="],
|
||||
|
||||
"@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.2.10", "", { "os": "darwin", "cpu": "arm64" }, "sha512-+lbDDj42Og+UtTZEwlHhGXichmOlkxSqn0J+Jqjat5/Tt5oZykj1NZjFIQ7ZSz4Miz7EmZwgYKE2CyOmmm9MoQ=="],
|
||||
"@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.2.11", "", { "os": "darwin", "cpu": "arm64" }, "sha512-h2MXtE2Cu3XlKVoQMXthnbhleO68zGXkoh/r1Q5pCoZh6RuXqns5/94D/aZThXBWwzPuEoyarMlxxR9OqrpvHw=="],
|
||||
|
||||
"@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.2.10", "", { "os": "darwin", "cpu": "x64" }, "sha512-5iAoA0aqMWWAQ93nh8Bb0ipwt9h+tvEFc88+YO9St43uUJ+XrXcmMj3T8wtl6dSu/SN0UoDWNaUMHUmtykiPtg=="],
|
||||
"@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.2.11", "", { "os": "darwin", "cpu": "x64" }, "sha512-Y0jbPClnOBTPSIy+2THG86MTqIG/jGFlOOKuw4JfCDqEjPBM3pLWIHnJb3WxHRi2LlvfyBxvrUTXWlW6JpI0QQ=="],
|
||||
|
||||
"@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.2.10", "", { "os": "linux", "cpu": "arm64" }, "sha512-EnrkxgH5K76Oi/Br1UHPZblXG5P60snmtySfnxuVaeECNZrbTkV6BV/A0WoBeWshJweGbx1D+eTF+sEEjQCi8w=="],
|
||||
"@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.2.11", "", { "os": "linux", "cpu": "arm64" }, "sha512-blQyyuTaW4q/OQ3whs7Kt7GCXhBUR5EQHHDdjOqQAr0HYpohUa6sbHMbiBcX2Ehc9ZWwtiaOoWiyZ5YXy2SAvg=="],
|
||||
|
||||
"@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.2.10", "", { "os": "linux", "cpu": "x64" }, "sha512-fI+r3kCPqIxsWwPVGpKUQy4zHK8y+jkDRCwa3UbaUy48RQ44jMuf2RhVhmi4xmCvSc8UPJBbYsw1tLuh9kmXjg=="],
|
||||
"@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.2.11", "", { "os": "linux", "cpu": "x64" }, "sha512-0nEB5+MgzQRYiVcQd1vHXPWNPWGh4JEmQTJKyG3OHnTzPaJ1FVSQ/V71ECyRSl3ymY3F+U0eW9cFgw1hCieK2w=="],
|
||||
|
||||
"@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.2.10", "", { "os": "win32", "cpu": "arm64" }, "sha512-8F4z2hIRgkVWcr6CMVeJ9N4+1rmURPt2Pq2GBPko8ch6rxHR+a//KD1MfphyuLTHBS1tJ4vfZSWSoiaESImtrA=="],
|
||||
"@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.2.11", "", { "os": "win32", "cpu": "arm64" }, "sha512-+KKH77fzm0qF8py9G2pU32DzB1bAgDMfBajrs7gKL5NtSEnknrwfh7hIs/tq41aF6j9zvIzgtykByh26tcjFog=="],
|
||||
|
||||
"@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.2.10", "", { "os": "win32", "cpu": "x64" }, "sha512-Ki+qNBlIFW5K2wcG/RHrlPp7yEQKXeiNX3mlje25iwX62Ac5w391HBpOmUjbPoq20McPyDRnhbLfbXQSPtickg=="],
|
||||
"@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.2.11", "", { "os": "win32", "cpu": "x64" }, "sha512-dMmb9DX0W0HWadLdgciMbonqIc1xdcKiVmaQSYxw5eGCzFRPZIOrKHByesP+2ipkMuLx85W/MJUFal/lW8XSNg=="],
|
||||
|
||||
"@opentui/keymap": ["@opentui/keymap@0.2.10", "", { "dependencies": { "@opentui/core": "0.2.10" }, "peerDependencies": { "@opentui/react": "0.2.10", "@opentui/solid": "0.2.10", "react": ">=19.2.0", "solid-js": "1.9.12" }, "optionalPeers": ["@opentui/react", "@opentui/solid", "react", "solid-js"] }, "sha512-80fU3Lr/98sNIpVYd8PApAeQw8A8D9BemyOGi6jGvTQCl0rxKgvaVBviDRGKxl1INTVjZy9By8UPncc2KJOuWQ=="],
|
||||
"@opentui/keymap": ["@opentui/keymap@0.2.11", "", { "dependencies": { "@opentui/core": "0.2.11" }, "peerDependencies": { "@opentui/react": "0.2.11", "@opentui/solid": "0.2.11", "react": ">=19.2.0", "solid-js": "1.9.12" }, "optionalPeers": ["@opentui/react", "@opentui/solid", "react", "solid-js"] }, "sha512-pCrJrY3mTuXdDaaRneId1JsJCtGE+7prTtWihzOLZzVJTJYyYtT38gMI7MpyAoloVDfEL5cTe8C+v7wv+IYREw=="],
|
||||
|
||||
"@opentui/solid": ["@opentui/solid@0.2.10", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.2.10", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.12", "entities": "7.0.1", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.12" } }, "sha512-+4/MB90yIQiPwg8Y4wY092yva9BvRTsJeeeEO3e2H7P8k8zxYk4G9bzuhqYLxA9mTVQ+zVDlrmFoPQhT7vpIRw=="],
|
||||
"@opentui/solid": ["@opentui/solid@0.2.11", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.2.11", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.12", "entities": "7.0.1", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.12" } }, "sha512-M3WHxBFORHVE0yqMJYpi9PfjXWlnRTw/LYuBhZaJv0HTo+zTs60P/ukGcwnHDWnMpTGf3BH9x0Yi2dIqjHRY6Q=="],
|
||||
|
||||
"@oslojs/asn1": ["@oslojs/asn1@1.0.0", "", { "dependencies": { "@oslojs/binary": "1.0.0" } }, "sha512-zw/wn0sj0j0QKbIXfIlnEcTviaCzYOY3V5rAyjR6YtOByFtJiT574+8p9Wlach0lZH9fddD4yb9laEAIl4vXQA=="],
|
||||
|
||||
|
||||
@@ -35,9 +35,9 @@
"@types/cross-spawn": "6.0.6",
"@octokit/rest": "22.0.0",
"@hono/zod-validator": "0.4.2",
"@opentui/core": "0.2.10",
"@opentui/keymap": "0.2.10",
"@opentui/solid": "0.2.10",
"@opentui/core": "0.2.11",
"@opentui/keymap": "0.2.11",
"@opentui/solid": "0.2.11",
"ulid": "3.0.1",
"@kobalte/core": "0.13.11",
"@types/luxon": "3.7.1",
@@ -38,7 +38,6 @@ export const Flag = {
),
OPENCODE_EXPERIMENTAL_DISABLE_COPY_ON_SELECT:
copy === undefined ? process.platform === "win32" : truthy("OPENCODE_EXPERIMENTAL_DISABLE_COPY_ON_SELECT"),
OPENCODE_EXPERIMENTAL_MINIMAL_THINKING: truthy("OPENCODE_EXPERIMENTAL_MINIMAL_THINKING"),
OPENCODE_MODELS_URL: process.env["OPENCODE_MODELS_URL"],
OPENCODE_MODELS_PATH: process.env["OPENCODE_MODELS_PATH"],
OPENCODE_DB: process.env["OPENCODE_DB"],
@@ -91,6 +91,7 @@ export const TextDelta = Schema.Struct({
type: Schema.tag("text-delta"),
id: ContentBlockID,
text: Schema.String,
providerMetadata: Schema.optional(ProviderMetadata),
}).annotate({ identifier: "LLM.Event.TextDelta" })
export type TextDelta = Schema.Schema.Type<typeof TextDelta>

@@ -112,6 +113,7 @@ export const ReasoningDelta = Schema.Struct({
type: Schema.tag("reasoning-delta"),
id: ContentBlockID,
text: Schema.String,
providerMetadata: Schema.optional(ProviderMetadata),
}).annotate({ identifier: "LLM.Event.ReasoningDelta" })
export type ReasoningDelta = Schema.Schema.Type<typeof ReasoningDelta>
@@ -33,7 +33,15 @@ export type TextVerbosity = Schema.Schema.Type<typeof TextVerbosity>
export const MessageRole = Schema.Literals(["user", "assistant", "tool"])
export type MessageRole = Schema.Schema.Type<typeof MessageRole>

export const FinishReason = Schema.Literals(["stop", "length", "tool-calls", "content-filter", "error", "unknown"])
export const FinishReason = Schema.Literals([
"stop",
"length",
"tool-calls",
"content-filter",
"error",
"other",
"unknown",
])
export type FinishReason = Schema.Schema.Type<typeof FinishReason>

export const JsonSchema = Schema.Record(Schema.String, Schema.Unknown)
@@ -37,8 +37,9 @@
"devDependencies": {
"@babel/core": "7.28.4",
"@octokit/webhooks-types": "7.6.1",
"@opencode-ai/script": "workspace:*",
"@opencode-ai/core": "workspace:*",
"@opencode-ai/http-recorder": "workspace:*",
"@opencode-ai/script": "workspace:*",
"@parcel/watcher-darwin-arm64": "2.5.1",
"@parcel/watcher-darwin-x64": "2.5.1",
"@parcel/watcher-linux-arm64-glibc": "2.5.1",

@@ -99,6 +100,7 @@
"@octokit/graphql": "9.0.2",
"@octokit/rest": "catalog:",
"@openauthjs/openauth": "catalog:",
"@opencode-ai/llm": "workspace:*",
"@opencode-ai/plugin": "workspace:*",
"@opencode-ai/script": "workspace:*",
"@opencode-ai/sdk": "workspace:*",
@@ -70,11 +70,54 @@ Endpoint definitions declare which public errors can be emitted. Public
HTTP error schemas carry their response status with `httpApiStatus` or the
equivalent HttpApi schema annotation.

Effect's own HttpApi examples follow this pattern:

```ts
export class Unauthorized extends Schema.TaggedErrorClass<Unauthorized>()(
  "Unauthorized",
  { message: Schema.String },
  { httpApiStatus: 401 },
) {}

export class Authorization extends HttpApiMiddleware.Service<
  Authorization,
  {
    provides: CurrentUser
  }
>()("app/Authorization", {
  security: { bearer: HttpApiSecurity.bearer },
  error: Unauthorized,
}) {}
```

Endpoint-level errors use the same idea:

```ts
export class ConfigApiError extends Schema.ErrorClass<ConfigApiError>("ConfigApiError")(
  {
    name: Schema.Union(Schema.Literal("ConfigInvalidError"), Schema.Literal("ConfigJsonError")),
    data: Schema.Struct({ message: Schema.optional(Schema.String), path: Schema.String }),
  },
  { httpApiStatus: 400 },
) {}

HttpApiEndpoint.get("get", "/config", {
  success: Config.Info,
  error: ConfigApiError,
})
```

The service error and HTTP error may be the same class only when the wire
shape is intentionally public. Use separate HTTP error schemas when the
service error contains internals, low-level causes, retry hints, or data
that should not be exposed to API clients.

Do not map every domain error into one universal HTTP error class. Prefer a
small public error vocabulary by route group: shared shapes like
`ApiNotFoundError`, route-specific shapes like `ConfigApiError`, and built-in
empty `HttpApiError.*` only when an empty/no-content body is the intended SDK
contract.
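A shared shape like `ApiNotFoundError` can follow the same pattern. The sketch
below is illustrative only: the field names and the `/session/:id` endpoint are
assumptions, not code from this repository.

```ts
// Sketch only: one public not-found body reused across route groups.
// Field names are assumptions; the real ApiNotFoundError may differ.
export class ApiNotFoundError extends Schema.ErrorClass<ApiNotFoundError>("ApiNotFoundError")(
  {
    message: Schema.String,
    resource: Schema.String,
  },
  { httpApiStatus: 404 },
) {}

// Every endpoint that can miss a resource declares the same public shape.
HttpApiEndpoint.get("get", "/session/:id", {
  success: Schema.Unknown, // placeholder; a real endpoint declares its success schema
  error: ApiNotFoundError,
})
```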

## Mapping Guidance

- Keep one-off translations inline in the handler.
@@ -86,6 +129,35 @@ that should not be exposed to API clients.
breaking API change.
- Use built-in `HttpApiError.*` only when its generated body and SDK
  surface are intentionally the public contract.
- Prefer `Schema.ErrorClass` for public HTTP error bodies whose wire shape is
  not the same as the internal domain error shape.
- Prefer `Schema.TaggedErrorClass` for service/domain errors and middleware
  errors that are naturally tagged by `_tag`.
- If preserving a legacy `{ name, data }` body, model that shape explicitly in
  the public API error schema instead of relying on `NamedError.toObject()` in
  generic middleware, as sketched below.
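A minimal sketch of that explicit modeling, assuming a hypothetical
`WorktreeApiError`; the class name and its fields are illustrative:

```ts
// Sketch: declare the legacy { name, data } wire shape on the public error
// schema itself instead of deriving it via NamedError.toObject() in generic
// middleware. "WorktreeApiError" and its fields are hypothetical.
export class WorktreeApiError extends Schema.ErrorClass<WorktreeApiError>("WorktreeApiError")(
  {
    name: Schema.Literal("WorktreeError"),
    data: Schema.Struct({ message: Schema.optional(Schema.String), path: Schema.String }),
  },
  { httpApiStatus: 400 },
) {}
```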

## User-Facing Rendering

HTTP serialization and user rendering are separate boundaries. The server
should send structured public errors; CLI and TUI code should format those
structures through one shared formatter.

For SDK calls using `{ throwOnError: true }`, the generated client may wrap the
decoded response body in an `Error`. The original body should remain available
under `error.cause.body`; `FormatError` is the right place to unwrap and render
that body. TUI aggregation helpers should call `FormatError` first, then fall
back to generic `Error.message` / string rendering.
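The `FormatError` change later in this compare does the unwrap roughly like
this (abridged from the diff further down; `isRecord` is the small type guard
imported from `@/util/record`):

```ts
import { isRecord } from "@/util/record"

export function FormatError(input: unknown): string | undefined {
  // SDK clients with { throwOnError: true } throw an Error whose cause carries
  // the decoded response body; unwrap it and re-run formatting on that body.
  if (input instanceof Error && isRecord(input.cause) && "body" in input.cause) {
    const formatted = FormatError(input.cause.body)
    if (formatted) return formatted
  }
  // ...domain-specific branches follow; returning undefined lets callers fall
  // back to generic Error.message / string rendering.
  return undefined
}
```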
When several parallel startup requests fail from the same underlying issue,
group identical rendered messages and list the affected request names once.
For example:

```text
Configuration is invalid at /path/to/opencode.json
↳ Expected object, got "not-object" provider.bad.options
Affected startup requests: config.providers, provider.list, app.agents, config.get
```

## Middleware Guidance

@@ -99,6 +171,15 @@ middleware should shrink. It should not gain new name checks.
Unknown `500` responses should log full details server-side with
`Cause.pretty(cause)` and return a safe public body.

The config startup regression in #27056 is the failure mode this rule is meant
to avoid: a user-authored invalid `opencode.json` crossed the HttpApi boundary
as a defect, so middleware replaced a useful `ConfigInvalidError` with a safe
generic `UnknownError`. The compatibility fix is to preserve config parse and
validation errors as client-visible `400`s. The target architecture is better:
config loading should fail on the typed error channel, config HTTP handlers
should map those errors to declared `ConfigApiError` responses, and the generic
middleware should never see them.
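The compatibility branch added to the generic error middleware in this compare
looks roughly like this (abridged from the `errorLayer` diff further down; it
sits inside the defect-handling path, so `defect` is already in scope there):

```ts
import { NamedError } from "@opencode-ai/core/util/error"
import { ConfigError } from "@/config/error"
import { Effect } from "effect"
import { HttpServerResponse } from "effect/unstable/http"

// Recognize user-authored config errors that crossed the boundary as defects
// and return them as client-visible 400s instead of a generic UnknownError.
const error = defect.defect
if (
  error instanceof NamedError &&
  (ConfigError.InvalidError.isInstance(error) || ConfigError.JsonError.isInstance(error))
) {
  return Effect.succeed(HttpServerResponse.jsonUnsafe(error.toObject(), { status: 400 }))
}
```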

## Migration Order

Prefer small vertical slices:
@@ -113,6 +194,9 @@ Prefer small vertical slices:
Good early domains are storage not-found, worktree errors, and provider
auth validation errors because they currently drive HTTP behavior.

Config parse and validation errors are also a good early slice because they
are startup-blocking and must be rendered clearly in both CLI and TUI flows.

## Checklist For A PR

- [ ] Expected failures are typed errors, not defects.

@@ -1,42 +1,24 @@
import { Effect } from "effect"
import { Server } from "../../server/server"
import { ServerDiscovery } from "@/cli/server-discovery"
import { effectCmd } from "../effect-cmd"
import { withNetworkOptions, resolveNetworkOptions } from "../network"
import { Flag } from "@opencode-ai/core/flag/flag"

export const ServeCommand = effectCmd({
command: "serve",
builder: (yargs) =>
withNetworkOptions(yargs).option("discoverable", {
type: "boolean",
describe: "write this server to the local discovery file for default TUI startup",
default: false,
}),
builder: (yargs) => withNetworkOptions(yargs),
describe: "starts a headless opencode server",
// Server loads instances per-request via x-opencode-directory header — no
// need for an ambient project InstanceContext at startup.
instance: false,
handler: (args) =>
Effect.gen(function* () {
if (!Flag.OPENCODE_SERVER_PASSWORD) {
console.log("Warning: OPENCODE_SERVER_PASSWORD is not set; server is unsecured.")
}
const opts = yield* resolveNetworkOptions(args)
const server = yield* Effect.promise(() => Server.listen(opts))
const discovery = args.discoverable ? yield* ServerDiscovery.Service : undefined
if (discovery) {
yield* discovery.write(server.url)
process.on("exit", ServerDiscovery.removeSync)
}
console.log(`opencode server listening on http://${server.hostname}:${server.port}`)
handler: Effect.fn("Cli.serve")(function* (args) {
if (!Flag.OPENCODE_SERVER_PASSWORD) {
console.log("Warning: OPENCODE_SERVER_PASSWORD is not set; server is unsecured.")
}
const opts = yield* resolveNetworkOptions(args)
const server = yield* Effect.promise(() => Server.listen(opts))
console.log(`opencode server listening on http://${server.hostname}:${server.port}`)

yield* Effect.never.pipe(
Effect.ensuring(
discovery
? discovery.remove().pipe(Effect.ensuring(Effect.sync(() => process.off("exit", ServerDiscovery.removeSync))))
: Effect.void,
),
)
}).pipe(Effect.provide(ServerDiscovery.defaultLayer)),
yield* Effect.never
}),
})

@@ -188,6 +188,7 @@ export const Definitions = {
"dialog.select.home": keybind("home", "Move to first dialog item"),
"dialog.select.end": keybind("end", "Move to last dialog item"),
"dialog.select.submit": keybind("return", "Submit selected dialog item"),
"dialog.prompt.submit": keybind("return", "Submit dialog prompt"),
"dialog.mcp.toggle": keybind("space", "Toggle MCP in MCP dialog"),
"prompt.autocomplete.prev": keybind("up,ctrl+p", "Move to previous autocomplete item"),
"prompt.autocomplete.next": keybind("down,ctrl+n", "Move to next autocomplete item"),

@@ -1,3 +1,5 @@
import { FormatError } from "@/cli/error"

/**
* Aggregate Promise.allSettled results into a single Error that names every
* failed endpoint, or return null when all fulfilled. Used at TUI bootstrap

@@ -15,7 +17,19 @@ export function aggregateFailures(labeled: LabeledSettled[]): Error | null {
)
if (failed.length === 0) return null

const reasons = failed.map((f) => `${f.name}: ${reasonMessage(f.result.reason)}`).join("; ")
const reasons = Array.from(
failed
.map((f) => ({ name: f.name, message: reasonMessage(f.result.reason) }))
.reduce((grouped, failure) => {
grouped.set(failure.message, [...(grouped.get(failure.message) ?? []), failure.name])
return grouped
}, new Map<string, string[]>())
.entries(),
)
.map(([message, names]) =>
names.length === 1 ? `${names[0]}: ${message}` : `${message}\nAffected startup requests: ${names.join(", ")}`,
)
.join("; ")
const summary = `${failed.length} of ${labeled.length} requests failed: ${reasons}`
const err = new Error(summary)
err.cause = { failures: failed.map((f) => ({ name: f.name, reason: f.result.reason })) }

@@ -23,6 +37,9 @@ export function aggregateFailures(labeled: LabeledSettled[]): Error | null {
}

function reasonMessage(reason: unknown): string {
const formatted = FormatError(reason)
if (formatted) return formatted

if (reason instanceof Error) return reason.message
if (typeof reason === "string") return reason
if (reason && typeof reason === "object") {

@@ -1,10 +1,9 @@
import { createMemo, type Setter } from "solid-js"
import { Flag } from "@opencode-ai/core/flag/flag"
import { useKV } from "./kv"

export type ThinkingMode = "show" | "minimal" | "hide"
export type ThinkingMode = "show" | "hide"

const MODES: readonly ThinkingMode[] = ["show", "minimal", "hide"] as const
const MODES: readonly ThinkingMode[] = ["show", "hide"] as const

// OpenAI's Responses API surfaces reasoning summaries that start with a bolded
// title line: "**Inspecting PR workflow**\n\n<body>". GitHub Copilot routes

@@ -20,7 +19,7 @@ export function isThinkingMode(value: unknown): value is ThinkingMode {
return typeof value === "string" && (MODES as readonly string[]).includes(value)
}

// Cycle order matches the slash command: show → minimal → hide → show.
// Cycle order matches the slash command: show → hide → show.
export function nextThinkingMode(current: ThinkingMode): ThinkingMode {
const idx = MODES.indexOf(current)
return MODES[(idx + 1) % MODES.length] ?? "show"

@@ -33,7 +32,7 @@ export function useThinkingMode() {
// The KVProvider only renders children once kv.ready, so reads here are safe.
const hadStored = kv.get("thinking_mode") !== undefined
const legacy = kv.get("thinking_visibility")
const [stored, setStored] = kv.signal<ThinkingMode>("thinking_mode", "minimal")
const [stored, setStored] = kv.signal<ThinkingMode>("thinking_mode", "hide")

// The kv signal exposes its setter typed as `Setter<T>` which carries Solid's
// overload set; passing an updater fn through a property access loses the

@@ -47,21 +46,21 @@ export function useThinkingMode() {

// Preserve previous experience for users who had explicitly toggled the
// legacy `thinking_visibility` boolean. First-time users (no legacy key)
// get the new "minimal" default.
// get the new "hide" default (collapsed thinking).
if (!hadStored) {
if (legacy === true) set("show")
else if (legacy === false) set("hide")
}

if ((stored() as string) === "minimal") set("hide")

const mode = createMemo<ThinkingMode>(() => {
if (Flag.OPENCODE_EXPERIMENTAL_MINIMAL_THINKING) return "minimal"
const value = stored()
return isThinkingMode(value) ? value : "minimal"
return isThinkingMode(value) ? value : "hide"
})

return {
mode,
set,
locked: () => Flag.OPENCODE_EXPERIMENTAL_MINIMAL_THINKING === true,
}
}
@@ -392,7 +392,7 @@ function AssistantReasoning(props: {
|
||||
const thinking = useThinkingMode()
|
||||
const [expanded, setExpanded] = createSignal(false)
|
||||
const content = createMemo(() => props.part.text.replace("[REDACTED]", "").trim())
|
||||
const inMinimal = createMemo(() => thinking.mode() === "minimal")
|
||||
const inMinimal = createMemo(() => thinking.mode() === "hide")
|
||||
// v2 reasoning parts have no per-part `time.end` (see SessionMessageAssistantReasoning
|
||||
// in the v2 SDK); we settle on parent-message completion instead.
|
||||
const isDone = createMemo(() => props.completedAt() !== undefined)
|
||||
@@ -404,7 +404,7 @@ function AssistantReasoning(props: {
|
||||
}
|
||||
|
||||
return (
|
||||
<Show when={content() && thinking.mode() !== "hide"}>
|
||||
<Show when={content()}>
|
||||
<Switch>
|
||||
<Match when={!inMinimal() || expanded()}>
|
||||
<box
|
||||
|
||||
@@ -218,7 +218,7 @@ export function Session() {
|
||||
const [conceal, setConceal] = createSignal(true)
|
||||
const thinking = useThinkingMode()
|
||||
const thinkingMode = thinking.mode
|
||||
const showThinking = createMemo(() => thinkingMode() !== "hide")
|
||||
const showThinking = createMemo(() => true)
|
||||
const [timestamps, setTimestamps] = kv.signal<"hide" | "show">("timestamps", "hide")
|
||||
const [showDetails, setShowDetails] = kv.signal("tool_details_visibility", true)
|
||||
const [showAssistantMetadata, _setShowAssistantMetadata] = kv.signal("assistant_metadata_visibility", true)
|
||||
@@ -689,9 +689,8 @@ export function Session() {
|
||||
{
|
||||
title: (() => {
|
||||
const next = nextThinkingMode(thinkingMode())
|
||||
if (next === "minimal") return "Switch thinking to minimal"
|
||||
if (next === "hide") return "Hide thinking"
|
||||
return "Show thinking"
|
||||
if (next === "hide") return "Collapse thinking"
|
||||
return "Expand thinking"
|
||||
})(),
|
||||
value: "session.toggle.thinking",
|
||||
category: "Session",
|
||||
@@ -700,16 +699,6 @@ export function Session() {
|
||||
aliases: ["toggle-thinking"],
|
||||
},
|
||||
run: () => {
|
||||
// Env override forces minimal for the process. Updating KV here would
|
||||
// silently diverge from what's rendered; tell the user instead.
|
||||
if (thinking.locked()) {
|
||||
toast.show({
|
||||
message: "Thinking mode is locked to minimal by OPENCODE_EXPERIMENTAL_MINIMAL_THINKING",
|
||||
variant: "info",
|
||||
})
|
||||
dialog.clear()
|
||||
return
|
||||
}
|
||||
thinking.set(nextThinkingMode(thinkingMode()))
|
||||
dialog.clear()
|
||||
},
|
||||
@@ -1512,7 +1501,7 @@ const PART_MAPPING = {
|
||||
function ReasoningPart(props: { last: boolean; part: ReasoningPart; message: AssistantMessage }) {
|
||||
const { theme, subtleSyntax } = useTheme()
|
||||
const ctx = use()
|
||||
// Collapsed by default in minimal mode: a single line throughout, so the
|
||||
// Collapsed by default in hide mode: a single line throughout, so the
|
||||
// layout never shifts. Click to open the full markdown block, click to close.
|
||||
const [expanded, setExpanded] = createSignal(false)
|
||||
|
||||
@@ -1523,7 +1512,7 @@ function ReasoningPart(props: { last: boolean; part: ReasoningPart; message: Ass
|
||||
// Reasoning is finalized when the server sets `time.end` (see processor.ts).
|
||||
// Flips independently of the parent message completing.
|
||||
const isDone = createMemo(() => props.part.time.end !== undefined)
|
||||
const inMinimal = createMemo(() => ctx.thinkingMode() === "minimal")
|
||||
const inMinimal = createMemo(() => ctx.thinkingMode() === "hide")
|
||||
const duration = createMemo(() => {
|
||||
const end = props.part.time.end
|
||||
return end === undefined ? 0 : Math.max(0, end - props.part.time.start)
|
||||
@@ -1539,10 +1528,10 @@ function ReasoningPart(props: { last: boolean; part: ReasoningPart; message: Ass
|
||||
}
|
||||
|
||||
return (
|
||||
<Show when={content() && ctx.thinkingMode() !== "hide"}>
|
||||
<Show when={content()}>
|
||||
<Switch>
|
||||
<Match when={!inMinimal() || expanded()}>
|
||||
{/* Full markdown block: `show` mode, or `minimal` after the user opens it. */}
|
||||
{/* Full markdown block: `show` mode, or `hide` after the user opens it. */}
|
||||
<box
|
||||
id={"text-" + props.part.id}
|
||||
paddingLeft={2}
|
||||
@@ -1558,7 +1547,7 @@ function ReasoningPart(props: { last: boolean; part: ReasoningPart; message: Ass
|
||||
drawUnstyledText={false}
|
||||
streaming={true}
|
||||
syntaxStyle={subtleSyntax()}
|
||||
content={(inMinimal() ? "▼ " : "") + "_Thinking:_ " + content()}
|
||||
content={(inMinimal() ? "▼ " : "") + (isDone() ? "_Thought:_ " : "_Thinking:_ ") + content()}
|
||||
conceal={ctx.conceal()}
|
||||
fg={theme.textMuted}
|
||||
/>
|
||||
|
||||
@@ -9,8 +9,6 @@ import { errorMessage } from "@/util/error"
|
||||
import { withTimeout } from "@/util/timeout"
|
||||
import { withNetworkOptions, resolveNetworkOptionsNoConfig } from "@/cli/network"
|
||||
import { Filesystem } from "@/util/filesystem"
|
||||
import { ServerAuth } from "@/server/auth"
|
||||
import { ServerDiscovery } from "@/cli/server-discovery"
|
||||
import type { GlobalEvent } from "@opencode-ai/sdk/v2"
|
||||
import type { EventSource } from "./context/sdk"
|
||||
import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32"
|
||||
@@ -199,26 +197,16 @@ export const TuiThreadCommand = cmd({
|
||||
network.mdns ||
|
||||
network.port !== 0 ||
|
||||
network.hostname !== "127.0.0.1"
|
||||
const discovered = external ? undefined : await ServerDiscovery.find()
|
||||
|
||||
const transport = external
|
||||
? {
|
||||
url: (await client.call("server", network)).url,
|
||||
fetch: undefined,
|
||||
headers: ServerAuth.headers(),
|
||||
events: undefined,
|
||||
}
|
||||
: discovered
|
||||
? {
|
||||
url: discovered,
|
||||
fetch: undefined,
|
||||
headers: ServerAuth.headers(),
|
||||
events: undefined,
|
||||
}
|
||||
: {
|
||||
url: "http://opencode.internal",
|
||||
fetch: createWorkerFetch(client),
|
||||
headers: undefined,
|
||||
events: createEventSource(client),
|
||||
}
|
||||
|
||||
@@ -228,7 +216,6 @@ export const TuiThreadCommand = cmd({
|
||||
sessionID: args.session,
|
||||
directory: cwd,
|
||||
fetch: transport.fetch,
|
||||
headers: transport.headers,
|
||||
})
|
||||
} catch (error) {
|
||||
UI.error(errorMessage(error))
|
||||
@@ -252,7 +239,6 @@ export const TuiThreadCommand = cmd({
|
||||
config,
|
||||
directory: cwd,
|
||||
fetch: transport.fetch,
|
||||
headers: transport.headers,
|
||||
events: transport.events,
|
||||
args: {
|
||||
continue: args.continue,
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import { TextareaRenderable, TextAttributes } from "@opentui/core"
|
||||
import { useTheme } from "../context/theme"
|
||||
import { useDialog, type DialogContext } from "./dialog"
|
||||
import { Show, createEffect, onMount, type JSX } from "solid-js"
|
||||
import { Show, createEffect, createSignal, onMount, type JSX } from "solid-js"
|
||||
import { Spinner } from "../component/spinner"
|
||||
import { useTuiConfig } from "../context/tui-config"
|
||||
import { useBindings, useCommandShortcut } from "../keymap"
|
||||
|
||||
export type DialogPromptProps = {
|
||||
title: string
|
||||
@@ -18,8 +20,32 @@ export type DialogPromptProps = {
|
||||
export function DialogPrompt(props: DialogPromptProps) {
|
||||
const dialog = useDialog()
|
||||
const { theme } = useTheme()
|
||||
const tuiConfig = useTuiConfig()
|
||||
const submitShortcut = useCommandShortcut("dialog.prompt.submit")
|
||||
const [textareaTarget, setTextareaTarget] = createSignal<TextareaRenderable>()
|
||||
let textarea: TextareaRenderable
|
||||
|
||||
function confirm() {
|
||||
if (props.busy) return
|
||||
props.onConfirm?.(textarea.plainText)
|
||||
}
|
||||
|
||||
useBindings(() => ({
|
||||
target: textareaTarget,
|
||||
enabled: textareaTarget() !== undefined && !props.busy,
|
||||
// Dialog form semantics must win over the global managed textarea input layer.
|
||||
priority: 1,
|
||||
commands: [
|
||||
{
|
||||
name: "dialog.prompt.submit",
|
||||
title: "Submit dialog prompt",
|
||||
category: "Dialog",
|
||||
run: confirm,
|
||||
},
|
||||
],
|
||||
bindings: tuiConfig.keybinds.gather("dialog.prompt", ["dialog.prompt.submit"]),
|
||||
}))
|
||||
|
||||
onMount(() => {
|
||||
dialog.setSize("medium")
|
||||
setTimeout(() => {
|
||||
@@ -59,13 +85,10 @@ export function DialogPrompt(props: DialogPromptProps) {
|
||||
<box gap={1}>
|
||||
{props.description}
|
||||
<textarea
|
||||
onSubmit={() => {
|
||||
if (props.busy) return
|
||||
props.onConfirm?.(textarea.plainText)
|
||||
}}
|
||||
height={3}
|
||||
ref={(val: TextareaRenderable) => {
|
||||
textarea = val
|
||||
setTextareaTarget(val)
|
||||
}}
|
||||
initialValue={props.value}
|
||||
placeholder={props.placeholder ?? "Enter text"}
|
||||
@@ -80,9 +103,11 @@ export function DialogPrompt(props: DialogPromptProps) {
|
||||
</box>
|
||||
<box paddingBottom={1} gap={1} flexDirection="row">
|
||||
<Show when={!props.busy} fallback={<text fg={theme.textMuted}>processing...</text>}>
|
||||
<text fg={theme.text}>
|
||||
enter <span style={{ fg: theme.textMuted }}>submit</span>
|
||||
</text>
|
||||
<Show when={submitShortcut()}>
|
||||
<text fg={theme.text}>
|
||||
{submitShortcut()} <span style={{ fg: theme.textMuted }}>submit</span>
|
||||
</text>
|
||||
</Show>
|
||||
</Show>
|
||||
</box>
|
||||
</box>
|
||||
|
||||
@@ -2,16 +2,9 @@ import { NamedError } from "@opencode-ai/core/util/error"
|
||||
import { errorFormat } from "@/util/error"
|
||||
import { isRecord } from "@/util/record"
|
||||
|
||||
interface ErrorLike {
|
||||
name?: string
|
||||
_tag?: string
|
||||
message?: string
|
||||
data?: Record<string, unknown>
|
||||
}
|
||||
|
||||
type ConfigIssue = { message: string; path: string[] }
|
||||
|
||||
function isTaggedError(error: unknown, tag: string): boolean {
|
||||
function isTaggedError(error: unknown, tag: string): error is Record<string, unknown> {
|
||||
return isRecord(error) && error._tag === tag
|
||||
}
|
||||
|
||||
@@ -39,22 +32,27 @@ function configIssues(input: Record<string, unknown>): ConfigIssue[] {
|
||||
: []
|
||||
}
|
||||
|
||||
export function FormatError(input: unknown) {
|
||||
export function FormatError(input: unknown): string | undefined {
|
||||
if (input instanceof Error && isRecord(input.cause) && "body" in input.cause) {
|
||||
const formatted = FormatError(input.cause.body)
|
||||
if (formatted) return formatted
|
||||
}
|
||||
|
||||
// CliError: domain failure surfaced from an effectCmd handler via fail("...")
|
||||
if (isTaggedError(input, "CliError")) {
|
||||
const data = input as ErrorLike & { exitCode?: number }
|
||||
if (data.exitCode != null) process.exitCode = data.exitCode
|
||||
return data.message ?? ""
|
||||
if (typeof input.exitCode === "number") process.exitCode = input.exitCode
|
||||
return stringField(input, "message") ?? ""
|
||||
}
|
||||
|
||||
// MCPFailed: { name: string }
|
||||
if (NamedError.hasName(input, "MCPFailed")) {
|
||||
return `MCP server "${(input as ErrorLike).data?.name}" failed. Note, opencode does not support MCP authentication yet.`
|
||||
const data = isRecord(input) && isRecord(input.data) ? stringField(input.data, "name") : undefined
|
||||
return `MCP server "${data}" failed. Note, opencode does not support MCP authentication yet.`
|
||||
}
|
||||
|
||||
// AccountServiceError, AccountTransportError: TaggedErrorClass
|
||||
if (isTaggedError(input, "AccountServiceError") || isTaggedError(input, "AccountTransportError")) {
|
||||
return (input as ErrorLike).message ?? ""
|
||||
return stringField(input, "message") ?? ""
|
||||
}
|
||||
|
||||
// ProviderModelNotFoundError: { providerID: string, modelID: string, suggestions?: string[] }
|
||||
@@ -64,7 +62,7 @@ export function FormatError(input: unknown) {
|
||||
? providerModelNotFound.suggestions.filter((x) => typeof x === "string")
|
||||
: []
|
||||
return [
|
||||
`Model not found: ${providerModelNotFound.providerID}/${providerModelNotFound.modelID}`,
|
||||
`Model not found: ${stringField(providerModelNotFound, "providerID")}/${stringField(providerModelNotFound, "modelID")}`,
|
||||
...(suggestions.length ? ["Did you mean: " + suggestions.join(", ")] : []),
|
||||
`Try: \`opencode models\` to list available models`,
|
||||
`Or check your config (opencode.json) provider/model names`,
|
||||
@@ -112,6 +110,7 @@ export function FormatError(input: unknown) {
|
||||
if (isTaggedError(input, "UICancelledError") || NamedError.hasName(input, "UICancelledError")) {
|
||||
return ""
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
export function FormatUnknownError(input: unknown): string {
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
export * as ServerDiscovery from "./server-discovery"
|
||||
|
||||
import { makeRuntime } from "@/effect/run-service"
|
||||
import { ServerAuth } from "@/server/auth"
|
||||
import { AppFileSystem } from "@opencode-ai/core/filesystem"
|
||||
import { Global } from "@opencode-ai/core/global"
|
||||
import { Context, Effect, Layer, Option, Schema } from "effect"
|
||||
import { readFileSync, unlinkSync } from "fs"
|
||||
import path from "path"
|
||||
|
||||
export const file = path.join(Global.Path.state, "server.json")
|
||||
|
||||
const Entry = Schema.Struct({
|
||||
url: Schema.String,
|
||||
pid: Schema.Number,
|
||||
})
|
||||
type Entry = typeof Entry.Type
|
||||
const decodeEntry = Schema.decodeUnknownOption(Entry)
|
||||
|
||||
export interface Interface {
|
||||
readonly write: (url: URL) => Effect.Effect<void>
|
||||
readonly remove: () => Effect.Effect<void>
|
||||
readonly find: () => Effect.Effect<string | undefined>
|
||||
}
|
||||
|
||||
export class Service extends Context.Service<Service, Interface>()("@opencode/CliServerDiscovery") {}
|
||||
|
||||
export const layer = Layer.effect(
|
||||
Service,
|
||||
Effect.gen(function* () {
|
||||
const fs = yield* AppFileSystem.Service
|
||||
|
||||
const read = Effect.fn("CliServerDiscovery.read")(function* () {
|
||||
const entry = yield* fs.readJson(file).pipe(Effect.catch(() => Effect.succeed(undefined)))
|
||||
return Option.getOrUndefined(decodeEntry(entry))
|
||||
})
|
||||
|
||||
const remove = Effect.fn("CliServerDiscovery.remove")(function* () {
|
||||
const entry = yield* read()
|
||||
if (entry?.pid !== process.pid) return
|
||||
yield* fs.remove(file).pipe(Effect.ignore)
|
||||
})
|
||||
|
||||
const removeStale = Effect.fn("CliServerDiscovery.removeStale")(function* (entry: Entry) {
|
||||
const current = yield* read()
|
||||
if (current?.pid !== entry.pid || current.url !== entry.url) return
|
||||
yield* fs.remove(file).pipe(Effect.ignore)
|
||||
})
|
||||
|
||||
return Service.of({
|
||||
write: Effect.fn("CliServerDiscovery.write")(function* (url) {
|
||||
yield* fs.writeJson(file, { url: localURL(url).toString(), pid: process.pid }, 0o600).pipe(Effect.orDie)
|
||||
}),
|
||||
remove,
|
||||
find: Effect.fn("CliServerDiscovery.find")(function* () {
|
||||
const entry = yield* read()
|
||||
if (!entry) return undefined
|
||||
const url = yield* healthy(entry.url)
|
||||
if (url) return url
|
||||
yield* removeStale(entry)
|
||||
}),
|
||||
})
|
||||
}),
|
||||
)
|
||||
|
||||
export const defaultLayer = layer.pipe(Layer.provide(AppFileSystem.defaultLayer))
|
||||
|
||||
const { runPromise } = makeRuntime(Service, defaultLayer)
|
||||
|
||||
export const find = () => runPromise((discovery) => discovery.find())
|
||||
|
||||
export function removeSync() {
|
||||
const entry = readSync()
|
||||
if (entry?.pid !== process.pid) return
|
||||
try {
|
||||
unlinkSync(file)
|
||||
} catch {}
|
||||
}
|
||||
|
||||
function readSync() {
|
||||
try {
|
||||
return Option.getOrUndefined(decodeEntry(JSON.parse(readFileSync(file, "utf8"))))
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
function healthy(input: string) {
|
||||
return Effect.tryPromise({
|
||||
try: async () => {
|
||||
const url = new URL(input)
|
||||
if (url.protocol !== "http:" && url.protocol !== "https:") return undefined
|
||||
const response = await fetch(new URL("/global/health", url), {
|
||||
headers: ServerAuth.headers(),
|
||||
signal: AbortSignal.timeout(1000),
|
||||
})
|
||||
if (!response.ok) return undefined
|
||||
const body = (await response.json()) as unknown
|
||||
if (typeof body === "object" && body !== null && "healthy" in body && body.healthy === true) {
|
||||
return url.toString()
|
||||
}
|
||||
},
|
||||
catch: () => undefined,
|
||||
}).pipe(Effect.catch(() => Effect.succeed(undefined)))
|
||||
}
|
||||
|
||||
function localURL(url: URL) {
|
||||
const result = new URL(url)
|
||||
if (result.hostname === "0.0.0.0") result.hostname = "127.0.0.1"
|
||||
if (result.hostname === "::") result.hostname = "::1"
|
||||
return result
|
||||
}
|
||||
@@ -50,6 +50,10 @@ export class Service extends ConfigService.Service<Service>()("@opencode/Runtime
|
||||
experimentalIconDiscovery: enabledByExperimental("OPENCODE_EXPERIMENTAL_ICON_DISCOVERY"),
|
||||
outputTokenMax: positiveInteger("OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX"),
|
||||
bashDefaultTimeoutMs: positiveInteger("OPENCODE_EXPERIMENTAL_BASH_DEFAULT_TIMEOUT_MS"),
|
||||
experimentalNativeLlm: Config.all({
|
||||
enabled: bool("OPENCODE_EXPERIMENTAL_NATIVE_LLM"),
|
||||
legacy: Config.string("OPENCODE_LLM_RUNTIME").pipe(Config.withDefault("")),
|
||||
}).pipe(Config.map((flags) => flags.enabled || flags.legacy === "native")),
|
||||
client: Config.string("OPENCODE_CLIENT").pipe(Config.withDefault("cli")),
|
||||
}) {}
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { NamedError } from "@opencode-ai/core/util/error"
|
||||
import * as Log from "@opencode-ai/core/util/log"
|
||||
import { ConfigError } from "@/config/error"
|
||||
import { Cause, Effect } from "effect"
|
||||
import { HttpRouter, HttpServerError, HttpServerRespondable, HttpServerResponse } from "effect/unstable/http"
|
||||
|
||||
@@ -18,6 +19,13 @@ export const errorLayer = HttpRouter.middleware<{ handles: unknown }>()((effect)
|
||||
if (!defect) return Effect.failCause(cause)
|
||||
|
||||
const error = defect.defect
|
||||
if (
|
||||
error instanceof NamedError &&
|
||||
(ConfigError.InvalidError.isInstance(error) || ConfigError.JsonError.isInstance(error))
|
||||
) {
|
||||
return Effect.succeed(HttpServerResponse.jsonUnsafe(error.toObject(), { status: 400 }))
|
||||
}
|
||||
|
||||
log.error("failed", { error, cause: Cause.pretty(cause) })
|
||||
|
||||
return Effect.succeed(
|
||||
|
||||
@@ -2,7 +2,10 @@ import { Provider } from "@/provider/provider"
|
||||
import * as Log from "@opencode-ai/core/util/log"
|
||||
import { Context, Effect, Layer, Record } from "effect"
|
||||
import * as Stream from "effect/Stream"
|
||||
import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
|
||||
import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool as aiTool, jsonSchema } from "ai"
|
||||
import type { LLMEvent } from "@opencode-ai/llm"
|
||||
import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
|
||||
import type { LLMClientService } from "@opencode-ai/llm/route"
|
||||
import { mergeDeep } from "remeda"
|
||||
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
|
||||
import { ProviderTransform } from "@/provider/transform"
|
||||
@@ -23,10 +26,11 @@ import { EffectBridge } from "@/effect/bridge"
|
||||
import { RuntimeFlags } from "@/effect/runtime-flags"
|
||||
import * as Option from "effect/Option"
|
||||
import * as OtelTracer from "@effect/opentelemetry/Tracer"
|
||||
import { LLMAISDK } from "./llm/ai-sdk"
|
||||
import { LLMNativeRuntime } from "./llm/native-runtime"
|
||||
|
||||
const log = Log.create({ service: "llm" })
|
||||
export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX
|
||||
type Result = Awaited<ReturnType<typeof streamText>>
|
||||
|
||||
// Avoid re-instantiating remeda's deep merge types in this hot LLM path; the runtime behavior is still mergeDeep.
|
||||
const mergeOptions = (target: Record<string, any>, source: Record<string, any> | undefined): Record<string, any> =>
|
||||
@@ -51,10 +55,8 @@ export type StreamRequest = StreamInput & {
|
||||
abort: AbortSignal
|
||||
}
|
||||
|
||||
export type Event = Result["fullStream"] extends AsyncIterable<infer T> ? T : never
|
||||
|
||||
export interface Interface {
|
||||
readonly stream: (input: StreamInput) => Stream.Stream<Event, unknown>
|
||||
readonly stream: (input: StreamInput) => Stream.Stream<LLMEvent, unknown>
|
||||
}
|
||||
|
||||
export class Service extends Context.Service<Service, Interface>()("@opencode/LLM") {}
|
||||
@@ -62,7 +64,13 @@ export class Service extends Context.Service<Service, Interface>()("@opencode/LL
|
||||
const live: Layer.Layer<
|
||||
Service,
|
||||
never,
|
||||
Auth.Service | Config.Service | Provider.Service | Plugin.Service | Permission.Service | RuntimeFlags.Service
|
||||
| Auth.Service
|
||||
| Config.Service
|
||||
| Provider.Service
|
||||
| Plugin.Service
|
||||
| Permission.Service
|
||||
| LLMClientService
|
||||
| RuntimeFlags.Service
|
||||
> = Layer.effect(
|
||||
Service,
|
||||
Effect.gen(function* () {
|
||||
@@ -71,6 +79,7 @@ const live: Layer.Layer<
|
||||
const provider = yield* Provider.Service
|
||||
const plugin = yield* Plugin.Service
|
||||
const perm = yield* Permission.Service
|
||||
const llmClient = yield* LLMClient.Service
|
||||
const flags = yield* RuntimeFlags.Service
|
||||
|
||||
const run = Effect.fn("LLM.run")(function* (input: StreamRequest) {
|
||||
@@ -202,7 +211,7 @@ const live: Layer.Layer<
|
||||
Object.keys(tools).length === 0 &&
|
||||
hasToolCalls(input.messages)
|
||||
) {
|
||||
tools["_noop"] = tool({
|
||||
tools["_noop"] = aiTool({
|
||||
description: "Do not call this tool. It exists only for API compatibility and must never be invoked.",
|
||||
inputSchema: jsonSchema({
|
||||
type: "object",
|
||||
@@ -322,86 +331,141 @@ const live: Layer.Layer<
|
||||
? (yield* InstanceState.context).project.id
|
||||
: undefined
|
||||
|
||||
return streamText({
|
||||
onError(error) {
|
||||
l.error("stream error", {
|
||||
error,
|
||||
})
|
||||
},
|
||||
async experimental_repairToolCall(failed) {
|
||||
const lower = failed.toolCall.toolName.toLowerCase()
|
||||
if (lower !== failed.toolCall.toolName && sortedTools[lower]) {
|
||||
l.info("repairing tool call", {
|
||||
tool: failed.toolCall.toolName,
|
||||
repaired: lower,
|
||||
const requestHeaders = {
|
||||
...(input.model.providerID.startsWith("opencode")
|
||||
? {
|
||||
...(opencodeProjectID ? { "x-opencode-project": opencodeProjectID } : {}),
|
||||
"x-opencode-session": input.sessionID,
|
||||
"x-opencode-request": input.user.id,
|
||||
"x-opencode-client": flags.client,
|
||||
"User-Agent": `opencode/${InstallationVersion}`,
|
||||
}
|
||||
: {
|
||||
"x-session-affinity": input.sessionID,
|
||||
...(input.parentSessionID ? { "x-parent-session-id": input.parentSessionID } : {}),
|
||||
"User-Agent": `opencode/${InstallationVersion}`,
|
||||
}),
|
||||
...input.model.headers,
|
||||
...headers,
|
||||
}
|
||||
|
||||
if (flags.experimentalNativeLlm) {
|
||||
const native = LLMNativeRuntime.stream({
|
||||
model: input.model,
|
||||
provider: item,
|
||||
auth: info,
|
||||
llmClient,
|
||||
isOpenaiOauth,
|
||||
system,
|
||||
messages,
|
||||
tools: sortedTools,
|
||||
toolChoice: input.toolChoice,
|
||||
temperature: params.temperature,
|
||||
topP: params.topP,
|
||||
topK: params.topK,
|
||||
maxOutputTokens: params.maxOutputTokens,
|
||||
providerOptions: params.options,
|
||||
headers: requestHeaders,
|
||||
abort: input.abort,
|
||||
})
|
||||
if (native.type === "supported") {
|
||||
yield* Effect.logInfo("llm runtime selected").pipe(
|
||||
Effect.annotateLogs({
|
||||
"llm.runtime": "native",
|
||||
"llm.provider": input.model.providerID,
|
||||
"llm.model": input.model.id,
|
||||
}),
|
||||
)
|
||||
return {
|
||||
type: "native" as const,
|
||||
stream: native.stream,
|
||||
}
|
||||
}
|
||||
yield* Effect.logInfo("llm runtime selected").pipe(
|
||||
Effect.annotateLogs({
|
||||
"llm.runtime": "ai-sdk",
|
||||
"llm.provider": input.model.providerID,
|
||||
"llm.model": input.model.id,
|
||||
"llm.native_unsupported_reason": native.reason,
|
||||
}),
|
||||
)
|
||||
l.info("native runtime unavailable; falling back to ai-sdk", { reason: native.reason })
|
||||
}
|
||||
|
||||
yield* Effect.logInfo("llm runtime selected").pipe(
|
||||
Effect.annotateLogs({
|
||||
"llm.runtime": "ai-sdk",
|
||||
"llm.provider": input.model.providerID,
|
||||
"llm.model": input.model.id,
|
||||
}),
|
||||
)
|
||||
return {
|
||||
type: "ai-sdk" as const,
|
||||
result: streamText({
|
||||
onError(error) {
|
||||
l.error("stream error", {
|
||||
error,
|
||||
})
|
||||
},
|
||||
async experimental_repairToolCall(failed) {
|
||||
const lower = failed.toolCall.toolName.toLowerCase()
|
||||
if (lower !== failed.toolCall.toolName && sortedTools[lower]) {
|
||||
l.info("repairing tool call", {
|
||||
tool: failed.toolCall.toolName,
|
||||
repaired: lower,
|
||||
})
|
||||
return {
|
||||
...failed.toolCall,
|
||||
toolName: lower,
|
||||
}
|
||||
}
|
||||
return {
|
||||
...failed.toolCall,
|
||||
toolName: lower,
|
||||
}
|
||||
}
|
||||
return {
|
||||
...failed.toolCall,
|
||||
input: JSON.stringify({
|
||||
tool: failed.toolCall.toolName,
|
||||
error: failed.error.message,
|
||||
}),
|
||||
toolName: "invalid",
|
||||
}
|
||||
},
|
||||
temperature: params.temperature,
|
||||
topP: params.topP,
|
||||
topK: params.topK,
|
||||
providerOptions: ProviderTransform.providerOptions(input.model, params.options),
|
||||
activeTools: Object.keys(sortedTools).filter((x) => x !== "invalid"),
|
||||
tools: sortedTools,
|
||||
toolChoice: input.toolChoice,
|
||||
maxOutputTokens: params.maxOutputTokens,
|
||||
abortSignal: input.abort,
|
||||
headers: {
|
||||
...(input.model.providerID.startsWith("opencode")
|
||||
? {
|
||||
"x-opencode-project": opencodeProjectID,
|
||||
"x-opencode-session": input.sessionID,
|
||||
"x-opencode-request": input.user.id,
|
||||
"x-opencode-client": flags.client,
|
||||
"User-Agent": `opencode/${InstallationVersion}`,
|
||||
}
|
||||
: {
|
||||
"x-session-affinity": input.sessionID,
|
||||
...(input.parentSessionID ? { "x-parent-session-id": input.parentSessionID } : {}),
|
||||
"User-Agent": `opencode/${InstallationVersion}`,
|
||||
input: JSON.stringify({
|
||||
tool: failed.toolCall.toolName,
|
||||
error: failed.error.message,
|
||||
}),
|
||||
...input.model.headers,
|
||||
...headers,
|
||||
},
|
||||
maxRetries: input.retries ?? 0,
|
||||
messages,
|
||||
model: wrapLanguageModel({
|
||||
model: language,
|
||||
middleware: [
|
||||
{
|
||||
specificationVersion: "v3" as const,
|
||||
async transformParams(args) {
|
||||
if (args.type === "stream") {
|
||||
// @ts-expect-error
|
||||
args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options)
|
||||
}
|
||||
return args.params
|
||||
},
|
||||
},
|
||||
],
|
||||
}),
|
||||
experimental_telemetry: {
|
||||
isEnabled: cfg.experimental?.openTelemetry,
|
||||
functionId: "session.llm",
|
||||
tracer: telemetryTracer,
|
||||
metadata: {
|
||||
userId: cfg.username ?? "unknown",
|
||||
sessionId: input.sessionID,
|
||||
toolName: "invalid",
|
||||
}
|
||||
},
|
||||
},
|
||||
})
|
||||
temperature: params.temperature,
|
||||
topP: params.topP,
|
||||
topK: params.topK,
|
||||
providerOptions: ProviderTransform.providerOptions(input.model, params.options),
|
||||
activeTools: Object.keys(sortedTools).filter((x) => x !== "invalid"),
|
||||
tools: sortedTools,
|
||||
toolChoice: input.toolChoice,
|
||||
maxOutputTokens: params.maxOutputTokens,
|
||||
abortSignal: input.abort,
|
||||
headers: requestHeaders,
|
||||
maxRetries: input.retries ?? 0,
|
||||
messages,
|
||||
model: wrapLanguageModel({
|
||||
model: language,
|
||||
middleware: [
|
||||
{
|
||||
specificationVersion: "v3" as const,
|
||||
async transformParams(args) {
|
||||
if (args.type === "stream") {
|
||||
// @ts-expect-error
|
||||
args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options)
|
||||
}
|
||||
return args.params
|
||||
},
|
||||
},
|
||||
],
|
||||
}),
|
||||
experimental_telemetry: {
|
||||
isEnabled: cfg.experimental?.openTelemetry,
|
||||
functionId: "session.llm",
|
||||
tracer: telemetryTracer,
|
||||
metadata: {
|
||||
userId: cfg.username ?? "unknown",
|
||||
sessionId: input.sessionID,
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
})
|
||||
|
||||
const stream: Interface["stream"] = (input) =>
|
||||
@@ -415,7 +479,15 @@ const live: Layer.Layer<
|
||||
|
||||
const result = yield* run({ ...input, abort: ctrl.signal })
|
||||
|
||||
return Stream.fromAsyncIterable(result.fullStream, (e) => (e instanceof Error ? e : new Error(String(e))))
|
||||
if (result.type === "native") return result.stream
|
||||
|
||||
const state = LLMAISDK.adapterState()
|
||||
return Stream.fromAsyncIterable(result.result.fullStream, (e) =>
|
||||
e instanceof Error ? e : new Error(String(e)),
|
||||
).pipe(
|
||||
Stream.mapEffect((event) => LLMAISDK.toLLMEvents(state, event)),
|
||||
Stream.flatMap((events) => Stream.fromIterable(events)),
|
||||
)
|
||||
}),
|
||||
),
|
||||
)
|
||||
@@ -432,6 +504,7 @@ export const defaultLayer = Layer.suspend(() =>
|
||||
Layer.provide(Config.defaultLayer),
|
||||
Layer.provide(Provider.defaultLayer),
|
||||
Layer.provide(Plugin.defaultLayer),
|
||||
Layer.provide(LLMClient.layer.pipe(Layer.provide(RequestExecutor.defaultLayer))),
|
||||
Layer.provide(RuntimeFlags.defaultLayer),
|
||||
),
|
||||
)
|
||||
|
||||
packages/opencode/src/session/llm/AGENTS.md — new file, 16 lines

@@ -0,0 +1,16 @@
# Session LLM Runtime Boundaries

`../llm.ts` is the opencode session LLM service. It owns opencode concerns: auth, config, model/provider resolution, plugins, permissions, telemetry headers, and runtime selection.

This folder contains adapters behind that service boundary:

- `ai-sdk.ts` converts AI SDK `fullStream` parts into `@opencode-ai/llm` `LLMEvent`s. This is the default runtime path.
- `native-request.ts` converts opencode's normalized session input into a native `@opencode-ai/llm` `LLMRequest`. It does not execute requests.
- `native-runtime.ts` is the opt-in native runtime adapter. It decides whether a selected model is supported, builds the native request, bridges opencode tools into native executable tools, and delegates transport to `LLMClient` / `RequestExecutor`.

Safety boundary:

- AI SDK remains the default.
- `OPENCODE_EXPERIMENTAL_NATIVE_LLM=true` is an opt-in hint, not a global replacement. The legacy `OPENCODE_LLM_RUNTIME=native` env var is still accepted by `RuntimeFlags` for local testing (see the sketch after this list).
- Native execution currently runs only for OpenAI-compatible Responses models exposed through `@ai-sdk/openai`: direct `openai` API-key auth and console-managed `opencode`/Zen API-key config.
- Unsupported providers, OpenAI OAuth, and missing API-key cases fall back to AI SDK.
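How the opt-in flag and the fallback are wired is visible elsewhere in this
compare; the snippet below is abridged and slightly rearranged from the
`RuntimeFlags` and `llm.ts` diffs above:

```ts
// RuntimeFlags: either the new boolean flag or the legacy env var enables the
// native runtime.
experimentalNativeLlm: Config.all({
  enabled: bool("OPENCODE_EXPERIMENTAL_NATIVE_LLM"),
  legacy: Config.string("OPENCODE_LLM_RUNTIME").pipe(Config.withDefault("")),
}).pipe(Config.map((flags) => flags.enabled || flags.legacy === "native")),

// llm.ts: ask the native adapter whether this model is supported; otherwise
// log the reason and fall back to the AI SDK path.
if (flags.experimentalNativeLlm) {
  const native = LLMNativeRuntime.stream({ /* normalized request */ })
  if (native.type === "supported") {
    return { type: "native" as const, stream: native.stream }
  }
  l.info("native runtime unavailable; falling back to ai-sdk", { reason: native.reason })
}
```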

packages/opencode/src/session/llm/ai-sdk.ts — new file, 253 lines

@@ -0,0 +1,253 @@
|
||||
import { FinishReason, LLMEvent, ProviderMetadata, ToolResultValue } from "@opencode-ai/llm"
|
||||
import { Effect, Schema } from "effect"
|
||||
import { type streamText } from "ai"
|
||||
import { errorMessage } from "@/util/error"
|
||||
|
||||
type Result = Awaited<ReturnType<typeof streamText>>
|
||||
type AISDKEvent = Result["fullStream"] extends AsyncIterable<infer T> ? T : never
|
||||
|
||||
export function adapterState() {
|
||||
return {
|
||||
step: 0,
|
||||
text: 0,
|
||||
reasoning: 0,
|
||||
currentTextID: undefined as string | undefined,
|
||||
currentReasoningID: undefined as string | undefined,
|
||||
toolNames: {} as Record<string, string>,
|
||||
}
|
||||
}
|
||||
|
||||
function finishReason(value: string | undefined): FinishReason {
|
||||
return Schema.is(FinishReason)(value) ? value : "unknown"
|
||||
}
|
||||
|
||||
function providerMetadata(value: unknown): ProviderMetadata | undefined {
|
||||
if (value == null) return undefined
|
||||
return Schema.is(ProviderMetadata)(value) ? value : undefined
|
||||
}
|
||||
|
||||
function usage(value: unknown) {
|
||||
if (!value || typeof value !== "object") return undefined
|
||||
const item = value as {
|
||||
inputTokens?: number
|
||||
outputTokens?: number
|
||||
totalTokens?: number
|
||||
reasoningTokens?: number
|
||||
cachedInputTokens?: number
|
||||
inputTokenDetails?: { cacheReadTokens?: number; cacheWriteTokens?: number }
|
||||
outputTokenDetails?: { reasoningTokens?: number }
|
||||
}
|
||||
const result = Object.fromEntries(
|
||||
Object.entries({
|
||||
inputTokens: item.inputTokens,
|
||||
outputTokens: item.outputTokens,
|
||||
totalTokens: item.totalTokens,
|
||||
reasoningTokens: item.outputTokenDetails?.reasoningTokens ?? item.reasoningTokens,
|
||||
cacheReadInputTokens: item.inputTokenDetails?.cacheReadTokens ?? item.cachedInputTokens,
|
||||
cacheWriteInputTokens: item.inputTokenDetails?.cacheWriteTokens,
|
||||
}).filter((entry) => entry[1] !== undefined),
|
||||
)
|
||||
return result
|
||||
}
|
||||
|
||||
function currentTextID(state: ReturnType<typeof adapterState>, id: string | undefined) {
|
||||
state.currentTextID = id ?? state.currentTextID ?? `text-${state.text++}`
|
||||
return state.currentTextID
|
||||
}
|
||||
|
||||
function currentReasoningID(state: ReturnType<typeof adapterState>, id: string | undefined) {
|
||||
state.currentReasoningID = id ?? state.currentReasoningID ?? `reasoning-${state.reasoning++}`
|
||||
return state.currentReasoningID
|
||||
}
|
||||
|
||||
export function toLLMEvents(
|
||||
state: ReturnType<typeof adapterState>,
|
||||
event: AISDKEvent,
|
||||
): Effect.Effect<ReadonlyArray<LLMEvent>, unknown> {
|
||||
switch (event.type) {
|
||||
case "start":
|
||||
return Effect.succeed([])
|
||||
|
||||
case "start-step":
|
||||
return Effect.succeed([LLMEvent.stepStart({ index: state.step })])
|
||||
|
||||
case "finish-step":
|
||||
return Effect.sync(() => [
|
||||
LLMEvent.stepFinish({
|
||||
index: state.step++,
|
||||
reason: finishReason(event.finishReason),
|
||||
usage: usage(event.usage),
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
])
|
||||
|
||||
case "finish":
|
||||
return Effect.sync(() => {
|
||||
state.toolNames = {}
|
||||
return [
|
||||
LLMEvent.finish({
|
||||
reason: finishReason(event.finishReason),
|
||||
usage: usage(event.totalUsage),
|
||||
providerMetadata: "providerMetadata" in event ? providerMetadata(event.providerMetadata) : undefined,
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "text-start":
|
||||
return Effect.sync(() => {
|
||||
state.currentTextID = currentTextID(state, event.id)
|
||||
return [
|
||||
LLMEvent.textStart({
|
||||
id: state.currentTextID,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "text-delta":
|
||||
return Effect.succeed([
|
||||
LLMEvent.textDelta({
|
||||
id: currentTextID(state, event.id),
|
||||
text: event.text,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
])
|
||||
|
||||
case "text-end":
|
||||
return Effect.sync(() => {
|
||||
const id = currentTextID(state, event.id)
|
||||
state.currentTextID = undefined
|
||||
return [
|
||||
LLMEvent.textEnd({
|
||||
id,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "reasoning-start":
|
||||
return Effect.sync(() => {
|
||||
state.currentReasoningID = currentReasoningID(state, event.id)
|
||||
return [
|
||||
LLMEvent.reasoningStart({
|
||||
id: state.currentReasoningID,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "reasoning-delta":
|
||||
return Effect.succeed([
|
||||
LLMEvent.reasoningDelta({
|
||||
id: currentReasoningID(state, event.id),
|
||||
text: event.text,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
])
|
||||
|
||||
case "reasoning-end":
|
||||
return Effect.sync(() => {
|
||||
const id = currentReasoningID(state, event.id)
|
||||
state.currentReasoningID = undefined
|
||||
return [
|
||||
LLMEvent.reasoningEnd({
|
||||
id,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "tool-input-start":
|
||||
return Effect.sync(() => {
|
||||
state.toolNames[event.id] = event.toolName
|
||||
return [
|
||||
LLMEvent.toolInputStart({
|
||||
id: event.id,
|
||||
name: event.toolName,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "tool-input-delta":
|
||||
return Effect.succeed([
|
||||
LLMEvent.toolInputDelta({
|
||||
id: event.id,
|
||||
name: state.toolNames[event.id] ?? "unknown",
|
||||
text: event.delta ?? "",
|
||||
}),
|
||||
])
|
||||
|
||||
case "tool-input-end":
|
||||
return Effect.succeed([
|
||||
LLMEvent.toolInputEnd({
|
||||
id: event.id,
|
||||
name: state.toolNames[event.id] ?? "unknown",
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
])
|
||||
|
||||
case "tool-call":
|
||||
return Effect.sync(() => {
|
||||
state.toolNames[event.toolCallId] = event.toolName
|
||||
return [
|
||||
LLMEvent.toolCall({
|
||||
id: event.toolCallId,
|
||||
name: event.toolName,
|
||||
input: event.input,
|
||||
providerExecuted: "providerExecuted" in event ? event.providerExecuted : undefined,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "tool-result":
|
||||
return Effect.sync(() => {
|
||||
const name = state.toolNames[event.toolCallId] ?? "unknown"
|
||||
delete state.toolNames[event.toolCallId]
|
||||
return [
|
||||
LLMEvent.toolResult({
|
||||
id: event.toolCallId,
|
||||
name,
|
||||
result: ToolResultValue.make(event.output),
|
||||
providerExecuted: "providerExecuted" in event ? event.providerExecuted : undefined,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "tool-error":
|
||||
return Effect.sync(() => {
|
||||
const name = state.toolNames[event.toolCallId] ?? ("toolName" in event ? event.toolName : "unknown")
|
||||
delete state.toolNames[event.toolCallId]
|
||||
return [
|
||||
LLMEvent.toolError({
|
||||
id: event.toolCallId,
|
||||
name,
|
||||
message: errorMessage(event.error),
|
||||
error: event.error,
|
||||
providerMetadata: providerMetadata(event.providerMetadata),
|
||||
}),
|
||||
]
|
||||
})
|
||||
|
||||
case "error":
|
||||
return Effect.fail(event.error)
|
||||
|
||||
case "abort":
|
||||
case "source":
|
||||
case "file":
|
||||
case "raw":
|
||||
case "tool-output-denied":
|
||||
case "tool-approval-request":
|
||||
return Effect.succeed([])
|
||||
|
||||
default: {
|
||||
const _exhaustive: never = event
|
||||
void _exhaustive
|
||||
return Effect.succeed([])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export * as LLMAISDK from "./ai-sdk"
|
||||
188 packages/opencode/src/session/llm/native-request.ts Normal file
@@ -0,0 +1,188 @@
|
||||
import type { JsonSchema, LLMRequest, ProviderMetadata } from "@opencode-ai/llm"
|
||||
import { LLM, Message, SystemPart, ToolCallPart, ToolDefinition, ToolResultPart } from "@opencode-ai/llm"
|
||||
import "@opencode-ai/llm/providers"
|
||||
import type { ModelMessage } from "ai"
|
||||
import type { Provider } from "@/provider/provider"
|
||||
import { isRecord } from "@/util/record"
|
||||
|
||||
type ToolInput = {
|
||||
readonly description?: string
|
||||
readonly inputSchema?: unknown
|
||||
}
|
||||
|
||||
export type RequestInput = {
|
||||
readonly model: Provider.Model
|
||||
readonly apiKey?: string
|
||||
readonly baseURL?: string
|
||||
readonly system?: readonly string[]
|
||||
readonly messages: readonly ModelMessage[]
|
||||
readonly tools?: Record<string, ToolInput>
|
||||
readonly toolChoice?: "auto" | "required" | "none"
|
||||
readonly temperature?: number
|
||||
readonly topP?: number
|
||||
readonly topK?: number
|
||||
readonly maxOutputTokens?: number
|
||||
readonly providerOptions?: LLMRequest["providerOptions"]
|
||||
readonly headers?: Record<string, string>
|
||||
}
|
||||
|
||||
const DEFAULT_BASE_URL: Record<string, string> = {
|
||||
"@ai-sdk/openai": "https://api.openai.com/v1",
|
||||
"@ai-sdk/anthropic": "https://api.anthropic.com/v1",
|
||||
"@ai-sdk/google": "https://generativelanguage.googleapis.com/v1beta",
|
||||
"@ai-sdk/amazon-bedrock": "https://bedrock-runtime.us-east-1.amazonaws.com",
|
||||
"@openrouter/ai-sdk-provider": "https://openrouter.ai/api/v1",
|
||||
}
|
||||
|
||||
const ROUTE: Record<string, string> = {
|
||||
"@ai-sdk/openai": "openai-responses",
|
||||
"@ai-sdk/azure": "azure-openai-responses",
|
||||
"@ai-sdk/anthropic": "anthropic-messages",
|
||||
"@ai-sdk/google": "gemini",
|
||||
"@ai-sdk/amazon-bedrock": "bedrock-converse",
|
||||
"@ai-sdk/openai-compatible": "openai-compatible-chat",
|
||||
"@openrouter/ai-sdk-provider": "openrouter",
|
||||
}
|
||||
|
||||
const providerMetadata = (value: unknown): ProviderMetadata | undefined => {
|
||||
if (!isRecord(value)) return undefined
|
||||
const result = Object.fromEntries(
|
||||
Object.entries(value).filter((entry): entry is [string, Record<string, unknown>] => isRecord(entry[1])),
|
||||
)
|
||||
return Object.keys(result).length === 0 ? undefined : result
|
||||
}
|
||||
|
||||
const textPart = (part: Record<string, unknown>) => ({
|
||||
type: "text" as const,
|
||||
text: typeof part.text === "string" ? part.text : "",
|
||||
providerMetadata: providerMetadata(part.providerOptions),
|
||||
})
|
||||
|
||||
const mediaPart = (part: Record<string, unknown>) => {
|
||||
if (typeof part.data !== "string" && !(part.data instanceof Uint8Array))
|
||||
throw new Error("Native LLM request adapter only supports file parts with string or Uint8Array data")
|
||||
return {
|
||||
type: "media" as const,
|
||||
mediaType: typeof part.mediaType === "string" ? part.mediaType : "application/octet-stream",
|
||||
data: part.data,
|
||||
filename: typeof part.filename === "string" ? part.filename : undefined,
|
||||
}
|
||||
}
|
||||
|
||||
const toolResult = (part: Record<string, unknown>) => {
|
||||
const output = isRecord(part.output) ? part.output : { type: "json", value: part.output }
|
||||
const type = output.type === "text" ? "text" : output.type === "error-text" ? "error" : "json"
|
||||
return ToolResultPart.make({
|
||||
id: typeof part.toolCallId === "string" ? part.toolCallId : "",
|
||||
name: typeof part.toolName === "string" ? part.toolName : "",
|
||||
result: "value" in output ? output.value : output,
|
||||
resultType: type,
|
||||
providerExecuted: typeof part.providerExecuted === "boolean" ? part.providerExecuted : undefined,
|
||||
providerMetadata: providerMetadata(part.providerOptions),
|
||||
})
|
||||
}
|
||||
|
||||
const contentPart = (part: unknown) => {
|
||||
if (!isRecord(part)) throw new Error("Native LLM request adapter only supports object content parts")
|
||||
if (part.type === "text") return textPart(part)
|
||||
if (part.type === "file") return mediaPart(part)
|
||||
if (part.type === "reasoning")
|
||||
return {
|
||||
type: "reasoning" as const,
|
||||
text: typeof part.text === "string" ? part.text : "",
|
||||
providerMetadata: providerMetadata(part.providerOptions),
|
||||
}
|
||||
if (part.type === "tool-call")
|
||||
return ToolCallPart.make({
|
||||
id: typeof part.toolCallId === "string" ? part.toolCallId : "",
|
||||
name: typeof part.toolName === "string" ? part.toolName : "",
|
||||
input: part.input,
|
||||
providerExecuted: typeof part.providerExecuted === "boolean" ? part.providerExecuted : undefined,
|
||||
providerMetadata: providerMetadata(part.providerOptions),
|
||||
})
|
||||
if (part.type === "tool-result") return toolResult(part)
|
||||
throw new Error(`Native LLM request adapter does not support ${String(part.type)} content parts`)
|
||||
}
|
||||
|
||||
const content = (value: ModelMessage["content"]) =>
|
||||
typeof value === "string" ? [{ type: "text" as const, text: value }] : value.map(contentPart)
|
||||
|
||||
const messages = (input: readonly ModelMessage[]) => {
|
||||
const system = input.flatMap((message) => (message.role === "system" ? [SystemPart.make(message.content)] : []))
|
||||
const messages = input.flatMap((message) => {
|
||||
if (message.role === "system") return []
|
||||
return [
|
||||
Message.make({
|
||||
role: message.role,
|
||||
content: content(message.content),
|
||||
native: isRecord(message.providerOptions) ? { providerOptions: message.providerOptions } : undefined,
|
||||
}),
|
||||
]
|
||||
})
|
||||
return { system, messages }
|
||||
}
|
||||
|
||||
const schema = (value: unknown): JsonSchema => {
|
||||
if (!isRecord(value)) return { type: "object", properties: {} }
|
||||
if (isRecord(value.jsonSchema)) return value.jsonSchema
|
||||
return value
|
||||
}
|
||||
|
||||
const tools = (input: Record<string, ToolInput> | undefined): ToolDefinition[] =>
|
||||
Object.entries(input ?? {}).map(([name, item]) =>
|
||||
ToolDefinition.make({
|
||||
name,
|
||||
description: item.description ?? "",
|
||||
inputSchema: schema(item.inputSchema),
|
||||
}),
|
||||
)
|
||||
|
||||
const generation = (input: RequestInput) => {
|
||||
const result = {
|
||||
temperature: input.temperature,
|
||||
topP: input.topP,
|
||||
topK: input.topK,
|
||||
maxTokens: input.maxOutputTokens,
|
||||
}
|
||||
return Object.values(result).some((value) => value !== undefined) ? result : undefined
|
||||
}
|
||||
|
||||
const baseURL = (model: Provider.Model) => {
|
||||
if (model.api.url) return model.api.url
|
||||
const fallback = DEFAULT_BASE_URL[model.api.npm]
|
||||
if (fallback) return fallback
|
||||
throw new Error(`Native LLM request adapter requires a base URL for ${model.providerID}/${model.id}`)
|
||||
}
|
||||
|
||||
export const model = (input: Provider.Model | RequestInput, headers?: Record<string, string>) => {
|
||||
const model = "model" in input ? input.model : input
|
||||
const route = ROUTE[model.api.npm]
|
||||
if (!route) throw new Error(`Native LLM request adapter does not support provider package ${model.api.npm}`)
|
||||
return LLM.model({
|
||||
id: model.api.id,
|
||||
provider: model.providerID,
|
||||
route,
|
||||
baseURL: "model" in input && input.baseURL ? input.baseURL : baseURL(model),
|
||||
apiKey: "model" in input ? input.apiKey : undefined,
|
||||
headers: Object.keys({ ...model.headers, ...headers }).length === 0 ? undefined : { ...model.headers, ...headers },
|
||||
limits: {
|
||||
context: model.limit.context,
|
||||
output: model.limit.output,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
export const request = (input: RequestInput) => {
|
||||
const converted = messages(input.messages)
|
||||
return LLM.request({
|
||||
model: model(input, input.headers),
|
||||
system: [...(input.system ?? []).map(SystemPart.make), ...converted.system],
|
||||
messages: converted.messages,
|
||||
tools: tools(input.tools),
|
||||
toolChoice: input.toolChoice,
|
||||
generation: generation(input),
|
||||
providerOptions: input.providerOptions,
|
||||
})
|
||||
}
|
||||
|
||||
export * as LLMNative from "./native-request"
|
||||
124 packages/opencode/src/session/llm/native-runtime.ts Normal file
@@ -0,0 +1,124 @@
|
||||
import type { Auth } from "@/auth"
|
||||
import type { Provider } from "@/provider/provider"
|
||||
import { ProviderTransform } from "@/provider/transform"
|
||||
import { errorMessage } from "@/util/error"
|
||||
import { isRecord } from "@/util/record"
|
||||
import { asSchema, type ModelMessage, type Tool } from "ai"
|
||||
import { Effect } from "effect"
|
||||
import * as Stream from "effect/Stream"
|
||||
import { tool as nativeTool, ToolFailure, type JsonSchema, type LLMEvent } from "@opencode-ai/llm"
|
||||
import type { LLMClientShape } from "@opencode-ai/llm/route"
|
||||
import { LLMNative } from "./native-request"
|
||||
|
||||
export type RuntimeStatus =
|
||||
| { readonly type: "supported"; readonly apiKey: string; readonly baseURL?: string }
|
||||
| { readonly type: "unsupported"; readonly reason: string }
|
||||
export type StreamResult =
|
||||
| { readonly type: "supported"; readonly stream: Stream.Stream<LLMEvent, unknown> }
|
||||
| { readonly type: "unsupported"; readonly reason: string }
|
||||
|
||||
type StreamInput = {
|
||||
readonly model: Provider.Model
|
||||
readonly provider: Provider.Info
|
||||
readonly auth: Auth.Info | undefined
|
||||
readonly llmClient: LLMClientShape
|
||||
readonly isOpenaiOauth: boolean
|
||||
readonly system: string[]
|
||||
readonly messages: ModelMessage[]
|
||||
readonly tools: Record<string, Tool>
|
||||
readonly toolChoice?: "auto" | "required" | "none"
|
||||
readonly temperature?: number
|
||||
readonly topP?: number
|
||||
readonly topK?: number
|
||||
readonly maxOutputTokens?: number
|
||||
readonly providerOptions?: Record<string, any>
|
||||
readonly headers: Record<string, string>
|
||||
readonly abort: AbortSignal
|
||||
}
|
||||
|
||||
export function status(input: Pick<StreamInput, "model" | "provider" | "auth">): RuntimeStatus {
|
||||
if (input.model.providerID !== "openai" && !input.model.providerID.startsWith("opencode"))
|
||||
return { type: "unsupported", reason: "provider is not openai or opencode" }
|
||||
if (input.model.api.npm !== "@ai-sdk/openai") return { type: "unsupported", reason: "provider package is not OpenAI" }
|
||||
if (input.auth?.type === "oauth") return { type: "unsupported", reason: "OAuth auth is not supported" }
|
||||
|
||||
const apiKey =
|
||||
input.auth?.type === "api"
|
||||
? input.auth.key
|
||||
: typeof input.provider.options.apiKey === "string"
|
||||
? input.provider.options.apiKey
|
||||
: undefined
|
||||
if (!apiKey) return { type: "unsupported", reason: "OpenAI API key is not configured" }
|
||||
|
||||
return {
|
||||
type: "supported",
|
||||
apiKey,
|
||||
baseURL: typeof input.provider.options.baseURL === "string" ? input.provider.options.baseURL : undefined,
|
||||
}
|
||||
}
|
||||
|
||||
export function stream(input: StreamInput): StreamResult {
|
||||
const current = status(input)
|
||||
if (current.type === "unsupported") return current
|
||||
|
||||
return {
|
||||
...current,
|
||||
stream: input.llmClient.stream({
|
||||
request: LLMNative.request({
|
||||
model: input.model,
|
||||
apiKey: current.apiKey,
|
||||
baseURL: current.baseURL,
|
||||
system: input.isOpenaiOauth ? input.system : [],
|
||||
messages: ProviderTransform.message(input.messages, input.model, input.providerOptions ?? {}),
|
||||
toolChoice: input.toolChoice,
|
||||
temperature: input.temperature,
|
||||
topP: input.topP,
|
||||
topK: input.topK,
|
||||
maxOutputTokens: input.maxOutputTokens,
|
||||
providerOptions: ProviderTransform.providerOptions(input.model, input.providerOptions ?? {}),
|
||||
headers: { ...providerHeaders(input.provider.options.headers), ...input.headers },
|
||||
}),
|
||||
tools: nativeTools(input.tools, input),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
function providerHeaders(value: unknown): Record<string, string> | undefined {
|
||||
if (!isRecord(value)) return undefined
|
||||
return Object.fromEntries(
|
||||
Object.entries(value).filter((entry): entry is [string, string] => typeof entry[1] === "string"),
|
||||
)
|
||||
}
|
||||
|
||||
function nativeSchema(value: unknown): JsonSchema {
|
||||
if (!value || typeof value !== "object") return { type: "object", properties: {} }
|
||||
if ("jsonSchema" in value && value.jsonSchema && typeof value.jsonSchema === "object")
|
||||
return value.jsonSchema as JsonSchema
|
||||
return asSchema(value as Parameters<typeof asSchema>[0]).jsonSchema as JsonSchema
|
||||
}
|
||||
|
||||
function nativeTools(tools: Record<string, Tool>, input: Pick<StreamInput, "messages" | "abort">) {
|
||||
return Object.fromEntries(
|
||||
Object.entries(tools).map(([name, item]) => [
|
||||
name,
|
||||
nativeTool({
|
||||
description: item.description ?? "",
|
||||
jsonSchema: nativeSchema(item.inputSchema),
|
||||
execute: (args: unknown, ctx) =>
|
||||
Effect.tryPromise({
|
||||
try: () => {
|
||||
if (!item.execute) throw new Error(`Tool has no execute handler: ${name}`)
|
||||
return item.execute(args, {
|
||||
toolCallId: ctx?.id ?? name,
|
||||
messages: input.messages,
|
||||
abortSignal: input.abort,
|
||||
})
|
||||
},
|
||||
catch: (error) => new ToolFailure({ message: errorMessage(error), error }),
|
||||
}),
|
||||
}),
|
||||
]),
|
||||
)
|
||||
}
|
||||
|
||||
export * as LLMNativeRuntime from "./native-runtime"
@@ -1,4 +1,5 @@
|
||||
import { Cause, Deferred, Effect, Exit, Layer, Context, Scope } from "effect"
|
||||
import { Image } from "@/image/image"
|
||||
import { Cause, Deferred, Effect, Exit, Layer, Context, Scope, Schema } from "effect"
|
||||
import * as Stream from "effect/Stream"
|
||||
import { Agent } from "@/agent/agent"
|
||||
import { Bus } from "@/bus"
|
||||
@@ -9,7 +10,6 @@ import { Snapshot } from "@/snapshot"
|
||||
import * as Session from "./session"
|
||||
import { LLM } from "./llm"
|
||||
import { MessageV2 } from "./message-v2"
|
||||
import { Image } from "@/image/image"
|
||||
import { isOverflow } from "./overflow"
|
||||
import { PartID } from "./schema"
|
||||
import type { SessionID } from "./schema"
|
||||
@@ -28,14 +28,13 @@ import { ModelV2 } from "@opencode-ai/core/model"
|
||||
import { ProviderV2 } from "@opencode-ai/core/provider"
|
||||
import * as DateTime from "effect/DateTime"
|
||||
import { RuntimeFlags } from "@/effect/runtime-flags"
|
||||
import { Usage, type LLMEvent } from "@opencode-ai/llm"
|
||||
|
||||
const DOOM_LOOP_THRESHOLD = 3
|
||||
const log = Log.create({ service: "session.processor" })
|
||||
|
||||
export type Result = "compact" | "stop" | "continue"
|
||||
|
||||
export type Event = LLM.Event
|
||||
|
||||
export interface Handle {
|
||||
readonly message: MessageV2.Assistant
|
||||
readonly updateToolCall: (
|
||||
@@ -69,6 +68,7 @@ type ToolCall = {
|
||||
messageID: MessageV2.ToolPart["messageID"]
|
||||
sessionID: MessageV2.ToolPart["sessionID"]
|
||||
done: Deferred.Deferred<void>
|
||||
inputEnded: boolean
|
||||
}
|
||||
|
||||
interface ProcessorContext extends Input {
|
||||
@@ -81,7 +81,7 @@ interface ProcessorContext extends Input {
|
||||
reasoningMap: Record<string, MessageV2.ReasoningPart>
|
||||
}
|
||||
|
||||
type StreamEvent = Event
|
||||
type StreamEvent = LLMEvent
|
||||
|
||||
export class Service extends Context.Service<Service, Interface>()("@opencode/SessionProcessor") {}
|
||||
|
||||
@@ -137,7 +137,7 @@ export const layer = Layer.effect(
|
||||
|
||||
const readToolCall = Effect.fn("SessionProcessor.readToolCall")(function* (toolCallID: string) {
|
||||
const call = ctx.toolcalls[toolCallID]
|
||||
if (!call) return
|
||||
if (!call) return undefined
|
||||
const part = yield* session.getPart({
|
||||
partID: call.partID,
|
||||
messageID: call.messageID,
|
||||
@@ -145,7 +145,7 @@ export const layer = Layer.effect(
|
||||
})
|
||||
if (!part || part.type !== "tool") {
|
||||
delete ctx.toolcalls[toolCallID]
|
||||
return
|
||||
return undefined
|
||||
}
|
||||
return { call, part }
|
||||
})
|
||||
@@ -155,7 +155,7 @@ export const layer = Layer.effect(
|
||||
update: (part: MessageV2.ToolPart) => MessageV2.ToolPart,
|
||||
) {
|
||||
const match = yield* readToolCall(toolCallID)
|
||||
if (!match) return
|
||||
if (!match) return undefined
|
||||
const part = yield* session.updatePart(update(match.part))
|
||||
ctx.toolcalls[toolCallID] = {
|
||||
...match.call,
|
||||
@@ -211,12 +211,98 @@ export const layer = Layer.effect(
|
||||
return true
|
||||
})
|
||||
|
||||
const finishReasoning = Effect.fn("SessionProcessor.finishReasoning")(function* (reasoningID: string) {
|
||||
if (!(reasoningID in ctx.reasoningMap)) return
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Reasoning.Ended, {
|
||||
sessionID: ctx.sessionID,
|
||||
reasoningID,
|
||||
text: ctx.reasoningMap[reasoningID].text,
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
// oxlint-disable-next-line no-self-assign -- reactivity trigger
|
||||
ctx.reasoningMap[reasoningID].text = ctx.reasoningMap[reasoningID].text
|
||||
ctx.reasoningMap[reasoningID].time = { ...ctx.reasoningMap[reasoningID].time, end: Date.now() }
|
||||
yield* session.updatePart(ctx.reasoningMap[reasoningID])
|
||||
delete ctx.reasoningMap[reasoningID]
|
||||
})
|
||||
|
||||
const ensureToolCall = Effect.fn("SessionProcessor.ensureToolCall")(function* (input: {
|
||||
id: string
|
||||
name: string
|
||||
providerExecuted?: boolean
|
||||
}) {
|
||||
const existing = yield* readToolCall(input.id)
|
||||
if (existing) {
|
||||
if (!input.providerExecuted || existing.part.metadata?.providerExecuted) return existing
|
||||
const part = yield* session.updatePart({
|
||||
...existing.part,
|
||||
metadata: { ...existing.part.metadata, providerExecuted: true },
|
||||
})
|
||||
ctx.toolcalls[input.id] = {
|
||||
...existing.call,
|
||||
partID: part.id,
|
||||
messageID: part.messageID,
|
||||
sessionID: part.sessionID,
|
||||
}
|
||||
return { call: ctx.toolcalls[input.id], part }
|
||||
}
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Tool.Input.Started, {
|
||||
sessionID: ctx.sessionID,
|
||||
callID: input.id,
|
||||
name: input.name,
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
const part = yield* session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
messageID: ctx.assistantMessage.id,
|
||||
sessionID: ctx.assistantMessage.sessionID,
|
||||
type: "tool",
|
||||
tool: input.name,
|
||||
callID: input.id,
|
||||
state: { status: "pending", input: {}, raw: "" },
|
||||
metadata: input.providerExecuted ? { providerExecuted: true } : undefined,
|
||||
} satisfies MessageV2.ToolPart)
|
||||
ctx.toolcalls[input.id] = {
|
||||
done: yield* Deferred.make<void>(),
|
||||
partID: part.id,
|
||||
messageID: part.messageID,
|
||||
sessionID: part.sessionID,
|
||||
inputEnded: false,
|
||||
}
|
||||
return { call: ctx.toolcalls[input.id], part }
|
||||
})
|
||||
|
||||
const isFilePart = Schema.is(MessageV2.FilePart)
|
||||
|
||||
const toolResultOutput = (value: Extract<StreamEvent, { type: "tool-result" }>) => {
|
||||
if (isRecord(value.result.value) && typeof value.result.value.output === "string") {
|
||||
return {
|
||||
title: typeof value.result.value.title === "string" ? value.result.value.title : value.name,
|
||||
metadata: isRecord(value.result.value.metadata) ? value.result.value.metadata : {},
|
||||
output: value.result.value.output,
|
||||
attachments: Array.isArray(value.result.value.attachments)
|
||||
? value.result.value.attachments.filter(isFilePart)
|
||||
: undefined,
|
||||
}
|
||||
}
|
||||
return {
|
||||
title: value.name,
|
||||
metadata: value.result.type === "json" && isRecord(value.result.value) ? value.result.value : {},
|
||||
output:
|
||||
typeof value.result.value === "string" ? value.result.value : (JSON.stringify(value.result.value) ?? ""),
|
||||
}
|
||||
}
|
||||
|
||||
const toolInput = (value: unknown): Record<string, any> => (isRecord(value) ? value : { value })
|
||||
|
||||
const handleEvent = Effect.fnUntraced(function* (value: StreamEvent) {
|
||||
switch (value.type) {
|
||||
case "start":
|
||||
yield* status.set(ctx.sessionID, { type: "busy" })
|
||||
return
|
||||
|
||||
case "reasoning-start":
|
||||
if (value.id in ctx.reasoningMap) return
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
@@ -240,6 +326,7 @@ export const layer = Layer.effect(
|
||||
return
|
||||
|
||||
case "reasoning-delta":
|
||||
// Match dev: silently drop orphan deltas (no preceding reasoning-start).
|
||||
if (!(value.id in ctx.reasoningMap)) return
|
||||
ctx.reasoningMap[value.id].text += value.text
|
||||
if (value.providerMetadata) ctx.reasoningMap[value.id].metadata = value.providerMetadata
|
||||
@@ -253,59 +340,26 @@ export const layer = Layer.effect(
|
||||
return
|
||||
|
||||
case "reasoning-end":
|
||||
if (!(value.id in ctx.reasoningMap)) return
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Reasoning.Ended, {
|
||||
sessionID: ctx.sessionID,
|
||||
reasoningID: value.id,
|
||||
text: ctx.reasoningMap[value.id].text,
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
if (value.providerMetadata && value.id in ctx.reasoningMap) {
|
||||
ctx.reasoningMap[value.id].metadata = value.providerMetadata
|
||||
}
|
||||
// oxlint-disable-next-line no-self-assign -- reactivity trigger
|
||||
ctx.reasoningMap[value.id].text = ctx.reasoningMap[value.id].text
|
||||
ctx.reasoningMap[value.id].time = { ...ctx.reasoningMap[value.id].time, end: Date.now() }
|
||||
if (value.providerMetadata) ctx.reasoningMap[value.id].metadata = value.providerMetadata
|
||||
yield* session.updatePart(ctx.reasoningMap[value.id])
|
||||
delete ctx.reasoningMap[value.id]
|
||||
yield* finishReasoning(value.id)
|
||||
return
|
||||
|
||||
case "tool-input-start":
|
||||
if (ctx.assistantMessage.summary) {
|
||||
throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
|
||||
}
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Tool.Input.Started, {
|
||||
sessionID: ctx.sessionID,
|
||||
callID: value.id,
|
||||
name: value.toolName,
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
const part = yield* session.updatePart({
|
||||
id: ctx.toolcalls[value.id]?.partID ?? PartID.ascending(),
|
||||
messageID: ctx.assistantMessage.id,
|
||||
sessionID: ctx.assistantMessage.sessionID,
|
||||
type: "tool",
|
||||
tool: value.toolName,
|
||||
callID: value.id,
|
||||
state: { status: "pending", input: {}, raw: "" },
|
||||
metadata: value.providerExecuted ? { providerExecuted: true } : undefined,
|
||||
} satisfies MessageV2.ToolPart)
|
||||
ctx.toolcalls[value.id] = {
|
||||
done: yield* Deferred.make<void>(),
|
||||
partID: part.id,
|
||||
messageID: part.messageID,
|
||||
sessionID: part.sessionID,
|
||||
throw new Error(`Tool call not allowed while generating summary: ${value.name}`)
|
||||
}
|
||||
yield* ensureToolCall(value)
|
||||
return
|
||||
|
||||
case "tool-input-delta":
|
||||
// AI SDK emits a final `tool-call` with the parsed `input`; accumulating
|
||||
// delta fragments into `state.raw` is redundant work for no current consumer.
|
||||
return
|
||||
|
||||
case "tool-input-end": {
|
||||
const toolCall = yield* ensureToolCall(value)
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Tool.Input.Ended, {
|
||||
@@ -315,37 +369,52 @@ export const layer = Layer.effect(
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
ctx.toolcalls[value.id] = { ...toolCall.call, inputEnded: true }
|
||||
return
|
||||
}
|
||||
|
||||
case "tool-call": {
|
||||
if (ctx.assistantMessage.summary) {
|
||||
throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
|
||||
throw new Error(`Tool call not allowed while generating summary: ${value.name}`)
|
||||
}
|
||||
const toolCall = yield* ensureToolCall(value)
|
||||
const input = toolInput(value.input)
|
||||
if (!toolCall.call.inputEnded) {
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Tool.Input.Ended, {
|
||||
sessionID: ctx.sessionID,
|
||||
callID: value.id,
|
||||
text: "",
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
}
|
||||
const toolCall = yield* readToolCall(value.toolCallId)
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Tool.Called, {
|
||||
sessionID: ctx.sessionID,
|
||||
callID: value.toolCallId,
|
||||
tool: value.toolName,
|
||||
input: value.input,
|
||||
callID: value.id,
|
||||
tool: value.name,
|
||||
input,
|
||||
provider: {
|
||||
executed: toolCall?.part.metadata?.providerExecuted === true,
|
||||
executed: toolCall.part.metadata?.providerExecuted === true,
|
||||
...(value.providerMetadata ? { metadata: value.providerMetadata } : {}),
|
||||
},
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
yield* updateToolCall(value.toolCallId, (match) => ({
|
||||
yield* updateToolCall(value.id, (match) => ({
|
||||
...match,
|
||||
tool: value.toolName,
|
||||
state: {
|
||||
...match.state,
|
||||
status: "running",
|
||||
input: value.input,
|
||||
time: { start: Date.now() },
|
||||
},
|
||||
tool: value.name,
|
||||
state:
|
||||
match.state.status === "running"
|
||||
? { ...match.state, input }
|
||||
: {
|
||||
status: "running",
|
||||
input,
|
||||
time: { start: Date.now() },
|
||||
},
|
||||
metadata: match.metadata?.providerExecuted
|
||||
? { ...value.providerMetadata, providerExecuted: true }
|
||||
: value.providerMetadata,
|
||||
@@ -359,9 +428,9 @@ export const layer = Layer.effect(
|
||||
!recentParts.every(
|
||||
(part) =>
|
||||
part.type === "tool" &&
|
||||
part.tool === value.toolName &&
|
||||
part.tool === value.name &&
|
||||
part.state.status !== "pending" &&
|
||||
JSON.stringify(part.state.input) === JSON.stringify(value.input),
|
||||
JSON.stringify(part.state.input) === JSON.stringify(input),
|
||||
)
|
||||
) {
|
||||
return
|
||||
@@ -370,27 +439,19 @@ export const layer = Layer.effect(
|
||||
const agent = yield* agents.get(ctx.assistantMessage.agent)
|
||||
yield* permission.ask({
|
||||
permission: "doom_loop",
|
||||
patterns: [value.toolName],
|
||||
patterns: [value.name],
|
||||
sessionID: ctx.assistantMessage.sessionID,
|
||||
metadata: { tool: value.toolName, input: value.input },
|
||||
always: [value.toolName],
|
||||
metadata: { tool: value.name, input },
|
||||
always: [value.name],
|
||||
ruleset: agent.permission,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
case "tool-result": {
|
||||
const toolCall = yield* readToolCall(value.toolCallId)
|
||||
const toolAttachments: MessageV2.FilePart[] = (
|
||||
Array.isArray(value.output.attachments) ? value.output.attachments : []
|
||||
).filter(
|
||||
(attachment: unknown): attachment is MessageV2.FilePart =>
|
||||
isRecord(attachment) &&
|
||||
attachment.type === "file" &&
|
||||
typeof attachment.mime === "string" &&
|
||||
typeof attachment.url === "string",
|
||||
)
|
||||
const normalized = yield* Effect.forEach(toolAttachments, (attachment) =>
|
||||
const toolCall = yield* readToolCall(value.id)
|
||||
const rawOutput = toolResultOutput(value)
|
||||
const normalized = yield* Effect.forEach(rawOutput.attachments ?? [], (attachment) =>
|
||||
attachment.mime.startsWith("image/")
|
||||
? image.normalize(attachment).pipe(
|
||||
Effect.catchIf(
|
||||
@@ -404,18 +465,18 @@ export const layer = Layer.effect(
|
||||
const omitted = normalized.filter(Exit.isFailure).length
|
||||
const attachments = normalized.filter(Exit.isSuccess).map((item) => item.value)
|
||||
const output = {
|
||||
...value.output,
|
||||
...rawOutput,
|
||||
output:
|
||||
omitted === 0
|
||||
? value.output.output
|
||||
: `${value.output.output}\n\n[${omitted} image${omitted === 1 ? "" : "s"} omitted: could not be resized below the image size limit.]`,
|
||||
attachments: attachments?.length ? attachments : undefined,
|
||||
? rawOutput.output
|
||||
: `${rawOutput.output}\n\n[${omitted} image${omitted === 1 ? "" : "s"} omitted: could not be resized below the image size limit.]`,
|
||||
attachments: attachments.length ? attachments : undefined,
|
||||
}
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Tool.Success, {
|
||||
sessionID: ctx.sessionID,
|
||||
callID: value.toolCallId,
|
||||
callID: value.id,
|
||||
structured: output.metadata,
|
||||
content: [
|
||||
{
|
||||
@@ -423,32 +484,32 @@ export const layer = Layer.effect(
|
||||
text: output.output,
|
||||
},
|
||||
...(output.attachments?.map((item: MessageV2.FilePart) => ({
|
||||
type: "file",
|
||||
type: "file" as const,
|
||||
uri: item.url,
|
||||
mime: item.mime,
|
||||
name: item.filename,
|
||||
})) ?? []),
|
||||
],
|
||||
provider: {
|
||||
executed: toolCall?.part.metadata?.providerExecuted === true,
|
||||
executed: value.providerExecuted === true || toolCall?.part.metadata?.providerExecuted === true,
|
||||
},
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
yield* completeToolCall(value.toolCallId, output)
|
||||
yield* completeToolCall(value.id, output)
|
||||
return
|
||||
}
|
||||
|
||||
case "tool-error": {
|
||||
const toolCall = yield* readToolCall(value.toolCallId)
|
||||
const toolCall = yield* readToolCall(value.id)
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Tool.Failed, {
|
||||
sessionID: ctx.sessionID,
|
||||
callID: value.toolCallId,
|
||||
callID: value.id,
|
||||
error: {
|
||||
type: "unknown",
|
||||
message: errorMessage(value.error),
|
||||
message: value.message,
|
||||
},
|
||||
provider: {
|
||||
executed: toolCall?.part.metadata?.providerExecuted === true,
|
||||
@@ -456,14 +517,14 @@ export const layer = Layer.effect(
|
||||
timestamp: DateTime.makeUnsafe(Date.now()),
|
||||
})
|
||||
}
|
||||
yield* failToolCall(value.toolCallId, value.error)
|
||||
yield* failToolCall(value.id, value.error ?? new Error(value.message))
|
||||
return
|
||||
}
|
||||
|
||||
case "error":
|
||||
throw value.error
|
||||
case "provider-error":
|
||||
throw new Error(value.message)
|
||||
|
||||
case "start-step":
|
||||
case "step-start":
|
||||
if (!ctx.snapshot) ctx.snapshot = yield* snapshot.track()
|
||||
if (!ctx.assistantMessage.summary) {
|
||||
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
|
||||
@@ -490,11 +551,12 @@ export const layer = Layer.effect(
|
||||
})
|
||||
return
|
||||
|
||||
case "finish-step": {
|
||||
case "step-finish": {
|
||||
const completedSnapshot = yield* snapshot.track()
|
||||
yield* Effect.forEach(Object.keys(ctx.reasoningMap), finishReasoning)
|
||||
const usage = Session.getUsage({
|
||||
model: ctx.model,
|
||||
usage: value.usage,
|
||||
usage: value.usage ?? new Usage({}),
|
||||
metadata: value.providerMetadata,
|
||||
})
|
||||
if (!ctx.assistantMessage.summary) {
|
||||
@@ -502,7 +564,7 @@ export const layer = Layer.effect(
|
||||
if (flags.experimentalEventSystem) {
|
||||
yield* events.publish(SessionEvent.Step.Ended, {
|
||||
sessionID: ctx.sessionID,
|
||||
finish: value.finishReason,
|
||||
finish: value.reason,
|
||||
cost: usage.cost,
|
||||
tokens: usage.tokens,
|
||||
snapshot: completedSnapshot,
|
||||
@@ -510,12 +572,12 @@ export const layer = Layer.effect(
|
||||
})
|
||||
}
|
||||
}
|
||||
ctx.assistantMessage.finish = value.finishReason
|
||||
ctx.assistantMessage.finish = value.reason
|
||||
ctx.assistantMessage.cost += usage.cost
|
||||
ctx.assistantMessage.tokens = usage.tokens
|
||||
yield* session.updatePart({
|
||||
id: PartID.ascending(),
|
||||
reason: value.finishReason,
|
||||
reason: value.reason,
|
||||
snapshot: completedSnapshot,
|
||||
messageID: ctx.assistantMessage.id,
|
||||
sessionID: ctx.assistantMessage.sessionID,
|
||||
@@ -622,10 +684,6 @@ export const layer = Layer.effect(
|
||||
|
||||
case "finish":
|
||||
return
|
||||
|
||||
default:
|
||||
slog.info("unhandled", { event: value.type, value })
|
||||
return
|
||||
}
|
||||
})
|
||||
|
||||
@@ -727,6 +785,7 @@ export const layer = Layer.effect(
|
||||
yield* Effect.gen(function* () {
|
||||
ctx.currentText = undefined
|
||||
ctx.reasoningMap = {}
|
||||
yield* status.set(ctx.sessionID, { type: "busy" })
|
||||
const stream = llm.stream(streamInput)
|
||||
|
||||
yield* stream.pipe(
|
||||
|
||||
@@ -63,6 +63,7 @@ import * as DateTime from "effect/DateTime"
|
||||
import { eq } from "@/storage/db"
|
||||
import * as Database from "@/storage/db"
|
||||
import { SessionTable } from "./session.sql"
|
||||
import { LLMEvent } from "@opencode-ai/llm"
|
||||
|
||||
// @ts-ignore
|
||||
globalThis.AI_SDK_LOG_WARNINGS = false
|
||||
@@ -365,7 +366,7 @@ export const layer = Layer.effect(
|
||||
messages: [{ role: "user", content: "Generate a title for this conversation:\n" }, ...msgs],
|
||||
})
|
||||
.pipe(
|
||||
Stream.filter((e): e is Extract<LLM.Event, { type: "text-delta" }> => e.type === "text-delta"),
|
||||
Stream.filter(LLMEvent.is.textDelta),
|
||||
Stream.map((e) => e.text),
|
||||
Stream.mkString,
|
||||
Effect.orDie,
|
||||
|
||||
@@ -4,7 +4,8 @@ import { BackgroundJob } from "@/background/job"
|
||||
import { BusEvent } from "@/bus/bus-event"
|
||||
import { Bus } from "@/bus"
|
||||
import { Decimal } from "decimal.js"
|
||||
import { type ProviderMetadata, type LanguageModelUsage } from "ai"
|
||||
import { Flag } from "@opencode-ai/core/flag/flag"
|
||||
import type { ProviderMetadata, Usage } from "@opencode-ai/llm"
|
||||
import { InstallationVersion } from "@opencode-ai/core/installation/version"
|
||||
|
||||
import { Database } from "@/storage/db"
|
||||
@@ -374,21 +375,19 @@ export function plan(input: { slug: string; time: { created: number } }, instanc
|
||||
return path.join(base, [input.time.created, input.slug].join("-") + ".md")
|
||||
}
|
||||
|
||||
export const getUsage = (input: { model: Provider.Model; usage: LanguageModelUsage; metadata?: ProviderMetadata }) => {
|
||||
export const getUsage = (input: { model: Provider.Model; usage: Usage; metadata?: ProviderMetadata }) => {
|
||||
const safe = (value: number) => {
|
||||
if (!Number.isFinite(value)) return 0
|
||||
return Math.max(0, value)
|
||||
}
|
||||
const inputTokens = safe(input.usage.inputTokens ?? 0)
|
||||
const outputTokens = safe(input.usage.outputTokens ?? 0)
|
||||
const reasoningTokens = safe(input.usage.outputTokenDetails?.reasoningTokens ?? input.usage.reasoningTokens ?? 0)
|
||||
const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)
|
||||
|
||||
const cacheReadInputTokens = safe(
|
||||
input.usage.inputTokenDetails?.cacheReadTokens ?? input.usage.cachedInputTokens ?? 0,
|
||||
)
|
||||
const cacheReadInputTokens = safe(input.usage.cacheReadInputTokens ?? 0)
|
||||
const cacheWriteInputTokens = safe(
|
||||
Number(
|
||||
input.usage.inputTokenDetails?.cacheWriteTokens ??
|
||||
input.usage.cacheWriteInputTokens ??
|
||||
input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
|
||||
// google-vertex-anthropic returns metadata under "vertex" key
|
||||
// (AnthropicMessagesLanguageModel custom provider key from 'vertex.anthropic.messages')
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
*/
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import { aggregateFailures } from "@/cli/cmd/tui/context/aggregate-failures"
|
||||
import { ConfigError } from "@/config/error"
|
||||
|
||||
describe("aggregateFailures", () => {
|
||||
test("returns null when every result is fulfilled", () => {
|
||||
@@ -41,11 +42,48 @@ describe("aggregateFailures", () => {
|
||||
expect(err!.message).toContain("agents: boom")
|
||||
})
|
||||
|
||||
test("formats structured config errors hidden inside SDK error causes", () => {
|
||||
const configError = new ConfigError.InvalidError({
|
||||
path: "/tmp/opencode.json",
|
||||
issues: [{ message: "Expected object", path: ["provider", "anthropic", "options"] }],
|
||||
})
|
||||
const err = aggregateFailures([
|
||||
{
|
||||
name: "config.get",
|
||||
result: {
|
||||
status: "rejected",
|
||||
reason: new Error("ConfigInvalidError", {
|
||||
cause: {
|
||||
body: configError.toObject(),
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
])
|
||||
|
||||
expect(err!.message).toContain("config.get: Configuration is invalid at /tmp/opencode.json")
|
||||
expect(err!.message).toContain("Expected object provider.anthropic.options")
|
||||
})
|
||||
|
||||
test("deduplicates identical failure messages across startup requests", () => {
|
||||
const reason = new Error("same config problem")
|
||||
const err = aggregateFailures([
|
||||
{ name: "config.providers", result: { status: "rejected", reason } },
|
||||
{ name: "provider.list", result: { status: "rejected", reason } },
|
||||
{ name: "app.agents", result: { status: "rejected", reason } },
|
||||
{ name: "config.get", result: { status: "rejected", reason } },
|
||||
{ name: "project.sync", result: { status: "fulfilled", value: undefined } },
|
||||
])
|
||||
|
||||
expect(err!.message).toContain("4 of 5 requests failed: same config problem")
|
||||
expect(err!.message).toContain("Affected startup requests: config.providers, provider.list, app.agents, config.get")
|
||||
expect(err!.message.match(/same config problem/g)?.length).toBe(1)
|
||||
})
|
||||
|
||||
test("attaches structured failure list under .cause", () => {
|
||||
const reason = new Error("nope")
|
||||
const err = aggregateFailures([{ name: "providers", result: { status: "rejected", reason } }])
|
||||
const cause = err!.cause as { failures: Array<{ name: string; reason: unknown }> }
|
||||
expect(cause.failures).toEqual([{ name: "providers", reason }])
|
||||
expect(err!.cause).toEqual({ failures: [{ name: "providers", reason }] })
|
||||
})
|
||||
|
||||
test("falls back to String() for opaque reasons", () => {
|
||||
|
||||
146 packages/opencode/test/cli/tui/dialog-prompt.test.tsx Normal file
@@ -0,0 +1,146 @@
|
||||
/** @jsxImportSource @opentui/solid */
|
||||
import { TextareaRenderable } from "@opentui/core"
|
||||
import { createDefaultOpenTuiKeymap } from "@opentui/keymap/opentui"
|
||||
import { testRender, useRenderer } from "@opentui/solid"
|
||||
import { expect, test } from "bun:test"
|
||||
import { mkdir } from "node:fs/promises"
|
||||
import path from "node:path"
|
||||
import { onCleanup } from "solid-js"
|
||||
import { tmpdir } from "../../fixture/fixture"
|
||||
import { createTuiResolvedConfig } from "../../fixture/tui-runtime"
|
||||
import type { TuiKeybind } from "../../../src/cli/cmd/tui/config/keybind"
|
||||
|
||||
async function wait(fn: () => boolean, timeout = 2000) {
|
||||
const start = Date.now()
|
||||
while (!fn()) {
|
||||
if (Date.now() - start > timeout) throw new Error("timed out waiting for condition")
|
||||
await Bun.sleep(10)
|
||||
}
|
||||
}
|
||||
|
||||
async function mountPrompt(input: {
|
||||
root: string
|
||||
keybinds: Partial<TuiKeybind.Keybinds>
|
||||
onConfirm: (value: string) => void
|
||||
}) {
|
||||
const { Global } = await import("@opencode-ai/core/global")
|
||||
const previous = {
|
||||
config: Global.Path.config,
|
||||
state: Global.Path.state,
|
||||
}
|
||||
Global.Path.config = path.join(input.root, "config")
|
||||
Global.Path.state = path.join(input.root, "state")
|
||||
await mkdir(Global.Path.config, { recursive: true })
|
||||
await mkdir(Global.Path.state, { recursive: true })
|
||||
await Bun.write(path.join(Global.Path.state, "kv.json"), "{}")
|
||||
|
||||
const [
|
||||
{ DialogProvider },
|
||||
{ DialogPrompt },
|
||||
{ KVProvider },
|
||||
{ ThemeProvider },
|
||||
{ TuiConfigProvider },
|
||||
{ ToastProvider },
|
||||
{ OpencodeKeymapProvider, registerOpencodeKeymap },
|
||||
] = await Promise.all([
|
||||
import("../../../src/cli/cmd/tui/ui/dialog"),
|
||||
import("../../../src/cli/cmd/tui/ui/dialog-prompt"),
|
||||
import("../../../src/cli/cmd/tui/context/kv"),
|
||||
import("../../../src/cli/cmd/tui/context/theme"),
|
||||
import("../../../src/cli/cmd/tui/context/tui-config"),
|
||||
import("../../../src/cli/cmd/tui/ui/toast"),
|
||||
import("../../../src/cli/cmd/tui/keymap"),
|
||||
])
|
||||
|
||||
function Harness() {
|
||||
const renderer = useRenderer()
|
||||
const keymap = createDefaultOpenTuiKeymap(renderer)
|
||||
const resolvedConfig = createTuiResolvedConfig({
|
||||
keybinds: input.keybinds,
|
||||
leader_timeout: 1000,
|
||||
})
|
||||
const off = registerOpencodeKeymap(keymap, renderer, resolvedConfig)
|
||||
onCleanup(off)
|
||||
|
||||
return (
|
||||
<OpencodeKeymapProvider keymap={keymap}>
|
||||
<TuiConfigProvider config={resolvedConfig}>
|
||||
<KVProvider>
|
||||
<ThemeProvider mode="dark">
|
||||
<ToastProvider>
|
||||
<DialogProvider>
|
||||
<DialogPrompt title="Rename Session" value="draft" onConfirm={input.onConfirm} />
|
||||
</DialogProvider>
|
||||
</ToastProvider>
|
||||
</ThemeProvider>
|
||||
</KVProvider>
|
||||
</TuiConfigProvider>
|
||||
</OpencodeKeymapProvider>
|
||||
)
|
||||
}
|
||||
|
||||
const app = await testRender(() => <Harness />, { kittyKeyboard: true })
|
||||
return {
|
||||
app,
|
||||
async cleanup() {
|
||||
app.renderer.destroy()
|
||||
Global.Path.config = previous.config
|
||||
Global.Path.state = previous.state
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
test("dialog prompt submit wins when return is also input newline", async () => {
|
||||
await using tmp = await tmpdir()
|
||||
const confirmed: string[] = []
|
||||
const prompt = await mountPrompt({
|
||||
root: tmp.path,
|
||||
keybinds: {
|
||||
input_submit: "super+return",
|
||||
input_newline: "return,shift+return,alt+return,ctrl+j",
|
||||
},
|
||||
onConfirm: (value) => confirmed.push(value),
|
||||
})
|
||||
|
||||
try {
|
||||
await wait(() => prompt.app.renderer.currentFocusedEditor instanceof TextareaRenderable)
|
||||
const textarea = prompt.app.renderer.currentFocusedEditor
|
||||
if (!(textarea instanceof TextareaRenderable)) throw new Error("expected focused dialog textarea")
|
||||
|
||||
prompt.app.mockInput.pressEnter()
|
||||
|
||||
expect(confirmed).toEqual(["draft"])
|
||||
expect(textarea.plainText).toBe("draft")
|
||||
} finally {
|
||||
await prompt.cleanup()
|
||||
}
|
||||
})
|
||||
|
||||
test("dialog prompt submit can be rebound separately from input submit", async () => {
|
||||
await using tmp = await tmpdir()
|
||||
const confirmed: string[] = []
|
||||
const prompt = await mountPrompt({
|
||||
root: tmp.path,
|
||||
keybinds: {
|
||||
input_submit: "return",
|
||||
"dialog.prompt.submit": "ctrl+y",
|
||||
},
|
||||
onConfirm: (value) => confirmed.push(value),
|
||||
})
|
||||
|
||||
try {
|
||||
await wait(() => prompt.app.renderer.currentFocusedEditor instanceof TextareaRenderable)
|
||||
const textarea = prompt.app.renderer.currentFocusedEditor
|
||||
if (!(textarea instanceof TextareaRenderable)) throw new Error("expected focused dialog textarea")
|
||||
|
||||
prompt.app.mockInput.pressEnter()
|
||||
expect(confirmed).toEqual([])
|
||||
expect(textarea.plainText).toBe("draft")
|
||||
|
||||
prompt.app.mockInput.pressKey("y", { ctrl: true })
|
||||
|
||||
expect(confirmed).toEqual(["draft"])
|
||||
} finally {
|
||||
await prompt.cleanup()
|
||||
}
|
||||
})
|
||||
@@ -470,6 +470,7 @@ it.instance("resolves keybind lookup from canonical keybinds", () =>
|
||||
which_key_toggle: "alt+k",
|
||||
editor_open: "ctrl+e",
|
||||
"prompt.autocomplete.next": "ctrl+j",
|
||||
"dialog.prompt.submit": "ctrl+s",
|
||||
"dialog.mcp.toggle": "ctrl+t",
|
||||
model_favorite_toggle: "ctrl+f",
|
||||
"dialog.plugins.install": "shift+i",
|
||||
@@ -491,6 +492,7 @@ it.instance("resolves keybind lookup from canonical keybinds", () =>
|
||||
)
|
||||
expect(config.keybinds.get("prompt.editor")?.[0]?.key).toBe("ctrl+e")
|
||||
expect(config.keybinds.get("prompt.autocomplete.next")?.[0]?.key).toBe("ctrl+j")
|
||||
expect(config.keybinds.get("dialog.prompt.submit")?.[0]?.key).toBe("ctrl+s")
|
||||
expect(config.keybinds.get("dialog.mcp.toggle")?.[0]?.key).toBe("ctrl+t")
|
||||
expect(config.keybinds.get("model.dialog.favorite")?.[0]?.key).toBe("ctrl+f")
|
||||
expect(config.keybinds.get("dialog.plugins.install")?.[0]?.key).toBe("shift+i")
|
||||
|
||||
@@ -62,6 +62,7 @@ describe("RuntimeFlags", () => {
|
||||
expect(flags.experimentalEventSystem).toBe(true)
|
||||
expect(flags.experimentalWorkspaces).toBe(true)
|
||||
expect(flags.experimentalIconDiscovery).toBe(true)
|
||||
expect(flags.experimentalNativeLlm).toBe(false)
|
||||
expect(flags.client).toBe("desktop")
|
||||
}),
|
||||
)
|
||||
@@ -80,6 +81,17 @@ describe("RuntimeFlags", () => {
|
||||
}),
|
||||
)
|
||||
|
||||
it.effect("requires explicit native LLM opt-in", () =>
|
||||
Effect.gen(function* () {
|
||||
const explicit = yield* readFlags.pipe(Effect.provide(fromConfig({ OPENCODE_EXPERIMENTAL_NATIVE_LLM: "true" })))
|
||||
const legacy = yield* readFlags.pipe(Effect.provide(fromConfig({ OPENCODE_LLM_RUNTIME: "native" })))
|
||||
|
||||
expect(explicit.experimentalNativeLlm).toBe(true)
|
||||
expect(legacy.experimentalNativeLlm).toBe(true)
|
||||
}),
|
||||
)
|
||||
|
||||
|
||||
it.effect("layer accepts partial test overrides and fills defaults from Config definitions", () =>
|
||||
Effect.gen(function* () {
|
||||
const flags = yield* readFlags.pipe(
|
||||
|
||||
31 packages/opencode/test/fixtures/recordings/session/native-openai-tool-call.json vendored Normal file
File diff suppressed because one or more lines are too long
31 packages/opencode/test/fixtures/recordings/session/native-zen-tool-call.json vendored Normal file
File diff suppressed because one or more lines are too long
@@ -1,6 +1,7 @@
|
||||
import { NodeHttpServer, NodeServices } from "@effect/platform-node"
|
||||
import { NamedError } from "@opencode-ai/core/util/error"
|
||||
import { describe, expect } from "bun:test"
|
||||
import { ConfigError } from "../../src/config/error"
|
||||
import { Effect, Layer } from "effect"
|
||||
import { HttpClient, HttpClientRequest, HttpRouter } from "effect/unstable/http"
|
||||
import { errorLayer } from "../../src/server/routes/instance/httpapi/middleware/error"
|
||||
@@ -50,6 +51,27 @@ describe("HttpApi error middleware", () => {
|
||||
}),
|
||||
)
|
||||
|
||||
it.live("preserves config defects as client-visible bad requests", () =>
|
||||
Effect.gen(function* () {
|
||||
const configError = new ConfigError.InvalidError({
|
||||
path: "/tmp/opencode.json",
|
||||
issues: [{ message: "Expected object", path: ["provider", "anthropic", "options"] }],
|
||||
})
|
||||
|
||||
yield* HttpRouter.add("GET", "/config-error", Effect.die(configError)).pipe(
|
||||
Layer.provide(errorLayer),
|
||||
HttpRouter.serve,
|
||||
Layer.build,
|
||||
)
|
||||
|
||||
const response = yield* HttpClientRequest.get("/config-error").pipe(HttpClient.execute)
|
||||
const body = yield* response.json
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(JSON.stringify(body)).toBe(JSON.stringify(configError.toObject()))
|
||||
}),
|
||||
)
|
||||
|
||||
it.live("does not map storage not-found defects to 404", () =>
|
||||
Effect.gen(function* () {
|
||||
yield* HttpRouter.add(
|
||||
|
||||
@@ -30,6 +30,7 @@ import { TestConfig } from "../fixture/config"
|
||||
import { SyncEvent } from "@/sync"
|
||||
import { RuntimeFlags } from "@/effect/runtime-flags"
|
||||
import { EventV2Bridge } from "@/event-v2-bridge"
|
||||
import { LLMEvent, Usage } from "@opencode-ai/llm"
|
||||
|
||||
void Log.init({ print: false })
|
||||
|
||||
@@ -47,6 +48,10 @@ const ref = {
|
||||
modelID: ModelID.make("test-model"),
|
||||
}
|
||||
|
||||
const usage = (input: ConstructorParameters<typeof Usage>[0]) => new Usage(input)
|
||||
|
||||
const basicUsage = () => usage({ inputTokens: 1, outputTokens: 1, totalTokens: 2 })
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
@@ -296,11 +301,11 @@ function readCompactionPart(sessionID: SessionID) {
|
||||
|
||||
function llm() {
|
||||
const queue: Array<
|
||||
Stream.Stream<LLM.Event, unknown> | ((input: LLM.StreamInput) => Stream.Stream<LLM.Event, unknown>)
|
||||
Stream.Stream<LLMEvent, unknown> | ((input: LLM.StreamInput) => Stream.Stream<LLMEvent, unknown>)
|
||||
> = []
|
||||
|
||||
return {
|
||||
push(stream: Stream.Stream<LLM.Event, unknown> | ((input: LLM.StreamInput) => Stream.Stream<LLM.Event, unknown>)) {
|
||||
push(stream: Stream.Stream<LLMEvent, unknown> | ((input: LLM.StreamInput) => Stream.Stream<LLMEvent, unknown>)) {
|
||||
queue.push(stream)
|
||||
},
|
||||
layer: Layer.succeed(
|
||||
@@ -319,54 +324,22 @@ function llm() {
|
||||
function reply(
|
||||
text: string,
|
||||
capture?: (input: LLM.StreamInput) => void,
|
||||
): (input: LLM.StreamInput) => Stream.Stream<LLM.Event, unknown> {
|
||||
): (input: LLM.StreamInput) => Stream.Stream<LLMEvent, unknown> {
|
||||
return (input) => {
|
||||
capture?.(input)
|
||||
return Stream.make(
|
||||
{ type: "start" } satisfies LLM.Event,
|
||||
{ type: "text-start", id: "txt-0" } satisfies LLM.Event,
|
||||
{ type: "text-delta", id: "txt-0", delta: text, text } as LLM.Event,
|
||||
{ type: "text-end", id: "txt-0" } satisfies LLM.Event,
|
||||
{
|
||||
type: "finish-step",
|
||||
finishReason: "stop",
|
||||
rawFinishReason: "stop",
|
||||
response: { id: "res", modelId: "test-model", timestamp: new Date() },
|
||||
providerMetadata: undefined,
|
||||
usage: {
|
||||
inputTokens: 1,
|
||||
outputTokens: 1,
|
||||
totalTokens: 2,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
} satisfies LLM.Event,
|
||||
{
|
||||
type: "finish",
|
||||
finishReason: "stop",
|
||||
rawFinishReason: "stop",
|
||||
totalUsage: {
|
||||
inputTokens: 1,
|
||||
outputTokens: 1,
|
||||
totalTokens: 2,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
} satisfies LLM.Event,
|
||||
LLMEvent.textStart({ id: "txt-0" }),
|
||||
LLMEvent.textDelta({ id: "txt-0", text }),
|
||||
LLMEvent.textEnd({ id: "txt-0" }),
|
||||
LLMEvent.stepFinish({
|
||||
index: 0,
|
||||
reason: "stop",
|
||||
usage: basicUsage(),
|
||||
}),
|
||||
LLMEvent.finish({
|
||||
reason: "stop",
|
||||
usage: basicUsage(),
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1204,7 +1177,7 @@ describe("session.compaction.process", () => {
Stream.fromAsyncIterable(
{
async *[Symbol.asyncIterator]() {
yield { type: "start" } as LLM.Event
yield LLMEvent.stepStart({ index: 0 })
throw new APICallError({
message: "boom",
url: "https://example.com/v1/chat/completions",
@@ -1290,55 +1263,62 @@ describe("session.compaction.process", () => {
|
||||
{ git: true },
|
||||
)
|
||||
|
||||
itCompaction.instance(
|
||||
"silently drops reasoning-delta arriving without prior reasoning-start",
|
||||
() => {
|
||||
// Regression: PR initially auto-created a reasoning Part for orphan deltas (no preceding
|
||||
// reasoning-start). Reverted to match dev — drop silently. Pinned here so any future
|
||||
// change to processor.ts reasoning-delta handling triggers this test.
|
||||
const stub = llm()
|
||||
stub.push(
|
||||
Stream.make(
|
||||
LLMEvent.reasoningDelta({ id: "orphan-1", text: "stray reasoning" }),
|
||||
LLMEvent.textStart({ id: "txt-0" }),
|
||||
LLMEvent.textDelta({ id: "txt-0", text: "summary" }),
|
||||
LLMEvent.textEnd({ id: "txt-0" }),
|
||||
LLMEvent.stepFinish({ index: 0, reason: "stop", usage: basicUsage() }),
|
||||
LLMEvent.finish({ reason: "stop", usage: basicUsage() }),
|
||||
),
|
||||
)
|
||||
return Effect.gen(function* () {
|
||||
const ssn = yield* SessionNs.Service
|
||||
const session = yield* ssn.create({})
|
||||
const msg = yield* createUserMessage(session.id, "hello")
|
||||
const msgs = yield* ssn.messages({ sessionID: session.id })
|
||||
yield* SessionCompaction.use.process({
|
||||
parentID: msg.id,
|
||||
messages: msgs,
|
||||
sessionID: session.id,
|
||||
auto: false,
|
||||
})
|
||||
|
||||
const summary = (yield* ssn.messages({ sessionID: session.id })).find(
|
||||
(item) => item.info.role === "assistant" && item.info.summary,
|
||||
)
|
||||
expect(summary?.parts.some((part) => part.type === "reasoning")).toBe(false)
|
||||
// Sanity: the text part still got through.
|
||||
expect(summary?.parts.some((part) => part.type === "text" && part.text === "summary")).toBe(true)
|
||||
}).pipe(withCompaction({ llm: stub.layer }))
|
||||
},
|
||||
{ git: true },
|
||||
)
|
||||
|
||||
itCompaction.instance(
|
||||
"does not allow tool calls while generating the summary",
|
||||
() => {
|
||||
const stub = llm()
|
||||
stub.push(
|
||||
Stream.make(
|
||||
{ type: "start" } satisfies LLM.Event,
|
||||
{ type: "tool-input-start", id: "call-1", toolName: "_noop" } satisfies LLM.Event,
|
||||
{ type: "tool-call", toolCallId: "call-1", toolName: "_noop", input: {} } satisfies LLM.Event,
|
||||
{
|
||||
type: "finish-step",
|
||||
finishReason: "tool-calls",
|
||||
rawFinishReason: "tool_calls",
|
||||
response: { id: "res", modelId: "test-model", timestamp: new Date() },
|
||||
providerMetadata: undefined,
|
||||
usage: {
|
||||
inputTokens: 1,
|
||||
outputTokens: 1,
|
||||
totalTokens: 2,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
} satisfies LLM.Event,
|
||||
{
|
||||
type: "finish",
|
||||
finishReason: "tool-calls",
|
||||
rawFinishReason: "tool_calls",
|
||||
totalUsage: {
|
||||
inputTokens: 1,
|
||||
outputTokens: 1,
|
||||
totalTokens: 2,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
} satisfies LLM.Event,
|
||||
LLMEvent.toolCall({ id: "call-1", name: "_noop", input: {} }),
|
||||
LLMEvent.stepFinish({
|
||||
index: 0,
|
||||
reason: "tool-calls",
|
||||
usage: basicUsage(),
|
||||
}),
|
||||
LLMEvent.finish({
|
||||
reason: "tool-calls",
|
||||
usage: basicUsage(),
|
||||
}),
|
||||
),
|
||||
)
|
||||
return Effect.gen(function* () {
|
||||
@@ -1544,20 +1524,7 @@ describe("SessionNs.getUsage", () => {
const model = createModel({ context: 100_000, output: 32_000 })
const result = SessionNs.getUsage({
model,
usage: {
inputTokens: 1000,
outputTokens: 500,
totalTokens: 1500,
inputTokenDetails: {
noCacheTokens: undefined,
cacheReadTokens: undefined,
cacheWriteTokens: undefined,
},
outputTokenDetails: {
textTokens: undefined,
reasoningTokens: undefined,
},
},
usage: usage({ inputTokens: 1000, outputTokens: 500, totalTokens: 1500 }),
})

expect(result.tokens.input).toBe(1000)
@@ -1571,20 +1538,7 @@ describe("SessionNs.getUsage", () => {
|
||||
const model = createModel({ context: 100_000, output: 32_000 })
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: 800,
|
||||
cacheReadTokens: 200,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 1000, outputTokens: 500, totalTokens: 1500, cacheReadInputTokens: 200 }),
|
||||
})
|
||||
|
||||
expect(result.tokens.input).toBe(800)
|
||||
@@ -1595,20 +1549,7 @@ describe("SessionNs.getUsage", () => {
|
||||
const model = createModel({ context: 100_000, output: 32_000 })
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 1000, outputTokens: 500, totalTokens: 1500 }),
|
||||
metadata: {
|
||||
anthropic: {
|
||||
cacheCreationInputTokens: 300,
|
||||
@@ -1624,20 +1565,7 @@ describe("SessionNs.getUsage", () => {
|
||||
// AI SDK v6 normalizes inputTokens to include cached tokens for all providers
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: 800,
|
||||
cacheReadTokens: 200,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 1000, outputTokens: 500, totalTokens: 1500, cacheReadInputTokens: 200 }),
|
||||
metadata: {
|
||||
anthropic: {},
|
||||
},
|
||||
@@ -1651,20 +1579,7 @@ describe("SessionNs.getUsage", () => {
|
||||
const model = createModel({ context: 100_000, output: 32_000 })
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: 400,
|
||||
reasoningTokens: 100,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 1000, outputTokens: 500, reasoningTokens: 100, totalTokens: 1500 }),
|
||||
})
|
||||
|
||||
expect(result.tokens.input).toBe(1000)
|
||||
@@ -1685,20 +1600,7 @@ describe("SessionNs.getUsage", () => {
|
||||
})
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 0,
|
||||
outputTokens: 1_000_000,
|
||||
totalTokens: 1_000_000,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: 750_000,
|
||||
reasoningTokens: 250_000,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 0, outputTokens: 1_000_000, reasoningTokens: 250_000, totalTokens: 1_000_000 }),
|
||||
})
|
||||
|
||||
expect(result.tokens.output).toBe(750_000)
|
||||
@@ -1710,20 +1612,7 @@ describe("SessionNs.getUsage", () => {
|
||||
const model = createModel({ context: 100_000, output: 32_000 })
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 0,
|
||||
outputTokens: 0,
|
||||
totalTokens: 0,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 0, outputTokens: 0, totalTokens: 0 }),
|
||||
})
|
||||
|
||||
expect(result.tokens.input).toBe(0)
|
||||
@@ -1746,20 +1635,7 @@ describe("SessionNs.getUsage", () => {
|
||||
})
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 1_000_000,
|
||||
outputTokens: 100_000,
|
||||
totalTokens: 1_100_000,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 1_000_000, outputTokens: 100_000, totalTokens: 1_100_000 }),
|
||||
})
|
||||
|
||||
expect(result.cost).toBe(3 + 1.5)
|
||||
@@ -1796,20 +1672,12 @@ describe("SessionNs.getUsage", () => {
|
||||
})
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
usage: usage({
|
||||
inputTokens: 650_000,
|
||||
outputTokens: 100_000,
|
||||
totalTokens: 750_000,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: 100_000,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
cacheReadInputTokens: 100_000,
|
||||
}),
|
||||
})
|
||||
|
||||
expect(result.tokens.input).toBe(550_000)
|
||||
@@ -1841,20 +1709,7 @@ describe("SessionNs.getUsage", () => {
|
||||
})
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 300_000,
|
||||
outputTokens: 100_000,
|
||||
totalTokens: 400_000,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: undefined,
|
||||
cacheReadTokens: undefined,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 300_000, outputTokens: 100_000, totalTokens: 400_000 }),
|
||||
})
|
||||
|
||||
expect(result.cost).toBe(0.9 + 0.4)
|
||||
@@ -1865,24 +1720,16 @@ describe("SessionNs.getUsage", () => {
|
||||
(npm) => {
|
||||
const model = createModel({ context: 100_000, output: 32_000, npm })
|
||||
// AI SDK v6: inputTokens includes cached tokens for all providers
|
||||
const usage = {
|
||||
const item = usage({
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: 800,
|
||||
cacheReadTokens: 200,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
}
|
||||
cacheReadInputTokens: 200,
|
||||
})
|
||||
if (npm === "@ai-sdk/amazon-bedrock") {
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage,
|
||||
usage: item,
|
||||
metadata: {
|
||||
bedrock: {
|
||||
usage: {
|
||||
@@ -1903,7 +1750,7 @@ describe("SessionNs.getUsage", () => {
|
||||
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage,
|
||||
usage: item,
|
||||
metadata: {
|
||||
anthropic: {
|
||||
cacheCreationInputTokens: 300,
|
||||
@@ -1924,20 +1771,7 @@ describe("SessionNs.getUsage", () => {
|
||||
const model = createModel({ context: 100_000, output: 32_000, npm: "@ai-sdk/google-vertex/anthropic" })
|
||||
const result = SessionNs.getUsage({
|
||||
model,
|
||||
usage: {
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
inputTokenDetails: {
|
||||
noCacheTokens: 800,
|
||||
cacheReadTokens: 200,
|
||||
cacheWriteTokens: undefined,
|
||||
},
|
||||
outputTokenDetails: {
|
||||
textTokens: undefined,
|
||||
reasoningTokens: undefined,
|
||||
},
|
||||
},
|
||||
usage: usage({ inputTokens: 1000, outputTokens: 500, totalTokens: 1500, cacheReadInputTokens: 200 }),
|
||||
metadata: {
|
||||
vertex: {
|
||||
cacheCreationInputTokens: 300,
|
||||
|
||||
283
packages/opencode/test/session/llm-native-recorded.test.ts
Normal file
@@ -0,0 +1,283 @@
|
||||
import { NodeFileSystem } from "@effect/platform-node"
|
||||
import { HttpRecorder, Redactor } from "@opencode-ai/http-recorder"
|
||||
import { describe, expect } from "bun:test"
|
||||
import { tool } from "ai"
|
||||
import { Effect, Layer, Stream } from "effect"
|
||||
import { FetchHttpClient } from "effect/unstable/http"
|
||||
import path from "node:path"
|
||||
import z from "zod"
|
||||
import { Auth } from "@/auth"
|
||||
import { Config } from "@/config/config"
|
||||
import { Plugin } from "@/plugin"
|
||||
import { Provider } from "@/provider/provider"
|
||||
import { ModelID, ProviderID } from "@/provider/schema"
|
||||
import { Filesystem } from "@/util/filesystem"
|
||||
import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
|
||||
import { RuntimeFlags } from "@/effect/runtime-flags"
|
||||
import type { Agent } from "../../src/agent/agent"
|
||||
import { LLM } from "../../src/session/llm"
|
||||
import { MessageV2 } from "../../src/session/message-v2"
|
||||
import { MessageID, SessionID } from "../../src/session/schema"
|
||||
import type { ModelsDev } from "@opencode-ai/core/models"
|
||||
import { TestInstance } from "../fixture/fixture"
|
||||
import { testEffect } from "../lib/effect"
|
||||
|
||||
const OPENAI_CASSETTE = "session/native-openai-tool-call"
|
||||
const ZEN_CASSETTE = "session/native-zen-tool-call"
|
||||
const FIXTURES_DIR = path.join(import.meta.dir, "../fixtures/recordings")
|
||||
const OPENAI_API_KEY = process.env.OPENCODE_RECORD_OPENAI_API_KEY ?? process.env.OPENAI_API_KEY
|
||||
const CONSOLE_TOKEN = process.env.OPENCODE_RECORD_CONSOLE_TOKEN
|
||||
const ZEN_ORG_ID = process.env.OPENCODE_RECORD_ZEN_ORG_ID
|
||||
const ZEN_API_URL =
|
||||
process.env.OPENCODE_RECORD_ZEN_API_URL ?? "https://console.opencode.ai/proxy/connections/fixture/v1"
|
||||
|
||||
const shouldRecord = process.env.RECORD === "true"
|
||||
const canRunOpenAI = shouldRecord
|
||||
? Boolean(OPENAI_API_KEY)
|
||||
: HttpRecorder.hasCassetteSync(OPENAI_CASSETTE, { directory: FIXTURES_DIR })
|
||||
const canRunZen = shouldRecord
|
||||
? Boolean(CONSOLE_TOKEN && ZEN_ORG_ID)
|
||||
: HttpRecorder.hasCassetteSync(ZEN_CASSETTE, { directory: FIXTURES_DIR })
|
||||
|
||||
async function loadFixture(providerID: string, modelID: string) {
|
||||
const data = await Filesystem.readJson<Record<string, ModelsDev.Provider>>(
|
||||
path.join(import.meta.dir, "../tool/fixtures/models-api.json"),
|
||||
)
|
||||
const provider = data[providerID]
|
||||
if (!provider) throw new Error(`Missing provider in fixture: ${providerID}`)
|
||||
const model = provider.models[modelID]
|
||||
if (!model) throw new Error(`Missing model in fixture: ${modelID}`)
|
||||
return model
|
||||
}
|
||||
|
||||
const openAIConfig = (model: ModelsDev.Provider["models"][string]): Partial<Config.Info> => ({
|
||||
enabled_providers: ["openai"],
|
||||
provider: {
|
||||
openai: {
|
||||
name: "OpenAI",
|
||||
env: ["OPENAI_API_KEY"],
|
||||
npm: "@ai-sdk/openai",
|
||||
api: "https://api.openai.com/v1",
|
||||
models: {
|
||||
[model.id]: JSON.parse(JSON.stringify(model)) as NonNullable<
|
||||
NonNullable<Config.Info["provider"]>[string]["models"]
|
||||
>[string],
|
||||
},
|
||||
options: {
|
||||
apiKey: OPENAI_API_KEY ?? "fixture-openai-key",
|
||||
baseURL: "https://api.openai.com/v1",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
const zenConfig = (model: ModelsDev.Provider["models"][string]): Partial<Config.Info> => ({
|
||||
enabled_providers: ["opencode"],
|
||||
provider: {
|
||||
opencode: {
|
||||
name: "OpenCode Zen",
|
||||
env: ["OPENCODE_CONSOLE_TOKEN"],
|
||||
npm: "@ai-sdk/openai-compatible",
|
||||
api: ZEN_API_URL,
|
||||
models: {
|
||||
[model.id]: JSON.parse(JSON.stringify(model)) as NonNullable<
|
||||
NonNullable<Config.Info["provider"]>[string]["models"]
|
||||
>[string],
|
||||
},
|
||||
options: {
|
||||
apiKey: CONSOLE_TOKEN ?? "fixture-console-token",
|
||||
headers: {
|
||||
"x-org-id": ZEN_ORG_ID ?? "fixture-org",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
function recordedNativeLLMLayer(cassette: string, metadata: Record<string, unknown>) {
|
||||
const cassetteService = HttpRecorder.Cassette.fileSystem({ directory: FIXTURES_DIR }).pipe(
|
||||
Layer.provide(NodeFileSystem.layer),
|
||||
)
|
||||
// Only the HTTP client is recorded; RequestExecutor and the opencode LLM stack remain real.
|
||||
const recorder = HttpRecorder.recordingLayer(cassette, {
|
||||
mode: shouldRecord ? "record" : "replay",
|
||||
metadata,
|
||||
redactor: Redactor.compose(
|
||||
Redactor.defaults({
|
||||
url: {
|
||||
transform: (url) => url.replace(/\/proxy\/connections\/[^/]+\/v1/, "/proxy/connections/{connection}/v1"),
|
||||
},
|
||||
}),
|
||||
{
|
||||
response: (snapshot) => ({ ...snapshot, body: snapshot.body.replace(/wrk_[A-Z0-9]+/g, "wrk_redacted") }),
|
||||
},
|
||||
),
|
||||
}).pipe(Layer.provide(FetchHttpClient.layer))
|
||||
const executor = RequestExecutor.layer.pipe(Layer.provide(recorder))
|
||||
const client = LLMClient.layer.pipe(Layer.provide(executor))
|
||||
|
||||
const providerLayer = Provider.defaultLayer.pipe(
|
||||
Layer.provide(Auth.defaultLayer),
|
||||
Layer.provide(Config.defaultLayer),
|
||||
Layer.provide(Plugin.defaultLayer),
|
||||
)
|
||||
const llmLayer = LLM.layer.pipe(
|
||||
Layer.provide(Auth.defaultLayer),
|
||||
Layer.provide(Config.defaultLayer),
|
||||
Layer.provide(Provider.defaultLayer),
|
||||
Layer.provide(Plugin.defaultLayer),
|
||||
Layer.provide(client),
|
||||
Layer.provide(cassetteService),
|
||||
Layer.provide(RuntimeFlags.layer({ experimentalNativeLlm: true })),
|
||||
)
|
||||
|
||||
return Layer.mergeAll(providerLayer, llmLayer)
|
||||
}
|
||||
|
||||
const openAIIt = testEffect(
|
||||
recordedNativeLLMLayer(OPENAI_CASSETTE, {
|
||||
provider: "openai",
|
||||
protocol: "openai-responses",
|
||||
route: "openai-responses",
|
||||
tags: ["opencode", "native", "tool-call"],
|
||||
}),
|
||||
)
|
||||
const zenIt = testEffect(
|
||||
recordedNativeLLMLayer(ZEN_CASSETTE, {
|
||||
provider: "opencode",
|
||||
protocol: "openai-responses",
|
||||
route: "openai-responses",
|
||||
tags: ["opencode", "zen", "native", "tool-call"],
|
||||
}),
|
||||
)
|
||||
const recordedOpenAIInstance = canRunOpenAI ? openAIIt.instance : openAIIt.instance.skip
|
||||
const recordedZenInstance = canRunZen ? zenIt.instance : zenIt.instance.skip
|
||||
|
||||
const writeConfig = (
|
||||
directory: string,
|
||||
model: ModelsDev.Provider["models"][string],
|
||||
config: (model: ModelsDev.Provider["models"][string]) => Partial<Config.Info> = openAIConfig,
|
||||
) =>
|
||||
Effect.promise(() =>
|
||||
Bun.write(
|
||||
path.join(directory, "opencode.json"),
|
||||
JSON.stringify({ $schema: "https://opencode.ai/config.json", ...config(model) }),
|
||||
),
|
||||
)
|
||||
|
||||
const getModel = (providerID: ProviderID, modelID: ModelID) =>
|
||||
Effect.gen(function* () {
|
||||
const provider = yield* Provider.Service
|
||||
return yield* provider.getModel(providerID, modelID)
|
||||
})
|
||||
|
||||
const collect = (input: LLM.StreamInput) =>
|
||||
Effect.gen(function* () {
|
||||
const llm = yield* LLM.Service
|
||||
return Array.from(yield* llm.stream(input).pipe(Stream.runCollect))
|
||||
})
|
||||
|
||||
describe("session.llm native recorded", () => {
|
||||
recordedOpenAIInstance("uses real RequestExecutor with HTTP recorder for native OpenAI tools", () =>
|
||||
Effect.gen(function* () {
|
||||
const test = yield* TestInstance
|
||||
const model = yield* Effect.promise(() => loadFixture("openai", "gpt-4.1-mini"))
|
||||
yield* writeConfig(test.directory, model)
|
||||
|
||||
const sessionID = SessionID.make("session-recorded-native-tool")
|
||||
const agent = {
|
||||
name: "test",
|
||||
mode: "primary",
|
||||
prompt: "Call tools exactly as instructed.",
|
||||
options: {},
|
||||
permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
||||
temperature: 0,
|
||||
} satisfies Agent.Info
|
||||
const resolved = yield* getModel(ProviderID.openai, ModelID.make(model.id))
|
||||
let executed: unknown
|
||||
|
||||
const events = yield* collect({
|
||||
user: {
|
||||
id: MessageID.make("msg_user-recorded-native-tool"),
|
||||
sessionID,
|
||||
role: "user",
|
||||
time: { created: 0 },
|
||||
agent: agent.name,
|
||||
model: { providerID: ProviderID.make("openai"), modelID: ModelID.make(model.id) },
|
||||
} satisfies MessageV2.User,
|
||||
sessionID,
|
||||
model: resolved,
|
||||
agent,
|
||||
system: ["You must call the lookup tool exactly once with query weather. Do not answer in text."],
|
||||
messages: [{ role: "user", content: "Use lookup." }],
|
||||
toolChoice: "required",
|
||||
tools: {
|
||||
lookup: tool({
|
||||
description: "Lookup data.",
|
||||
inputSchema: z.object({ query: z.string() }),
|
||||
execute: async (args, options) => {
|
||||
executed = { args, toolCallId: options.toolCallId }
|
||||
return { output: "looked up" }
|
||||
},
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
expect(events.filter((event) => event.type === "step-finish")).toHaveLength(1)
|
||||
expect(events.filter((event) => event.type === "finish")).toHaveLength(1)
|
||||
expect(events.some((event) => event.type === "tool-result")).toBe(true)
|
||||
expect(executed).toMatchObject({ args: { query: "weather" }, toolCallId: expect.any(String) })
|
||||
}),
|
||||
)
|
||||
|
||||
recordedZenInstance("uses console-managed Zen config with native OpenAI-compatible tools", () =>
|
||||
Effect.gen(function* () {
|
||||
const test = yield* TestInstance
|
||||
const model = yield* Effect.promise(() => loadFixture("opencode", "gpt-5.2-codex"))
|
||||
yield* writeConfig(test.directory, model, zenConfig)
|
||||
|
||||
const sessionID = SessionID.make("session-recorded-native-zen-tool")
|
||||
const agent = {
|
||||
name: "test",
|
||||
mode: "primary",
|
||||
prompt: "Call tools exactly as instructed.",
|
||||
options: {},
|
||||
permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
||||
} satisfies Agent.Info
|
||||
const resolved = yield* getModel(ProviderID.opencode, ModelID.make(model.id))
|
||||
let executed: unknown
|
||||
|
||||
const events = yield* collect({
|
||||
user: {
|
||||
id: MessageID.make("msg_user-recorded-native-zen-tool"),
|
||||
sessionID,
|
||||
role: "user",
|
||||
time: { created: 0 },
|
||||
agent: agent.name,
|
||||
model: { providerID: ProviderID.opencode, modelID: ModelID.make(model.id) },
|
||||
} satisfies MessageV2.User,
|
||||
sessionID,
|
||||
model: resolved,
|
||||
agent,
|
||||
system: ["You must call the lookup tool exactly once with query weather. Do not answer in text."],
|
||||
messages: [{ role: "user", content: "Use lookup." }],
|
||||
toolChoice: "required",
|
||||
tools: {
|
||||
lookup: tool({
|
||||
description: "Lookup data.",
|
||||
inputSchema: z.object({ query: z.string() }),
|
||||
execute: async (args, options) => {
|
||||
executed = { args, toolCallId: options.toolCallId }
|
||||
return { output: "looked up" }
|
||||
},
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
expect(events.filter((event) => event.type === "step-finish")).toHaveLength(1)
|
||||
expect(events.filter((event) => event.type === "finish")).toHaveLength(1)
|
||||
expect(events.some((event) => event.type === "tool-result")).toBe(true)
|
||||
expect(executed).toMatchObject({ args: { query: "weather" }, toolCallId: expect.any(String) })
|
||||
}),
|
||||
)
|
||||
})
|
||||
303
packages/opencode/test/session/llm-native.test.ts
Normal file
@@ -0,0 +1,303 @@
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
|
||||
import { jsonSchema, tool, type ModelMessage } from "ai"
|
||||
import { Effect } from "effect"
|
||||
import { LLMNative } from "@/session/llm/native-request"
|
||||
import { LLMNativeRuntime } from "@/session/llm/native-runtime"
|
||||
import type { Provider } from "@/provider/provider"
|
||||
import { ModelID, ProviderID } from "@/provider/schema"
|
||||
|
||||
const baseModel: Provider.Model = {
|
||||
id: ModelID.make("gpt-5-mini"),
|
||||
providerID: ProviderID.make("openai"),
|
||||
api: {
|
||||
id: "gpt-5-mini",
|
||||
url: "https://api.openai.com/v1",
|
||||
npm: "@ai-sdk/openai",
|
||||
},
|
||||
name: "GPT-5 Mini",
|
||||
capabilities: {
|
||||
temperature: true,
|
||||
reasoning: true,
|
||||
attachment: true,
|
||||
toolcall: true,
|
||||
input: {
|
||||
text: true,
|
||||
audio: false,
|
||||
image: true,
|
||||
video: false,
|
||||
pdf: false,
|
||||
},
|
||||
output: {
|
||||
text: true,
|
||||
audio: false,
|
||||
image: false,
|
||||
video: false,
|
||||
pdf: false,
|
||||
},
|
||||
interleaved: false,
|
||||
},
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cache: {
|
||||
read: 0,
|
||||
write: 0,
|
||||
},
|
||||
},
|
||||
limit: {
|
||||
context: 128_000,
|
||||
input: 128_000,
|
||||
output: 32_000,
|
||||
},
|
||||
status: "active",
|
||||
options: {},
|
||||
headers: {
|
||||
"x-model": "model-header",
|
||||
},
|
||||
release_date: "2026-01-01",
|
||||
}
|
||||
|
||||
const providerInfo: Provider.Info = {
|
||||
id: ProviderID.make("openai"),
|
||||
name: "OpenAI",
|
||||
source: "config",
|
||||
env: ["OPENAI_API_KEY"],
|
||||
options: { apiKey: "test-openai-key" },
|
||||
models: {},
|
||||
}
|
||||
|
||||
describe("session.llm-native.request", () => {
|
||||
test("maps normalized stream inputs to a native LLM request", () => {
|
||||
const messages: ModelMessage[] = [
|
||||
{
|
||||
role: "system",
|
||||
content: "system from messages",
|
||||
},
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "hello", providerOptions: { openai: { cacheControl: { type: "ephemeral" } } } },
|
||||
{ type: "file", mediaType: "image/png", filename: "img.png", data: "data:image/png;base64,Zm9v" },
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "reasoning", text: "thinking", providerOptions: { openai: { encryptedContent: "secret" } } },
|
||||
{ type: "text", text: "I'll run it" },
|
||||
{
|
||||
type: "tool-call",
|
||||
toolCallId: "call-1",
|
||||
toolName: "bash",
|
||||
input: { command: "ls" },
|
||||
providerOptions: { openai: { itemId: "item-1" } },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "tool",
|
||||
content: [
|
||||
{
|
||||
type: "tool-result",
|
||||
toolCallId: "call-1",
|
||||
toolName: "bash",
|
||||
output: { type: "text", value: "ok" },
|
||||
providerOptions: { openai: { outputId: "output-1" } },
|
||||
},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
const request = LLMNative.request({
|
||||
model: baseModel,
|
||||
system: ["agent system"],
|
||||
messages,
|
||||
tools: {
|
||||
bash: tool({
|
||||
description: "Run a shell command",
|
||||
inputSchema: jsonSchema({
|
||||
type: "object",
|
||||
properties: {
|
||||
command: { type: "string" },
|
||||
},
|
||||
required: ["command"],
|
||||
}),
|
||||
}),
|
||||
},
|
||||
toolChoice: "required",
|
||||
temperature: 0.2,
|
||||
topP: 0.9,
|
||||
topK: 40,
|
||||
maxOutputTokens: 1024,
|
||||
providerOptions: { openai: { store: false } },
|
||||
headers: { "x-request": "request-header" },
|
||||
})
|
||||
|
||||
expect(request.model).toMatchObject({
|
||||
id: "gpt-5-mini",
|
||||
provider: "openai",
|
||||
route: "openai-responses",
|
||||
baseURL: "https://api.openai.com/v1",
|
||||
headers: {
|
||||
"x-model": "model-header",
|
||||
"x-request": "request-header",
|
||||
},
|
||||
limits: {
|
||||
context: 128_000,
|
||||
output: 32_000,
|
||||
},
|
||||
})
|
||||
expect(request.system).toEqual([
|
||||
{ type: "text", text: "agent system" },
|
||||
{ type: "text", text: "system from messages" },
|
||||
])
|
||||
expect(request.generation).toMatchObject({
|
||||
temperature: 0.2,
|
||||
topP: 0.9,
|
||||
topK: 40,
|
||||
maxTokens: 1024,
|
||||
})
|
||||
expect(request.providerOptions).toEqual({ openai: { store: false } })
|
||||
expect(request.toolChoice).toMatchObject({ type: "required" })
|
||||
expect(request.tools).toMatchObject([
|
||||
{
|
||||
name: "bash",
|
||||
description: "Run a shell command",
|
||||
inputSchema: {
|
||||
type: "object",
|
||||
properties: {
|
||||
command: { type: "string" },
|
||||
},
|
||||
required: ["command"],
|
||||
},
|
||||
},
|
||||
])
|
||||
expect(request.messages).toMatchObject([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "hello", providerMetadata: { openai: { cacheControl: { type: "ephemeral" } } } },
|
||||
{ type: "media", mediaType: "image/png", filename: "img.png", data: "data:image/png;base64,Zm9v" },
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "reasoning", text: "thinking", providerMetadata: { openai: { encryptedContent: "secret" } } },
|
||||
{ type: "text", text: "I'll run it" },
|
||||
{
|
||||
type: "tool-call",
|
||||
id: "call-1",
|
||||
name: "bash",
|
||||
input: { command: "ls" },
|
||||
providerMetadata: { openai: { itemId: "item-1" } },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "tool",
|
||||
content: [
|
||||
{
|
||||
type: "tool-result",
|
||||
id: "call-1",
|
||||
name: "bash",
|
||||
result: { type: "text", value: "ok" },
|
||||
providerMetadata: { openai: { outputId: "output-1" } },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("selects native routes from existing provider packages", () => {
|
||||
expect(
|
||||
LLMNative.model({ ...baseModel, api: { ...baseModel.api, url: "", npm: "@ai-sdk/anthropic" } }),
|
||||
).toMatchObject({
|
||||
route: "anthropic-messages",
|
||||
baseURL: "https://api.anthropic.com/v1",
|
||||
})
|
||||
expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, url: "", npm: "@ai-sdk/google" } })).toMatchObject({
|
||||
route: "gemini",
|
||||
baseURL: "https://generativelanguage.googleapis.com/v1beta",
|
||||
})
|
||||
expect(
|
||||
LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/openai-compatible" } }),
|
||||
).toMatchObject({
|
||||
route: "openai-compatible-chat",
|
||||
baseURL: "https://api.openai.com/v1",
|
||||
})
|
||||
expect(
|
||||
LLMNative.model({ ...baseModel, api: { ...baseModel.api, url: "", npm: "@openrouter/ai-sdk-provider" } }),
|
||||
).toMatchObject({
|
||||
route: "openrouter",
|
||||
baseURL: "https://openrouter.ai/api/v1",
|
||||
})
|
||||
})
|
||||
|
||||
test("fails fast for unsupported provider packages", () => {
|
||||
expect(() =>
|
||||
LLMNative.request({
|
||||
model: { ...baseModel, api: { ...baseModel.api, npm: "unknown-provider" } },
|
||||
messages: [],
|
||||
}),
|
||||
).toThrow("Native LLM request adapter does not support provider package unknown-provider")
|
||||
})
|
||||
|
||||
test("only enables native runtime for supported OpenAI API-key models", () => {
|
||||
expect(LLMNativeRuntime.status({ model: baseModel, provider: providerInfo, auth: undefined })).toMatchObject({
|
||||
type: "supported",
|
||||
apiKey: "test-openai-key",
|
||||
})
|
||||
expect(
|
||||
LLMNativeRuntime.status({
|
||||
model: { ...baseModel, providerID: ProviderID.make("opencode") },
|
||||
provider: { ...providerInfo, id: ProviderID.make("opencode") },
|
||||
auth: undefined,
|
||||
}),
|
||||
).toMatchObject({
|
||||
type: "supported",
|
||||
apiKey: "test-openai-key",
|
||||
})
|
||||
expect(
|
||||
LLMNativeRuntime.status({
|
||||
model: { ...baseModel, providerID: ProviderID.make("anthropic") },
|
||||
provider: { ...providerInfo, id: ProviderID.make("anthropic") },
|
||||
auth: undefined,
|
||||
}),
|
||||
).toEqual({ type: "unsupported", reason: "provider is not openai or opencode" })
|
||||
expect(
|
||||
LLMNativeRuntime.status({
|
||||
model: baseModel,
|
||||
provider: providerInfo,
|
||||
auth: { type: "oauth", refresh: "refresh", access: "access", expires: 1 },
|
||||
}),
|
||||
).toEqual({ type: "unsupported", reason: "OAuth auth is not supported" })
|
||||
})
|
||||
|
||||
test("compiles through the native OpenAI Responses route", async () => {
|
||||
const prepared = await Effect.runPromise(
|
||||
LLMClient.prepare(
|
||||
LLMNative.request({
|
||||
model: baseModel,
|
||||
messages: [{ role: "user", content: "hello" }],
|
||||
providerOptions: { openai: { store: false } },
|
||||
maxOutputTokens: 512,
|
||||
headers: { "x-request": "request-header" },
|
||||
}),
|
||||
).pipe(Effect.provide(LLMClient.layer), Effect.provide(RequestExecutor.defaultLayer)),
|
||||
)
|
||||
|
||||
expect(prepared).toMatchObject({
|
||||
route: "openai-responses",
|
||||
protocol: "openai-responses",
|
||||
body: {
|
||||
model: "gpt-5-mini",
|
||||
input: [{ role: "user", content: [{ type: "input_text", text: "hello" }] }],
|
||||
max_output_tokens: 512,
|
||||
store: false,
|
||||
stream: true,
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,15 +1,20 @@
import { afterAll, beforeAll, beforeEach, describe, expect, test } from "bun:test"
import path from "path"
import { tool, type ModelMessage } from "ai"
import { Cause, Effect, Exit, Stream } from "effect"
import { Cause, Effect, Exit, Layer, Stream } from "effect"
import { HttpClientRequest, HttpClientResponse } from "effect/unstable/http"
import z from "zod"
import { makeRuntime } from "../../src/effect/run-service"
import { InstanceRef } from "../../src/effect/instance-ref"
import { LLM } from "../../src/session/llm"
import type { InstanceContext } from "../../src/project/instance-context"
import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
import { Auth } from "@/auth"
import { Config } from "@/config/config"
import { Provider } from "@/provider/provider"
import { ProviderTransform } from "@/provider/transform"
import { ModelsDev } from "@opencode-ai/core/models"
import { Plugin } from "@/plugin"
import { ProviderID, ModelID } from "../../src/provider/schema"
import { Filesystem } from "@/util/filesystem"
import { tmpdir, withTestInstance } from "../fixture/fixture"
@@ -17,6 +22,33 @@ import type { Agent } from "../../src/agent/agent"
|
||||
import { MessageV2 } from "../../src/session/message-v2"
|
||||
import { SessionID, MessageID } from "../../src/session/schema"
|
||||
import { AppRuntime } from "../../src/effect/app-runtime"
|
||||
import { RuntimeFlags } from "@/effect/runtime-flags"
|
||||
import { Permission } from "@/permission"
|
||||
import { LLMAISDK } from "@/session/llm/ai-sdk"
|
||||
import { Session as SessionNs } from "@/session/session"
|
||||
|
||||
const openAIConfig = (model: ModelsDev.Provider["models"][string], baseURL: string): Partial<Config.Info> => {
|
||||
const { experimental: _experimental, ...configModel } = model
|
||||
type ConfigModel = NonNullable<NonNullable<Config.Info["provider"]>[string]["models"]>[string]
|
||||
return {
|
||||
enabled_providers: ["openai"],
|
||||
provider: {
|
||||
openai: {
|
||||
name: "OpenAI",
|
||||
env: ["OPENAI_API_KEY"],
|
||||
npm: "@ai-sdk/openai",
|
||||
api: "https://api.openai.com/v1",
|
||||
models: {
|
||||
[model.id]: JSON.parse(JSON.stringify(configModel)) as ConfigModel,
|
||||
},
|
||||
options: {
|
||||
apiKey: "test-openai-key",
|
||||
baseURL,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async function getModel(providerID: ProviderID, modelID: ModelID, ctx: InstanceContext) {
|
||||
const effect = Effect.gen(function* () {
|
||||
@@ -35,6 +67,26 @@ async function drain(input: LLM.StreamInput, ctx: InstanceContext) {
|
||||
})
|
||||
}
|
||||
|
||||
async function drainWith(layer: Layer.Layer<LLM.Service>, input: LLM.StreamInput, ctx: InstanceContext) {
|
||||
return Effect.runPromise(
|
||||
LLM.Service.use((svc) => svc.stream(input).pipe(Stream.runDrain)).pipe(
|
||||
Effect.provide(layer),
|
||||
Effect.provideService(InstanceRef, ctx),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
function llmLayerWithExecutor(executor: Layer.Layer<RequestExecutor.Service>, flags: Partial<RuntimeFlags.Info> = {}) {
|
||||
return LLM.layer.pipe(
|
||||
Layer.provide(Auth.defaultLayer),
|
||||
Layer.provide(Config.defaultLayer),
|
||||
Layer.provide(Provider.defaultLayer),
|
||||
Layer.provide(Plugin.defaultLayer),
|
||||
Layer.provide(LLMClient.layer.pipe(Layer.provide(executor))),
|
||||
Layer.provide(RuntimeFlags.layer(flags)),
|
||||
)
|
||||
}
|
||||
|
||||
describe("session.llm.hasToolCalls", () => {
|
||||
test("returns false for empty messages array", () => {
|
||||
expect(LLM.hasToolCalls([])).toBe(false)
|
||||
@@ -122,6 +174,245 @@ describe("session.llm.hasToolCalls", () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe("session.llm.ai-sdk adapter", () => {
|
||||
type AISDKAdapterEvent = Parameters<typeof LLMAISDK.toLLMEvents>[1]
|
||||
|
||||
const adapt = (events: ReadonlyArray<AISDKAdapterEvent>) => {
|
||||
const state = LLMAISDK.adapterState()
|
||||
return Effect.runPromise(
|
||||
Effect.forEach(events, (event) => LLMAISDK.toLLMEvents(state, event)).pipe(Effect.map((items) => items.flat())),
|
||||
)
|
||||
}
|
||||
// oxlint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- tests defensive adapter branches outside AI SDK's current typed surface
|
||||
const uncheckedAdapterEvent = (input: unknown) => input as AISDKAdapterEvent
|
||||
|
||||
test("maps AI SDK stream chunks without losing session-visible fields", async () => {
|
||||
const metadata = { openai: { itemID: "item-1" } }
|
||||
const events = await adapt([
|
||||
{ type: "start" },
|
||||
{ type: "start-step", request: {}, warnings: [] },
|
||||
{ type: "text-start", id: "text-1", providerMetadata: metadata },
|
||||
{ type: "text-delta", id: "text-1", text: "Hel", providerMetadata: { openai: { delta: 1 } } },
|
||||
{ type: "text-delta", id: "text-1", text: "lo", providerMetadata: { openai: { delta: 2 } } },
|
||||
{ type: "text-end", id: "text-1", providerMetadata: { openai: { done: true } } },
|
||||
{ type: "reasoning-start", id: "reasoning-1", providerMetadata: metadata },
|
||||
{ type: "reasoning-delta", id: "reasoning-1", text: "Think", providerMetadata: { openai: { delta: 3 } } },
|
||||
{ type: "reasoning-end", id: "reasoning-1", providerMetadata: { openai: { done: true } } },
|
||||
{ type: "tool-input-start", id: "call-1", toolName: "lookup", providerMetadata: metadata },
|
||||
{ type: "tool-input-delta", id: "call-1", delta: '{"query":' },
|
||||
{ type: "tool-input-delta", id: "call-1", delta: '"weather"}' },
|
||||
{ type: "tool-input-end", id: "call-1", providerMetadata: { openai: { inputDone: true } } },
|
||||
{
|
||||
type: "tool-call",
|
||||
toolCallId: "call-1",
|
||||
toolName: "lookup",
|
||||
input: { query: "weather" },
|
||||
providerExecuted: true,
|
||||
providerMetadata: { openai: { called: true } },
|
||||
},
|
||||
{
|
||||
type: "tool-result",
|
||||
toolCallId: "call-1",
|
||||
toolName: "lookup",
|
||||
input: { query: "weather" },
|
||||
output: { title: "Lookup", output: "sunny", metadata: { ok: true } },
|
||||
providerExecuted: true,
|
||||
providerMetadata: { openai: { result: true } },
|
||||
},
|
||||
{
|
||||
type: "finish-step",
|
||||
response: { id: "response-1", timestamp: new Date(0), modelId: "gpt-test" },
|
||||
finishReason: "other",
|
||||
rawFinishReason: "other",
|
||||
usage: {
|
||||
inputTokens: 10,
|
||||
outputTokens: 5,
|
||||
totalTokens: 15,
|
||||
inputTokenDetails: { noCacheTokens: 5, cacheReadTokens: 3, cacheWriteTokens: 2 },
|
||||
outputTokenDetails: { textTokens: 4, reasoningTokens: 1 },
|
||||
},
|
||||
providerMetadata: { openai: { step: true } },
|
||||
},
|
||||
{
|
||||
type: "finish",
|
||||
finishReason: "other",
|
||||
rawFinishReason: "other",
|
||||
totalUsage: {
|
||||
inputTokens: 11,
|
||||
outputTokens: 6,
|
||||
totalTokens: 17,
|
||||
cachedInputTokens: 4,
|
||||
reasoningTokens: 2,
|
||||
inputTokenDetails: { noCacheTokens: 7, cacheReadTokens: 4, cacheWriteTokens: undefined },
|
||||
outputTokenDetails: { textTokens: 4, reasoningTokens: 2 },
|
||||
},
|
||||
},
|
||||
])
|
||||
|
||||
expect(events).toMatchObject([
|
||||
{ type: "step-start", index: 0 },
|
||||
{ type: "text-start", id: "text-1", providerMetadata: metadata },
|
||||
{ type: "text-delta", id: "text-1", text: "Hel", providerMetadata: { openai: { delta: 1 } } },
|
||||
{ type: "text-delta", id: "text-1", text: "lo", providerMetadata: { openai: { delta: 2 } } },
|
||||
{ type: "text-end", id: "text-1", providerMetadata: { openai: { done: true } } },
|
||||
{ type: "reasoning-start", id: "reasoning-1", providerMetadata: metadata },
|
||||
{ type: "reasoning-delta", id: "reasoning-1", text: "Think", providerMetadata: { openai: { delta: 3 } } },
|
||||
{ type: "reasoning-end", id: "reasoning-1", providerMetadata: { openai: { done: true } } },
|
||||
{ type: "tool-input-start", id: "call-1", name: "lookup", providerMetadata: metadata },
|
||||
{ type: "tool-input-delta", id: "call-1", name: "lookup", text: '{"query":' },
|
||||
{ type: "tool-input-delta", id: "call-1", name: "lookup", text: '"weather"}' },
|
||||
{ type: "tool-input-end", id: "call-1", name: "lookup", providerMetadata: { openai: { inputDone: true } } },
|
||||
{
|
||||
type: "tool-call",
|
||||
id: "call-1",
|
||||
name: "lookup",
|
||||
input: { query: "weather" },
|
||||
providerExecuted: true,
|
||||
providerMetadata: { openai: { called: true } },
|
||||
},
|
||||
{
|
||||
type: "tool-result",
|
||||
id: "call-1",
|
||||
name: "lookup",
|
||||
result: { type: "json", value: { title: "Lookup", output: "sunny", metadata: { ok: true } } },
|
||||
providerExecuted: true,
|
||||
providerMetadata: { openai: { result: true } },
|
||||
},
|
||||
{
|
||||
type: "step-finish",
|
||||
index: 0,
|
||||
reason: "other",
|
||||
usage: {
|
||||
inputTokens: 10,
|
||||
outputTokens: 5,
|
||||
totalTokens: 15,
|
||||
reasoningTokens: 1,
|
||||
cacheReadInputTokens: 3,
|
||||
cacheWriteInputTokens: 2,
|
||||
},
|
||||
providerMetadata: { openai: { step: true } },
|
||||
},
|
||||
{
|
||||
type: "finish",
|
||||
reason: "other",
|
||||
usage: {
|
||||
inputTokens: 11,
|
||||
outputTokens: 6,
|
||||
totalTokens: 17,
|
||||
reasoningTokens: 2,
|
||||
cacheReadInputTokens: 4,
|
||||
},
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("creates stable block ids when AI SDK omits them", async () => {
|
||||
const events = await adapt([
|
||||
uncheckedAdapterEvent({ type: "text-delta", text: "implicit text" }),
|
||||
uncheckedAdapterEvent({ type: "text-end" }),
|
||||
uncheckedAdapterEvent({ type: "reasoning-delta", text: "implicit reasoning" }),
|
||||
uncheckedAdapterEvent({ type: "reasoning-end" }),
|
||||
])
|
||||
|
||||
expect(events).toMatchObject([
|
||||
{ type: "text-delta", id: "text-0", text: "implicit text" },
|
||||
{ type: "text-end", id: "text-0" },
|
||||
{ type: "reasoning-delta", id: "reasoning-0", text: "implicit reasoning" },
|
||||
{ type: "reasoning-end", id: "reasoning-0" },
|
||||
])
|
||||
})
|
||||
|
||||
test("explicitly ignores non-session-visible AI SDK chunks", async () => {
|
||||
expect(
|
||||
await adapt([
|
||||
uncheckedAdapterEvent({ type: "abort" }),
|
||||
uncheckedAdapterEvent({ type: "source" }),
|
||||
uncheckedAdapterEvent({ type: "file" }),
|
||||
uncheckedAdapterEvent({ type: "raw" }),
|
||||
uncheckedAdapterEvent({ type: "tool-output-denied" }),
|
||||
uncheckedAdapterEvent({ type: "tool-approval-request" }),
|
||||
]),
|
||||
).toEqual([])
|
||||
})
|
||||
|
||||
test("preserves tool-error cause", async () => {
|
||||
const error = new Permission.RejectedError()
|
||||
const events = await Effect.runPromise(
|
||||
LLMAISDK.toLLMEvents(LLMAISDK.adapterState(), {
|
||||
type: "tool-error",
|
||||
toolCallId: "call_123",
|
||||
toolName: "bash",
|
||||
input: {},
|
||||
error,
|
||||
}),
|
||||
)
|
||||
|
||||
expect(events).toHaveLength(1)
|
||||
expect(events[0]).toMatchObject({
|
||||
type: "tool-error",
|
||||
id: "call_123",
|
||||
name: "bash",
|
||||
message: error.message,
|
||||
error,
|
||||
})
|
||||
})
|
||||
|
||||
// Anthropic emits cache write counts in providerMetadata.anthropic.cacheCreationInputTokens
|
||||
// rather than usage.inputTokenDetails.cacheWriteTokens. Session.getUsage falls back to the
|
||||
// metadata path — but only if the adapter preserves providerMetadata on step-finish.
|
||||
test("preserves providerMetadata on step-finish so Anthropic cache writes survive getUsage", async () => {
|
||||
const events = await adapt([
|
||||
{
|
||||
type: "finish-step",
|
||||
response: { id: "msg_test", timestamp: new Date(0), modelId: "claude-3-5-sonnet" },
|
||||
finishReason: "stop",
|
||||
rawFinishReason: "stop",
|
||||
// Anthropic's AI SDK shape: cacheWriteTokens is NOT in usage, it arrives via providerMetadata.
|
||||
usage: {
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
inputTokenDetails: { noCacheTokens: 800, cacheReadTokens: 200, cacheWriteTokens: undefined },
|
||||
outputTokenDetails: { textTokens: 500, reasoningTokens: undefined },
|
||||
},
|
||||
providerMetadata: { anthropic: { cacheCreationInputTokens: 300 } },
|
||||
},
|
||||
])
|
||||
|
||||
expect(events).toHaveLength(1)
|
||||
const stepFinish = events[0]
|
||||
if (stepFinish.type !== "step-finish") throw new Error("expected step-finish")
|
||||
expect(stepFinish.providerMetadata).toEqual({ anthropic: { cacheCreationInputTokens: 300 } })
|
||||
expect(stepFinish.usage?.cacheWriteInputTokens).toBeUndefined()
|
||||
expect(stepFinish.usage?.cacheReadInputTokens).toBe(200)
|
||||
|
||||
// End-to-end: with the metadata preserved, getUsage extracts cache.write from the fallback path.
|
||||
const result = SessionNs.getUsage({
|
||||
model: {
|
||||
id: "claude-3-5-sonnet",
|
||||
providerID: "anthropic",
|
||||
name: "Claude",
|
||||
limit: { context: 200_000, output: 8_000 },
|
||||
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
|
||||
capabilities: {
|
||||
toolcall: true,
|
||||
attachment: false,
|
||||
reasoning: false,
|
||||
temperature: true,
|
||||
input: { text: true, image: false, audio: false, video: false },
|
||||
output: { text: true, image: false, audio: false, video: false },
|
||||
},
|
||||
api: { npm: "@ai-sdk/anthropic" },
|
||||
options: {},
|
||||
} as never,
|
||||
usage: stepFinish.usage!,
|
||||
metadata: stepFinish.providerMetadata,
|
||||
})
|
||||
expect(result.tokens.cache.write).toBe(300)
|
||||
expect(result.tokens.cache.read).toBe(200)
|
||||
})
|
||||
})
|
||||
|
||||
type Capture = {
|
||||
url: URL
|
||||
headers: Headers
|
||||
@@ -608,6 +899,18 @@ describe("session.llm.stream", () => {
|
||||
service_tier: null,
|
||||
},
|
||||
},
|
||||
{
|
||||
type: "response.output_item.added",
|
||||
output_index: 0,
|
||||
item: { type: "message", id: "item-1", status: "in_progress", role: "assistant", content: [] },
|
||||
},
|
||||
{
|
||||
type: "response.content_part.added",
|
||||
item_id: "item-1",
|
||||
output_index: 0,
|
||||
content_index: 0,
|
||||
part: { type: "output_text", text: "", annotations: [] },
|
||||
},
|
||||
{
|
||||
type: "response.output_text.delta",
|
||||
item_id: "item-1",
|
||||
@@ -630,32 +933,7 @@ describe("session.llm.stream", () => {
|
||||
]
|
||||
const request = waitRequest("/responses", createEventResponse(responseChunks, true))
|
||||
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
enabled_providers: ["openai"],
|
||||
provider: {
|
||||
openai: {
|
||||
name: "OpenAI",
|
||||
env: ["OPENAI_API_KEY"],
|
||||
npm: "@ai-sdk/openai",
|
||||
api: "https://api.openai.com/v1",
|
||||
models: {
|
||||
[model.id]: configModel(model),
|
||||
},
|
||||
options: {
|
||||
apiKey: "test-openai-key",
|
||||
baseURL: `${server.url.origin}/v1`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await using tmp = await tmpdir({ config: openAIConfig(model, `${server.url.origin}/v1`) })
|
||||
|
||||
await withTestInstance({
|
||||
directory: tmp.path,
|
||||
@@ -706,6 +984,438 @@ describe("session.llm.stream", () => {
|
||||
})
|
||||
})
|
||||
|
||||
test("keeps supported OpenAI models on AI SDK path when native flag is off", async () => {
|
||||
const server = state.server
|
||||
if (!server) {
|
||||
throw new Error("Server not initialized")
|
||||
}
|
||||
|
||||
const source = await loadFixture("openai", "gpt-5.2")
|
||||
const model = source.model
|
||||
const request = waitRequest(
|
||||
"/responses",
|
||||
createEventResponse(
|
||||
[
|
||||
{
|
||||
type: "response.created",
|
||||
response: {
|
||||
id: "resp-flag-off",
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
model: model.id,
|
||||
service_tier: null,
|
||||
},
|
||||
},
|
||||
{
|
||||
type: "response.output_item.added",
|
||||
output_index: 0,
|
||||
item: { type: "message", id: "item-flag-off", status: "in_progress", role: "assistant", content: [] },
|
||||
},
|
||||
{
|
||||
type: "response.content_part.added",
|
||||
item_id: "item-flag-off",
|
||||
output_index: 0,
|
||||
content_index: 0,
|
||||
part: { type: "output_text", text: "", annotations: [] },
|
||||
},
|
||||
{
|
||||
type: "response.output_text.delta",
|
||||
item_id: "item-flag-off",
|
||||
delta: "Flag off",
|
||||
logprobs: null,
|
||||
},
|
||||
{
|
||||
type: "response.completed",
|
||||
response: {
|
||||
incomplete_details: null,
|
||||
usage: {
|
||||
input_tokens: 1,
|
||||
input_tokens_details: null,
|
||||
output_tokens: 1,
|
||||
output_tokens_details: null,
|
||||
},
|
||||
service_tier: null,
|
||||
},
|
||||
},
|
||||
],
|
||||
true,
|
||||
),
|
||||
)
|
||||
const failingNativeClient = Layer.succeed(
|
||||
LLMClient.Service,
|
||||
LLMClient.Service.of({
|
||||
prepare: () => Effect.die(new Error("native LLM client should not be used when the flag is off")),
|
||||
stream: () => Stream.die(new Error("native LLM client should not be used when the flag is off")),
|
||||
generate: () => Effect.die(new Error("native LLM client should not be used when the flag is off")),
|
||||
}),
|
||||
)
|
||||
|
||||
await using tmp = await tmpdir({ config: openAIConfig(model, `${server.url.origin}/v1`) })
|
||||
|
||||
await withTestInstance({
|
||||
directory: tmp.path,
|
||||
fn: async (ctx) => {
|
||||
const resolved = await getModel(ProviderID.openai, ModelID.make(model.id), ctx)
|
||||
const sessionID = SessionID.make("session-test-native-flag-off")
|
||||
const agent = {
|
||||
name: "test",
|
||||
mode: "primary",
|
||||
options: {},
|
||||
permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
||||
} satisfies Agent.Info
|
||||
|
||||
await drainWith(
|
||||
LLM.layer.pipe(
|
||||
Layer.provide(Auth.defaultLayer),
|
||||
Layer.provide(Config.defaultLayer),
|
||||
Layer.provide(Provider.defaultLayer),
|
||||
Layer.provide(Plugin.defaultLayer),
|
||||
Layer.provide(failingNativeClient),
|
||||
Layer.provide(RuntimeFlags.layer({ experimentalNativeLlm: false })),
|
||||
),
|
||||
{
|
||||
user: {
|
||||
id: MessageID.make("msg_user-native-flag-off"),
|
||||
sessionID,
|
||||
role: "user",
|
||||
time: { created: Date.now() },
|
||||
agent: agent.name,
|
||||
model: { providerID: ProviderID.make("openai"), modelID: resolved.id, variant: "high" },
|
||||
} satisfies MessageV2.User,
|
||||
sessionID,
|
||||
model: resolved,
|
||||
agent,
|
||||
system: ["You are a helpful assistant."],
|
||||
messages: [{ role: "user", content: "Hello" }],
|
||||
tools: {},
|
||||
},
|
||||
ctx,
|
||||
)
|
||||
|
||||
const capture = await request
|
||||
expect(capture.url.pathname.endsWith("/responses")).toBe(true)
|
||||
expect(capture.body.model).toBe(resolved.api.id)
|
||||
},
|
||||
})
|
||||
})

  test("streams OpenAI through native runtime when opted in", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }

    const source = await loadFixture("openai", "gpt-5.2")
    const model = source.model
    const chunks = [
      {
        type: "response.created",
        response: {
          id: "resp-native",
        },
      },
      {
        type: "response.output_item.added",
        item: { type: "message", id: "item-native", status: "in_progress" },
      },
      {
        type: "response.output_text.delta",
        item_id: "item-native",
        delta: "Hello native",
      },
      {
        type: "response.completed",
        response: {
          incomplete_details: null,
          usage: {
            input_tokens: 1,
            input_tokens_details: null,
            output_tokens: 1,
            output_tokens_details: null,
          },
        },
      },
    ]
    const request = waitRequest("/responses", createEventResponse(chunks, true))

    await using tmp = await tmpdir({ config: openAIConfig(model, `${server.url.origin}/v1`) })

    await withTestInstance({
      directory: tmp.path,
      fn: async (ctx) => {
        const resolved = await getModel(ProviderID.openai, ModelID.make(model.id), ctx)
        const sessionID = SessionID.make("session-test-native")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
          temperature: 0.2,
        } satisfies Agent.Info

        await drainWith(
          llmLayerWithExecutor(RequestExecutor.defaultLayer, { experimentalNativeLlm: true }),
          {
            user: {
              id: MessageID.make("msg_user-native"),
              sessionID,
              role: "user",
              time: { created: Date.now() },
              agent: agent.name,
              model: { providerID: ProviderID.make("openai"), modelID: resolved.id, variant: "high" },
            } satisfies MessageV2.User,
            sessionID,
            model: resolved,
            agent,
            system: ["You are a helpful assistant."],
            messages: [{ role: "user", content: "Hello" }],
            tools: {},
          },
          ctx,
        )

        const capture = await request
        expect(capture.url.pathname.endsWith("/responses")).toBe(true)
        expect(capture.headers.get("Authorization")).toBe("Bearer test-openai-key")
        expect(capture.body.model).toBe(model.id)
        expect(capture.body.stream).toBe(true)
        expect((capture.body.reasoning as { effort?: string } | undefined)?.effort).toBe("high")
        expect(JSON.stringify(capture.body.input)).toContain("You are a helpful assistant.")
        expect(capture.body.input).toContainEqual({ role: "user", content: [{ type: "input_text", text: "Hello" }] })
      },
    })
  })
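Both streaming tests feed the fake server through `createEventResponse(chunks, true)`, whose implementation is not part of this diff. A minimal sketch of an equivalent helper, assuming each chunk is emitted as one server-sent `data:` event and the boolean appends an OpenAI-style `[DONE]` sentinel (both details are assumptions):

// Sketch only: turns chunk objects into a text/event-stream Response, the way a
// helper like createEventResponse presumably does for these tests.
const createEventResponseSketch = (chunks: unknown[], done: boolean): Response => {
  let body = chunks.map((chunk) => `data: ${JSON.stringify(chunk)}\n\n`).join("")
  if (done) body += "data: [DONE]\n\n"
  return new Response(body, { headers: { "content-type": "text/event-stream" } })
}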

  test("uses injected native request executor for tool calls", async () => {
    const source = await loadFixture("openai", "gpt-5.2")
    const model = source.model
    const chunks = [
      {
        type: "response.output_item.added",
        item: { type: "function_call", id: "item-injected-tool", call_id: "call-injected-tool", name: "lookup" },
      },
      {
        type: "response.function_call_arguments.delta",
        item_id: "item-injected-tool",
        delta: '{"query":"weather"}',
      },
      {
        type: "response.output_item.done",
        item: {
          type: "function_call",
          id: "item-injected-tool",
          call_id: "call-injected-tool",
          name: "lookup",
          arguments: '{"query":"weather"}',
        },
      },
      {
        type: "response.completed",
        response: { incomplete_details: null, usage: { input_tokens: 1, output_tokens: 1 } },
      },
    ]
    let captured: Record<string, unknown> | undefined
    let executed: unknown
    const executor = Layer.succeed(
      RequestExecutor.Service,
      RequestExecutor.Service.of({
        execute: (request) =>
          Effect.gen(function* () {
            const web = yield* HttpClientRequest.toWeb(request).pipe(Effect.orDie)
            captured = (yield* Effect.promise(() => web.json())) as Record<string, unknown>
            return HttpClientResponse.fromWeb(request, createEventResponse(chunks, true))
          }),
      }),
    )

    await using tmp = await tmpdir({ config: openAIConfig(model, "https://injected-openai.test/v1") })

    await withTestInstance({
      directory: tmp.path,
      fn: async (ctx) => {
        const resolved = await getModel(ProviderID.openai, ModelID.make(model.id), ctx)
        const sessionID = SessionID.make("session-test-native-injected-tool")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
        } satisfies Agent.Info

        await drainWith(
          llmLayerWithExecutor(executor, { experimentalNativeLlm: true }),
          {
            user: {
              id: MessageID.make("msg_user-native-injected-tool"),
              sessionID,
              role: "user",
              time: { created: Date.now() },
              agent: agent.name,
              model: { providerID: ProviderID.make("openai"), modelID: resolved.id },
            } satisfies MessageV2.User,
            sessionID,
            model: resolved,
            agent,
            system: [],
            messages: [{ role: "user", content: "Use lookup" }],
            tools: {
              lookup: tool({
                description: "Lookup data",
                inputSchema: z.object({ query: z.string() }),
                execute: async (args, options) => {
                  executed = { args, toolCallId: options.toolCallId }
                  return { output: "looked up" }
                },
              }),
            },
          },
          ctx,
        )

        expect(captured?.model).toBe(model.id)
        expect(captured?.tools).toEqual([
          {
            type: "function",
            name: "lookup",
            description: "Lookup data",
            parameters: {
              type: "object",
              properties: { query: { type: "string" } },
              required: ["query"],
              additionalProperties: false,
              $schema: "http://json-schema.org/draft-07/schema#",
            },
          },
        ])
        expect(executed).toEqual({ args: { query: "weather" }, toolCallId: "call-injected-tool" })
      },
    })
  })
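The expected `tools` payload in both tool-call tests is exactly the draft-07 JSON Schema that zod-to-json-schema emits by default for the `lookup` input schema. Whether opencode converts through that particular library is an assumption; the expected shape itself is copied from the assertions above:

import { zodToJsonSchema } from "zod-to-json-schema"
import z from "zod"

// Default conversion of the tool's inputSchema; matches the `parameters` object
// asserted in the tests (type "object", required ["query"], additionalProperties: false,
// plus the draft-07 $schema marker).
const parameters = zodToJsonSchema(z.object({ query: z.string() }))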

  test("executes OpenAI tool calls through native runtime", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }

    const source = await loadFixture("openai", "gpt-5.2")
    const model = source.model
    const chunks = [
      {
        type: "response.output_item.added",
        item: { type: "function_call", id: "item-native-tool", call_id: "call-native-tool", name: "lookup" },
      },
      {
        type: "response.function_call_arguments.delta",
        item_id: "item-native-tool",
        delta: '{"query":"weather"}',
      },
      {
        type: "response.output_item.done",
        item: {
          type: "function_call",
          id: "item-native-tool",
          call_id: "call-native-tool",
          name: "lookup",
          arguments: '{"query":"weather"}',
        },
      },
      {
        type: "response.completed",
        response: { incomplete_details: null, usage: { input_tokens: 1, output_tokens: 1 } },
      },
    ]
    const request = waitRequest("/responses", createEventResponse(chunks, true))
    let executed: unknown

    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://opencode.ai/config.json",
            enabled_providers: ["openai"],
            provider: {
              openai: {
                name: "OpenAI",
                env: ["OPENAI_API_KEY"],
                npm: "@ai-sdk/openai",
                api: "https://api.openai.com/v1",
                models: {
                  [model.id]: model,
                },
                options: {
                  apiKey: "test-openai-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })

    await withTestInstance({
      directory: tmp.path,
      fn: async (ctx) => {
        const resolved = await getModel(ProviderID.openai, ModelID.make(model.id), ctx)
        const sessionID = SessionID.make("session-test-native-tool")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
        } satisfies Agent.Info

        await drainWith(
          llmLayerWithExecutor(RequestExecutor.defaultLayer, { experimentalNativeLlm: true }),
          {
            user: {
              id: MessageID.make("msg_user-native-tool"),
              sessionID,
              role: "user",
              time: { created: Date.now() },
              agent: agent.name,
              model: { providerID: ProviderID.make("openai"), modelID: resolved.id },
            } satisfies MessageV2.User,
            sessionID,
            model: resolved,
            agent,
            system: [],
            messages: [{ role: "user", content: "Use lookup" }],
            tools: {
              lookup: tool({
                description: "Lookup data",
                inputSchema: z.object({ query: z.string() }),
                execute: async (args, options) => {
                  executed = { args, toolCallId: options.toolCallId }
                  return { output: "looked up" }
                },
              }),
            },
          },
          ctx,
        )

        const capture = await request
        expect(capture.body.tools).toEqual([
          {
            type: "function",
            name: "lookup",
            description: "Lookup data",
            parameters: {
              type: "object",
              properties: { query: { type: "string" } },
              required: ["query"],
              additionalProperties: false,
              $schema: "http://json-schema.org/draft-07/schema#",
            },
          },
        ])
        expect(executed).toEqual({ args: { query: "weather" }, toolCallId: "call-native-tool" })
      },
    })
  })
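Both tool tests stop at the first `/responses` request, so the diff never shows how the tool result travels back to the model. If the loop continued, the public OpenAI Responses API convention is to send a follow-up request whose `input` carries a `function_call_output` item referencing the original `call_id`; a sketch of such a body under that assumption (opencode's exact follow-up shape is not shown here, and "gpt-5.2" is just the fixture model used above):

const followUp = {
  model: "gpt-5.2",
  stream: true,
  input: [
    { role: "user", content: [{ type: "input_text", text: "Use lookup" }] },
    // Echo the call the model made, then attach the tool's output by call_id.
    { type: "function_call", call_id: "call-native-tool", name: "lookup", arguments: '{"query":"weather"}' },
    { type: "function_call_output", call_id: "call-native-tool", output: "looked up" },
  ],
}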

  test("accepts user image attachments as data URLs for OpenAI models", async () => {
    const server = state.server
    if (!server) {

@@ -724,6 +1434,18 @@ describe("session.llm.stream", () => {
          service_tier: null,
        },
      },
      {
        type: "response.output_item.added",
        output_index: 0,
        item: { type: "message", id: "item-data-url", status: "in_progress", role: "assistant", content: [] },
      },
      {
        type: "response.content_part.added",
        item_id: "item-data-url",
        output_index: 0,
        content_index: 0,
        part: { type: "output_text", text: "", annotations: [] },
      },
      {
        type: "response.output_text.delta",
        item_id: "item-data-url",

@@ -1,7 +1,9 @@
import { NodeFileSystem } from "@effect/platform-node"
import { expect } from "bun:test"
import { tool } from "ai"
import { Cause, Effect, Exit, Fiber, Layer } from "effect"
import path from "path"
import z from "zod"
import type { Agent } from "../../src/agent/agent"
import { Agent as AgentSvc } from "../../src/agent/agent"
import { Bus } from "../../src/bus"

@@ -661,6 +663,71 @@ it.live("session.processor effect tests compact on structured context overflow",
    ),
  )

  it.live("session.processor effect tests complete AI SDK tool calls when native flag is off", () =>
    provideTmpdirServer(
      ({ dir, llm }) =>
        Effect.gen(function* () {
          const { processors, session, provider } = yield* boot()

          yield* llm.tool("lookup", { query: "weather" })

          const chat = yield* session.create({})
          const parent = yield* user(chat.id, "tool")
          const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
          const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
          const handle = yield* processors.create({
            assistantMessage: msg,
            sessionID: chat.id,
            model: mdl,
          })

          const value = yield* handle.process({
            user: {
              id: parent.id,
              sessionID: chat.id,
              role: "user",
              time: parent.time,
              agent: parent.agent,
              model: { providerID: ref.providerID, modelID: ref.modelID },
            } satisfies MessageV2.User,
            sessionID: chat.id,
            model: mdl,
            agent: agent(),
            system: [],
            messages: [{ role: "user", content: "tool" }],
            tools: {
              lookup: tool({
                description: "Look up information",
                inputSchema: z.object({ query: z.string() }),
                execute: async (input) => ({
                  title: "Weather lookup",
                  output: `result:${input.query}`,
                  metadata: { source: "test" },
                }),
              }),
            },
          })

          const parts = MessageV2.parts(msg.id)
          const call = parts.find((part): part is MessageV2.ToolPart => part.type === "tool")

          expect(value).toBe("continue")
          expect(yield* llm.calls).toBe(1)
          expect(call?.callID).toBe("call_1")
          expect(call?.tool).toBe("lookup")
          expect(call?.state.status).toBe("completed")
          if (call?.state.status !== "completed") return
          expect(call.state.input).toEqual({ query: "weather" })
          expect(call.state.output).toBe("result:weather")
          expect(call.state.title).toBe("Weather lookup")
          expect(call.state.metadata).toEqual({ source: "test" })
          expect(call.state.time.start).toBeDefined()
          expect(call.state.time.end).toBeDefined()
        }),
      { git: true, config: (url) => providerCfg(url) },
    ),
  )

  it.live("session.processor effect tests mark pending tools as aborted on cleanup", () =>
    provideTmpdirServer(
      ({ dir, llm }) =>

@@ -22,9 +22,9 @@
    "zod": "catalog:"
  },
  "peerDependencies": {
    "@opentui/core": ">=0.2.10",
    "@opentui/keymap": ">=0.2.10",
    "@opentui/solid": ">=0.2.10"
    "@opentui/core": ">=0.2.11",
    "@opentui/keymap": ">=0.2.11",
    "@opentui/solid": ">=0.2.11"
  },
  "peerDependenciesMeta": {
    "@opentui/core": {

@@ -145,6 +145,7 @@ OpenCode has a list of keybinds that you can customize through `tui.json`.
  "dialog.select.home": "home",
  "dialog.select.end": "end",
  "dialog.select.submit": "return",
  "dialog.prompt.submit": "return",
  "dialog.mcp.toggle": "space",
  "prompt.autocomplete.prev": "up,ctrl+p",
  "prompt.autocomplete.next": "down,ctrl+n",