diff --git a/packages/llm/example/tutorial.ts b/packages/llm/example/tutorial.ts
index 6b0b894b13..a9adecf369 100644
--- a/packages/llm/example/tutorial.ts
+++ b/packages/llm/example/tutorial.ts
@@ -184,7 +184,7 @@ const FakeProtocol = Protocol.make({
   stream: {
     event: Schema.String,
     initial: () => undefined,
-    step: (_, frame) => Effect.succeed([undefined, [{ type: "text-delta", text: frame }]] as const),
+    step: (_, frame) => Effect.succeed([undefined, [{ type: "text-delta", id: "text-0", text: frame }]] as const),
    onHalt: () => [{ type: "request-finish", reason: "stop" }],
   },
 })
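
Text deltas now carry a required content-block id, so even the tutorial's fake protocol has to supply one. A minimal sketch of the same step written with the LLMEvent constructor introduced later in this diff; the hard-coded "text-0" block id mirrors the tutorial's single text block, and the import path is illustrative:

import { Effect } from "effect"
import { LLMEvent } from "../src/schema" // path as used by the package's tests

// One streamed frame becomes one text-delta; the fake stream only ever produces one text block.
const step = (_: undefined, frame: string) =>
  Effect.succeed([undefined, [LLMEvent.textDelta({ id: "text-0", text: frame })]] as const)
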
diff --git a/packages/llm/src/protocols/anthropic-messages.ts b/packages/llm/src/protocols/anthropic-messages.ts
index ff2239c0d7..7f87fee89d 100644
--- a/packages/llm/src/protocols/anthropic-messages.ts
+++ b/packages/llm/src/protocols/anthropic-messages.ts
@@ -5,10 +5,10 @@ import { Endpoint } from "../route/endpoint"
 import { Framing } from "../route/framing"
 import { Protocol } from "../route/protocol"
 import {
+  LLMEvent,
   Usage,
   type CacheHint,
   type FinishReason,
-  type LLMEvent,
   type LLMRequest,
   type ProviderMetadata,
   type ToolCallPart,
@@ -415,14 +415,13 @@ const serverToolResultEvent = (block: NonNullable
       ).type) : ""
   const isError = errorPayload.endsWith("_tool_result_error")
-  return {
-    type: "tool-result",
+  return LLMEvent.toolResult({
     id: block.tool_use_id ?? "",
     name: SERVER_TOOL_RESULT_NAMES[block.type],
     result: isError ? { type: "error", value: block.content } : { type: "json", value: block.content },
     providerExecuted: true,
     providerMetadata: anthropicMetadata({ blockType: block.type }),
-  }
+  })
 }
 
 type StepResult = readonly [ParserState, ReadonlyArray<LLMEvent>]
@@ -453,18 +452,17 @@ const onContentBlockStart = (state: ParserState, event: AnthropicEvent): StepRes
   }
 
   if (block.type === "text" && block.text) {
-    return [state, [{ type: "text-delta", text: block.text }]]
+    return [state, [LLMEvent.textDelta({ id: `text-${event.index ?? 0}`, text: block.text })]]
   }
 
   if (block.type === "thinking" && block.thinking) {
     return [
       state,
       [
-        {
-          type: "reasoning-delta",
+        LLMEvent.reasoningDelta({
+          id: `reasoning-${event.index ?? 0}`,
           text: block.thinking,
-          ...(block.signature ? { providerMetadata: anthropicMetadata({ signature: block.signature }) } : {}),
-        },
+        }),
       ],
     ]
   }
@@ -480,17 +478,17 @@ const onContentBlockDelta = Effect.fn("AnthropicMessages.onContentBlockDelta")(f
   const delta = event.delta
 
   if (delta?.type === "text_delta" && delta.text) {
-    return [state, [{ type: "text-delta", text: delta.text }]] satisfies StepResult
+    return [state, [LLMEvent.textDelta({ id: `text-${event.index ?? 0}`, text: delta.text })]] satisfies StepResult
   }
 
   if (delta?.type === "thinking_delta" && delta.thinking) {
-    return [state, [{ type: "reasoning-delta", text: delta.thinking }]] satisfies StepResult
+    return [state, [LLMEvent.reasoningDelta({ id: `reasoning-${event.index ?? 0}`, text: delta.thinking })]] satisfies StepResult
   }
 
   if (delta?.type === "signature_delta" && delta.signature) {
     return [
       state,
-      [{ type: "reasoning-delta", text: "", providerMetadata: anthropicMetadata({ signature: delta.signature }) }],
+      [LLMEvent.reasoningEnd({ id: `reasoning-${event.index ?? 0}`, providerMetadata: anthropicMetadata({ signature: delta.signature }) })],
     ] satisfies StepResult
   }
@@ -524,21 +522,20 @@ const onMessageDelta = (state: ParserState, event: AnthropicEvent): StepResult =
   return [
     { ...state, usage },
     [
-      {
-        type: "request-finish",
+      LLMEvent.requestFinish({
         reason: mapFinishReason(event.delta?.stop_reason),
         usage,
-        ...(event.delta?.stop_sequence
-          ? { providerMetadata: anthropicMetadata({ stopSequence: event.delta.stop_sequence }) }
-          : {}),
-      },
+        providerMetadata: event.delta?.stop_sequence
+          ? anthropicMetadata({ stopSequence: event.delta.stop_sequence })
+          : undefined,
+      }),
     ],
   ]
 }
 
 const onError = (state: ParserState, event: AnthropicEvent): StepResult => [
   state,
-  [{ type: "provider-error", message: event.error?.message ?? "Anthropic Messages stream error" }],
+  [LLMEvent.providerError({ message: event.error?.message ?? "Anthropic Messages stream error" })],
 ]
 
 const step = (state: ParserState, event: AnthropicEvent) => {
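
The thinking signature now arrives on a reasoning-end event instead of an empty reasoning-delta. A hedged sketch of how a consumer might recover it; the anthropic/signature metadata shape is taken from the test expectation in this diff, everything else (names, cast) is illustrative:

import { LLMEvent } from "../src/schema"

// Collect Anthropic thinking signatures keyed by reasoning block id.
const signaturesByBlock = (events: ReadonlyArray<LLMEvent>): Record<string, string> => {
  const out: Record<string, string> = {}
  for (const event of events.filter(LLMEvent.is.reasoningEnd)) {
    // providerMetadata is provider-specific; the shape below matches the Anthropic test fixture.
    const signature = (event.providerMetadata as { anthropic?: { signature?: unknown } } | undefined)?.anthropic
      ?.signature
    if (typeof signature === "string") out[event.id] = signature
  }
  return out
}
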
diff --git a/packages/llm/src/protocols/bedrock-converse.ts b/packages/llm/src/protocols/bedrock-converse.ts
index 09176104df..5632396fe0 100644
--- a/packages/llm/src/protocols/bedrock-converse.ts
+++ b/packages/llm/src/protocols/bedrock-converse.ts
@@ -3,10 +3,10 @@ import { Route, type RouteModelInput } from "../route/client"
 import { Endpoint } from "../route/endpoint"
 import { Protocol } from "../route/protocol"
 import {
+  LLMEvent,
   Usage,
   type CacheHint,
   type FinishReason,
-  type LLMEvent,
   type LLMRequest,
   type ToolCallPart,
   type ToolDefinition,
@@ -400,13 +400,26 @@ const step = (state: ParserState, event: BedrockEvent) =>
   }
 
   if (event.contentBlockDelta?.delta?.text) {
-    return [state, [{ type: "text-delta" as const, text: event.contentBlockDelta.delta.text }]] as const
+    return [
+      state,
+      [
+        LLMEvent.textDelta({
+          id: `text-${event.contentBlockDelta.contentBlockIndex}`,
+          text: event.contentBlockDelta.delta.text,
+        }),
+      ],
+    ] as const
   }
 
   if (event.contentBlockDelta?.delta?.reasoningContent?.text) {
     return [
       state,
-      [{ type: "reasoning-delta" as const, text: event.contentBlockDelta.delta.reasoningContent.text }],
+      [
+        LLMEvent.reasoningDelta({
+          id: `reasoning-${event.contentBlockDelta.contentBlockIndex}`,
+          text: event.contentBlockDelta.delta.reasoningContent.text,
+        }),
+      ],
     ] as const
   }
 
@@ -449,7 +462,7 @@ const step = (state: ParserState, event: BedrockEvent) =>
       event.modelStreamErrorException?.message ??
       event.serviceUnavailableException?.message ??
       "Bedrock Converse stream error"
-    return [state, [{ type: "provider-error" as const, message, retryable: true }]] as const
+    return [state, [LLMEvent.providerError({ message, retryable: true })]] as const
   }
 
   if (event.validationException || event.throttlingException) {
@@ -457,7 +470,7 @@ const step = (state: ParserState, event: BedrockEvent) =>
       event.validationException?.message ?? event.throttlingException?.message ?? "Bedrock Converse error"
     return [
       state,
-      [{ type: "provider-error" as const, message, retryable: event.throttlingException !== undefined }],
+      [LLMEvent.providerError({ message, retryable: event.throttlingException !== undefined })],
     ] as const
   }
@@ -468,7 +481,7 @@ const framing = BedrockEventStream.framing(ADAPTER)
 
 const onHalt = (state: ParserState): ReadonlyArray<LLMEvent> =>
   state.pendingFinish
-    ? [{ type: "request-finish", reason: state.pendingFinish.reason, usage: state.pendingFinish.usage }]
+    ? [LLMEvent.requestFinish({ reason: state.pendingFinish.reason, usage: state.pendingFinish.usage })]
     : []
 
 // =============================================================================
diff --git a/packages/llm/src/protocols/gemini.ts b/packages/llm/src/protocols/gemini.ts
index 0d2bdc8e14..140da521a5 100644
--- a/packages/llm/src/protocols/gemini.ts
+++ b/packages/llm/src/protocols/gemini.ts
@@ -5,9 +5,9 @@ import { Endpoint } from "../route/endpoint"
 import { Framing } from "../route/framing"
 import { Protocol } from "../route/protocol"
 import {
+  LLMEvent,
   Usage,
   type FinishReason,
-  type LLMEvent,
   type LLMRequest,
   type MediaPart,
   type TextPart,
@@ -311,7 +311,7 @@ const mapFinishReason = (finishReason: string | undefined, hasToolCalls: boolean
 
 const finish = (state: ParserState): ReadonlyArray<LLMEvent> =>
   state.finishReason || state.usage
-    ? [{ type: "request-finish", reason: mapFinishReason(state.finishReason, state.hasToolCalls), usage: state.usage }]
+    ? [LLMEvent.requestFinish({ reason: mapFinishReason(state.finishReason, state.hasToolCalls), usage: state.usage })]
     : []
 
 const step = (state: ParserState, event: GeminiEvent) => {
@@ -332,14 +332,18 @@ const step = (state: ParserState, event: GeminiEvent) => {
     for (const part of candidate.content.parts) {
       if ("text" in part && part.text.length > 0) {
-        events.push({ type: part.thought ? "reasoning-delta" : "text-delta", text: part.text })
+        events.push(
+          part.thought
+            ? LLMEvent.reasoningDelta({ id: "reasoning-0", text: part.text })
+            : LLMEvent.textDelta({ id: "text-0", text: part.text }),
+        )
         continue
       }
 
       if ("functionCall" in part) {
         const input = part.functionCall.args
         const id = `tool_${nextToolCallId++}`
-        events.push({ type: "tool-call", id, name: part.functionCall.name, input })
+        events.push(LLMEvent.toolCall({ id, name: part.functionCall.name, input }))
         hasToolCalls = true
       }
     }
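
With every delta now tagged by a content-block id (derived from contentBlockIndex on Bedrock, fixed "text-0"/"reasoning-0" on Gemini), downstream code can rebuild each block independently. A hedged sketch of that fold; the helper name and Map-based shape are mine, not part of this change:

import { LLMEvent } from "../src/schema"

// Concatenate text deltas per content-block id, preserving first-seen order.
const textByBlock = (events: ReadonlyArray<LLMEvent>): Map<string, string> => {
  const blocks = new Map<string, string>()
  for (const event of events.filter(LLMEvent.is.textDelta)) {
    blocks.set(event.id, (blocks.get(event.id) ?? "") + event.text)
  }
  return blocks
}
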
diff --git a/packages/llm/src/protocols/openai-chat.ts b/packages/llm/src/protocols/openai-chat.ts
index 974e22950d..8948c31c9d 100644
--- a/packages/llm/src/protocols/openai-chat.ts
+++ b/packages/llm/src/protocols/openai-chat.ts
@@ -6,9 +6,9 @@ import { Framing } from "../route/framing"
 import { HttpTransport } from "../route/transport"
 import { Protocol } from "../route/protocol"
 import {
+  LLMEvent,
   Usage,
   type FinishReason,
-  type LLMEvent,
   type LLMRequest,
   type TextPart,
   type ToolCallPart,
@@ -312,7 +312,7 @@ const step = (state: ParserState, event: OpenAIChatEvent) =>
   const toolDeltas = delta?.tool_calls ?? []
   let tools = state.tools
 
-  if (delta?.content) events.push({ type: "text-delta", text: delta.content })
+  if (delta?.content) events.push(LLMEvent.textDelta({ id: "text-0", text: delta.content }))
 
   for (const tool of toolDeltas) {
     const result = ToolStream.appendOrStart(
@@ -350,7 +350,7 @@ const finishEvents = (state: ParserState): ReadonlyArray<LLMEvent> => {
   const reason = state.finishReason === "stop" && hasToolCalls ? "tool-calls" : state.finishReason
   return [
     ...state.toolCallEvents,
-    ...(reason ? ([{ type: "request-finish", reason, usage: state.usage }] satisfies ReadonlyArray<LLMEvent>) : []),
+    ...(reason ? [LLMEvent.requestFinish({ reason, usage: state.usage })] : []),
   ]
 }
"OpenAI Responses stream error" })], ] const step = (state: ParserState, event: OpenAIResponsesEvent) => { diff --git a/packages/llm/src/protocols/utils/tool-stream.ts b/packages/llm/src/protocols/utils/tool-stream.ts index e6ac5fefd0..aa9c70f017 100644 --- a/packages/llm/src/protocols/utils/tool-stream.ts +++ b/packages/llm/src/protocols/utils/tool-stream.ts @@ -1,5 +1,5 @@ import { Effect } from "effect" -import { LLMError, type ProviderMetadata, type ToolCall, type ToolInputDelta } from "../../schema" +import { LLMError, LLMEvent, type ProviderMetadata, type ToolCall, type ToolInputDelta } from "../../schema" import { eventError, parseToolInput, type ToolAccumulator } from "../shared" type StreamKey = string | number @@ -49,34 +49,24 @@ const withoutTool = (tools: State, key: K): State => return next } -const inputDelta = (tool: PendingTool, text: string): ToolInputDelta => ({ - type: "tool-input-delta", - id: tool.id, - name: tool.name, - text, - ...(tool.providerMetadata ? { providerMetadata: tool.providerMetadata } : {}), -}) +const inputDelta = (tool: PendingTool, text: string): ToolInputDelta => + LLMEvent.toolInputDelta({ + id: tool.id, + name: tool.name, + text, + }) const toolCall = (route: string, tool: PendingTool, inputOverride?: string) => parseToolInput(route, tool.name, inputOverride ?? tool.input).pipe( Effect.map( (input): ToolCall => - tool.providerExecuted - ? { - type: "tool-call", - id: tool.id, - name: tool.name, - input, - providerExecuted: true, - ...(tool.providerMetadata ? { providerMetadata: tool.providerMetadata } : {}), - } - : { - type: "tool-call", - id: tool.id, - name: tool.name, - input, - ...(tool.providerMetadata ? { providerMetadata: tool.providerMetadata } : {}), - }, + LLMEvent.toolCall({ + id: tool.id, + name: tool.name, + input, + providerExecuted: tool.providerExecuted ? 
diff --git a/packages/llm/src/protocols/utils/tool-stream.ts b/packages/llm/src/protocols/utils/tool-stream.ts
index e6ac5fefd0..aa9c70f017 100644
--- a/packages/llm/src/protocols/utils/tool-stream.ts
+++ b/packages/llm/src/protocols/utils/tool-stream.ts
@@ -1,5 +1,5 @@
 import { Effect } from "effect"
-import { LLMError, type ProviderMetadata, type ToolCall, type ToolInputDelta } from "../../schema"
+import { LLMError, LLMEvent, type ProviderMetadata, type ToolCall, type ToolInputDelta } from "../../schema"
 import { eventError, parseToolInput, type ToolAccumulator } from "../shared"
 
 type StreamKey = string | number
@@ -49,34 +49,24 @@ const withoutTool = (tools: State, key: K): State =>
   return next
 }
 
-const inputDelta = (tool: PendingTool, text: string): ToolInputDelta => ({
-  type: "tool-input-delta",
-  id: tool.id,
-  name: tool.name,
-  text,
-  ...(tool.providerMetadata ? { providerMetadata: tool.providerMetadata } : {}),
-})
+const inputDelta = (tool: PendingTool, text: string): ToolInputDelta =>
+  LLMEvent.toolInputDelta({
+    id: tool.id,
+    name: tool.name,
+    text,
+  })
 
 const toolCall = (route: string, tool: PendingTool, inputOverride?: string) =>
   parseToolInput(route, tool.name, inputOverride ?? tool.input).pipe(
     Effect.map(
       (input): ToolCall =>
-        tool.providerExecuted
-          ? {
-              type: "tool-call",
-              id: tool.id,
-              name: tool.name,
-              input,
-              providerExecuted: true,
-              ...(tool.providerMetadata ? { providerMetadata: tool.providerMetadata } : {}),
-            }
-          : {
-              type: "tool-call",
-              id: tool.id,
-              name: tool.name,
-              input,
-              ...(tool.providerMetadata ? { providerMetadata: tool.providerMetadata } : {}),
-            },
+        LLMEvent.toolCall({
+          id: tool.id,
+          name: tool.name,
+          input,
+          providerExecuted: tool.providerExecuted ? true : undefined,
+          providerMetadata: tool.providerMetadata,
+        }),
     ),
   )
diff --git a/packages/llm/src/schema/events.ts b/packages/llm/src/schema/events.ts
index 2fa69370f4..d0befe246e 100644
--- a/packages/llm/src/schema/events.ts
+++ b/packages/llm/src/schema/events.ts
@@ -1,5 +1,5 @@
 import { Schema } from "effect"
-import { FinishReason, ProtocolID, ProviderMetadata, RouteID } from "./ids"
+import { ContentBlockID, FinishReason, ProtocolID, ProviderMetadata, ResponseID, RouteID, ToolCallID } from "./ids"
 import { ModelRef } from "./options"
 import { ToolResultValue } from "./messages"
@@ -14,60 +14,87 @@ export class Usage extends Schema.Class("LLM.Usage")({
 }) {}
 
 export const RequestStart = Schema.Struct({
-  type: Schema.Literal("request-start"),
-  id: Schema.String,
+  type: Schema.tag("request-start"),
+  id: ResponseID,
   model: ModelRef,
 }).annotate({ identifier: "LLM.Event.RequestStart" })
 export type RequestStart = Schema.Schema.Type<typeof RequestStart>
 
 export const StepStart = Schema.Struct({
-  type: Schema.Literal("step-start"),
+  type: Schema.tag("step-start"),
   index: Schema.Number,
 }).annotate({ identifier: "LLM.Event.StepStart" })
 export type StepStart = Schema.Schema.Type<typeof StepStart>
 
 export const TextStart = Schema.Struct({
-  type: Schema.Literal("text-start"),
-  id: Schema.String,
+  type: Schema.tag("text-start"),
+  id: ContentBlockID,
   providerMetadata: Schema.optional(ProviderMetadata),
 }).annotate({ identifier: "LLM.Event.TextStart" })
 export type TextStart = Schema.Schema.Type<typeof TextStart>
 
 export const TextDelta = Schema.Struct({
-  type: Schema.Literal("text-delta"),
-  id: Schema.optional(Schema.String),
+  type: Schema.tag("text-delta"),
+  id: ContentBlockID,
   text: Schema.String,
-  providerMetadata: Schema.optional(ProviderMetadata),
 }).annotate({ identifier: "LLM.Event.TextDelta" })
 export type TextDelta = Schema.Schema.Type<typeof TextDelta>
 
 export const TextEnd = Schema.Struct({
-  type: Schema.Literal("text-end"),
-  id: Schema.String,
+  type: Schema.tag("text-end"),
+  id: ContentBlockID,
   providerMetadata: Schema.optional(ProviderMetadata),
 }).annotate({ identifier: "LLM.Event.TextEnd" })
 export type TextEnd = Schema.Schema.Type<typeof TextEnd>
 
-export const ReasoningDelta = Schema.Struct({
-  type: Schema.Literal("reasoning-delta"),
-  id: Schema.optional(Schema.String),
-  text: Schema.String,
+export const ReasoningStart = Schema.Struct({
+  type: Schema.tag("reasoning-start"),
+  id: ContentBlockID,
   providerMetadata: Schema.optional(ProviderMetadata),
+}).annotate({ identifier: "LLM.Event.ReasoningStart" })
+export type ReasoningStart = Schema.Schema.Type<typeof ReasoningStart>
+
+export const ReasoningDelta = Schema.Struct({
+  type: Schema.tag("reasoning-delta"),
+  id: ContentBlockID,
+  text: Schema.String,
 }).annotate({ identifier: "LLM.Event.ReasoningDelta" })
 export type ReasoningDelta = Schema.Schema.Type<typeof ReasoningDelta>
 
+export const ReasoningEnd = Schema.Struct({
+  type: Schema.tag("reasoning-end"),
+  id: ContentBlockID,
+  providerMetadata: Schema.optional(ProviderMetadata),
+}).annotate({ identifier: "LLM.Event.ReasoningEnd" })
+export type ReasoningEnd = Schema.Schema.Type<typeof ReasoningEnd>
+
+export const ToolInputStart = Schema.Struct({
+  type: Schema.tag("tool-input-start"),
+  id: ToolCallID,
+  name: Schema.String,
+  providerMetadata: Schema.optional(ProviderMetadata),
+}).annotate({ identifier: "LLM.Event.ToolInputStart" })
+export type ToolInputStart = Schema.Schema.Type<typeof ToolInputStart>
+
 export const ToolInputDelta = Schema.Struct({
-  type: Schema.Literal("tool-input-delta"),
-  id: Schema.String,
+  type: Schema.tag("tool-input-delta"),
+  id: ToolCallID,
   name: Schema.String,
   text: Schema.String,
-  providerMetadata: Schema.optional(ProviderMetadata),
 }).annotate({ identifier: "LLM.Event.ToolInputDelta" })
 export type ToolInputDelta = Schema.Schema.Type<typeof ToolInputDelta>
 
+export const ToolInputEnd = Schema.Struct({
+  type: Schema.tag("tool-input-end"),
+  id: ToolCallID,
+  name: Schema.String,
+  providerMetadata: Schema.optional(ProviderMetadata),
+}).annotate({ identifier: "LLM.Event.ToolInputEnd" })
+export type ToolInputEnd = Schema.Schema.Type<typeof ToolInputEnd>
+
 export const ToolCall = Schema.Struct({
-  type: Schema.Literal("tool-call"),
-  id: Schema.String,
+  type: Schema.tag("tool-call"),
+  id: ToolCallID,
   name: Schema.String,
   input: Schema.Unknown,
   providerExecuted: Schema.optional(Schema.Boolean),
@@ -76,8 +103,8 @@ export const ToolCall = Schema.Struct({
 export type ToolCall = Schema.Schema.Type<typeof ToolCall>
 
 export const ToolResult = Schema.Struct({
-  type: Schema.Literal("tool-result"),
-  id: Schema.String,
+  type: Schema.tag("tool-result"),
+  id: ToolCallID,
   name: Schema.String,
   result: ToolResultValue,
   providerExecuted: Schema.optional(Schema.Boolean),
@@ -86,8 +113,8 @@ export const ToolResult = Schema.Struct({
 export type ToolResult = Schema.Schema.Type<typeof ToolResult>
 
 export const ToolError = Schema.Struct({
-  type: Schema.Literal("tool-error"),
-  id: Schema.String,
+  type: Schema.tag("tool-error"),
+  id: ToolCallID,
   name: Schema.String,
   message: Schema.String,
   providerMetadata: Schema.optional(ProviderMetadata),
@@ -95,7 +122,7 @@ export const ToolError = Schema.Struct({
 export type ToolError = Schema.Schema.Type<typeof ToolError>
 
 export const StepFinish = Schema.Struct({
-  type: Schema.Literal("step-finish"),
+  type: Schema.tag("step-finish"),
   index: Schema.Number,
   reason: FinishReason,
   usage: Schema.optional(Usage),
@@ -104,7 +131,7 @@ export const StepFinish = Schema.Struct({
 export type StepFinish = Schema.Schema.Type<typeof StepFinish>
 
 export const RequestFinish = Schema.Struct({
-  type: Schema.Literal("request-finish"),
+  type: Schema.tag("request-finish"),
   reason: FinishReason,
   usage: Schema.optional(Usage),
   providerMetadata: Schema.optional(ProviderMetadata),
@@ -112,7 +139,7 @@ export const RequestFinish = Schema.Struct({
 export type RequestFinish = Schema.Schema.Type<typeof RequestFinish>
 
 export const ProviderErrorEvent = Schema.Struct({
-  type: Schema.Literal("provider-error"),
+  type: Schema.tag("provider-error"),
   message: Schema.String,
   retryable: Schema.optional(Schema.Boolean),
   providerMetadata: Schema.optional(ProviderMetadata),
@@ -125,8 +152,12 @@ const llmEventTagged = Schema.Union([
   TextStart,
   TextDelta,
   TextEnd,
+  ReasoningStart,
   ReasoningDelta,
+  ReasoningEnd,
+  ToolInputStart,
   ToolInputDelta,
+  ToolInputEnd,
   ToolCall,
   ToolResult,
   ToolError,
@@ -135,20 +166,52 @@ const llmEventTagged = Schema.Union([
   ProviderErrorEvent,
 ]).pipe(Schema.toTaggedUnion("type"))
 
+type WithID<A, ID> = Omit<A, "id"> & { readonly id: ID | string }
+
+const responseID = (value: ResponseID | string) => ResponseID.make(value)
+const contentBlockID = (value: ContentBlockID | string) => ContentBlockID.make(value)
+const toolCallID = (value: ToolCallID | string) => ToolCallID.make(value)
+
 /**
  * camelCase aliases for `LLMEvent.guards` (provided by `Schema.toTaggedUnion`).
  * Lets consumers write `events.filter(LLMEvent.is.toolCall)` instead of
  * `events.filter(LLMEvent.guards["tool-call"])`.
 */
 export const LLMEvent = Object.assign(llmEventTagged, {
+  requestStart: (input: WithID<RequestStart, ResponseID>) => RequestStart.make({ ...input, id: responseID(input.id) }),
+  stepStart: StepStart.make,
+  textStart: (input: WithID<TextStart, ContentBlockID>) => TextStart.make({ ...input, id: contentBlockID(input.id) }),
+  textDelta: (input: WithID<TextDelta, ContentBlockID>) => TextDelta.make({ ...input, id: contentBlockID(input.id) }),
+  textEnd: (input: WithID<TextEnd, ContentBlockID>) => TextEnd.make({ ...input, id: contentBlockID(input.id) }),
+  reasoningStart: (input: WithID<ReasoningStart, ContentBlockID>) =>
+    ReasoningStart.make({ ...input, id: contentBlockID(input.id) }),
+  reasoningDelta: (input: WithID<ReasoningDelta, ContentBlockID>) =>
+    ReasoningDelta.make({ ...input, id: contentBlockID(input.id) }),
+  reasoningEnd: (input: WithID<ReasoningEnd, ContentBlockID>) =>
+    ReasoningEnd.make({ ...input, id: contentBlockID(input.id) }),
+  toolInputStart: (input: WithID<ToolInputStart, ToolCallID>) =>
+    ToolInputStart.make({ ...input, id: toolCallID(input.id) }),
+  toolInputDelta: (input: WithID<ToolInputDelta, ToolCallID>) =>
+    ToolInputDelta.make({ ...input, id: toolCallID(input.id) }),
+  toolInputEnd: (input: WithID<ToolInputEnd, ToolCallID>) => ToolInputEnd.make({ ...input, id: toolCallID(input.id) }),
+  toolCall: (input: WithID<ToolCall, ToolCallID>) => ToolCall.make({ ...input, id: toolCallID(input.id) }),
+  toolResult: (input: WithID<ToolResult, ToolCallID>) => ToolResult.make({ ...input, id: toolCallID(input.id) }),
+  toolError: (input: WithID<ToolError, ToolCallID>) => ToolError.make({ ...input, id: toolCallID(input.id) }),
+  stepFinish: StepFinish.make,
+  requestFinish: RequestFinish.make,
+  providerError: ProviderErrorEvent.make,
   is: {
     requestStart: llmEventTagged.guards["request-start"],
     stepStart: llmEventTagged.guards["step-start"],
     textStart: llmEventTagged.guards["text-start"],
     textDelta: llmEventTagged.guards["text-delta"],
     textEnd: llmEventTagged.guards["text-end"],
+    reasoningStart: llmEventTagged.guards["reasoning-start"],
     reasoningDelta: llmEventTagged.guards["reasoning-delta"],
+    reasoningEnd: llmEventTagged.guards["reasoning-end"],
+    toolInputStart: llmEventTagged.guards["tool-input-start"],
     toolInputDelta: llmEventTagged.guards["tool-input-delta"],
+    toolInputEnd: llmEventTagged.guards["tool-input-end"],
     toolCall: llmEventTagged.guards["tool-call"],
     toolResult: llmEventTagged.guards["tool-result"],
     toolError: llmEventTagged.guards["tool-error"],
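
Taken together, the schema changes mean protocol code builds events through the camelCase constructors (which normalize the id field) and consumers narrow them through LLMEvent.is. A short usage sketch, assuming the module is imported the way the package's tests import it:

import { LLMEvent } from "../src/schema"

const events = [
  LLMEvent.textDelta({ id: "text-0", text: "Hello" }),
  LLMEvent.toolCall({ id: "call_1", name: "lookup", input: { query: "weather" } }),
  LLMEvent.requestFinish({ reason: "stop" }),
]

// The guard aliases keep the hyphenated string keys out of call sites.
const toolCalls = events.filter(LLMEvent.is.toolCall)
const text = events.filter(LLMEvent.is.textDelta).map((event) => event.text).join("")
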
diff --git a/packages/llm/src/schema/ids.ts b/packages/llm/src/schema/ids.ts
index 9261842770..ada133f0db 100644
--- a/packages/llm/src/schema/ids.ts
+++ b/packages/llm/src/schema/ids.ts
@@ -14,6 +14,15 @@ export type ModelID = typeof ModelID.Type
 export const ProviderID = Schema.String.pipe(Schema.brand("LLM.ProviderID"))
 export type ProviderID = typeof ProviderID.Type
 
+export const ResponseID = Schema.String
+export type ResponseID = Schema.Schema.Type<typeof ResponseID>
+
+export const ContentBlockID = Schema.String
+export type ContentBlockID = Schema.Schema.Type<typeof ContentBlockID>
+
+export const ToolCallID = Schema.String
+export type ToolCallID = Schema.Schema.Type<typeof ToolCallID>
+
 export const ReasoningEfforts = ["none", "minimal", "low", "medium", "high", "xhigh", "max"] as const
 export const ReasoningEffort = Schema.Literals(ReasoningEfforts)
 export type ReasoningEffort = Schema.Schema.Type<typeof ReasoningEffort>
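
Unlike ModelID and ProviderID above, the three new ID schemas are plain strings for now, so the `.make` calls in events.ts essentially pass the string through. If they are later tightened, the brand pattern already used in this file would apply directly; a hedged sketch, not part of this change:

import { Schema } from "effect"

// Same shape as ProviderID above; adopting it would make ContentBlockID nominally distinct from string.
export const ContentBlockID = Schema.String.pipe(Schema.brand("LLM.ContentBlockID"))
export type ContentBlockID = typeof ContentBlockID.Type
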
diff --git a/packages/llm/src/tool-runtime.ts b/packages/llm/src/tool-runtime.ts
index 20e27379bd..c6e716d45e 100644
--- a/packages/llm/src/tool-runtime.ts
+++ b/packages/llm/src/tool-runtime.ts
@@ -4,7 +4,7 @@ import {
   type ContentPart,
   type FinishReason,
   type LLMError,
-  type LLMEvent,
+  LLMEvent,
   LLMRequest,
   Message,
   type ProviderMetadata,
@@ -115,11 +115,19 @@ interface StepState {
 const accumulate = (state: StepState, event: LLMEvent) => {
   if (event.type === "text-delta") {
-    appendStreamingText(state, "text", event.text, event.providerMetadata)
+    appendStreamingText(state, "text", event.text, undefined)
     return
   }
   if (event.type === "reasoning-delta") {
-    appendStreamingText(state, "reasoning", event.text, event.providerMetadata)
+    appendStreamingText(state, "reasoning", event.text, undefined)
+    return
+  }
+  if (event.type === "reasoning-end") {
+    appendStreamingText(state, "reasoning", "", event.providerMetadata)
+    return
+  }
+  if (event.type === "text-end") {
+    appendStreamingText(state, "text", "", event.providerMetadata)
     return
   }
   if (event.type === "tool-call") {
@@ -219,10 +227,10 @@ const decodeAndExecute = (tool: AnyTool, input: unknown): Effect.Effect =>
   result.type === "error"
     ? [
-        { type: "tool-error", id: call.id, name: call.name, message: String(result.value) },
-        { type: "tool-result", id: call.id, name: call.name, result },
+        LLMEvent.toolError({ id: call.id, name: call.name, message: String(result.value) }),
+        LLMEvent.toolResult({ id: call.id, name: call.name, result }),
       ]
-    : [{ type: "tool-result", id: call.id, name: call.name, result }]
+    : [LLMEvent.toolResult({ id: call.id, name: call.name, result })]
 
 const followUpRequest = (
   request: LLMRequest,
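
The accumulator now treats deltas as pure text and picks provider metadata up from the matching end events. A simplified, stand-alone version of that fold, where the state shape and helper are illustrative rather than the package's internal StepState:

import { LLMEvent, type ProviderMetadata } from "../src/schema"

interface TextBlock {
  text: string
  providerMetadata?: ProviderMetadata
}

// Fold text deltas into one block per id and attach metadata when the block ends.
const foldTextBlocks = (events: ReadonlyArray<LLMEvent>): Map<string, TextBlock> => {
  const blocks = new Map<string, TextBlock>()
  for (const event of events) {
    if (LLMEvent.is.textDelta(event)) {
      const block = blocks.get(event.id) ?? { text: "" }
      block.text += event.text
      blocks.set(event.id, block)
    } else if (LLMEvent.is.textEnd(event)) {
      const block = blocks.get(event.id) ?? { text: "" }
      block.providerMetadata = event.providerMetadata
      blocks.set(event.id, block)
    }
  }
  return blocks
}
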
diff --git a/packages/llm/test/adapter.test.ts b/packages/llm/test/adapter.test.ts
index 191b8529c0..5ac8b9d818 100644
--- a/packages/llm/test/adapter.test.ts
+++ b/packages/llm/test/adapter.test.ts
@@ -50,7 +50,9 @@ const request = LLM.request({
 })
 
 const raiseEvent = (event: FakeEvent): import("../src/schema").LLMEvent =>
-  event.type === "finish" ? { type: "request-finish", reason: event.reason } : { type: "text-delta", text: event.text }
+  event.type === "finish"
+    ? { type: "request-finish", reason: event.reason }
+    : { type: "text-delta", id: "text-0", text: event.text }
 
 const fakeProtocol = Protocol.make({
   id: "fake",
diff --git a/packages/llm/test/llm.test.ts b/packages/llm/test/llm.test.ts
index 9380e554bf..e9ef58afa8 100644
--- a/packages/llm/test/llm.test.ts
+++ b/packages/llm/test/llm.test.ts
@@ -126,7 +126,7 @@ describe("llm constructors", () => {
     expect(
       LLMResponse.text({
         events: [
-          { type: "text-delta", text: "hi" },
+          { type: "text-delta", id: "text-0", text: "hi" },
           { type: "request-finish", reason: "stop" },
         ],
       }),
diff --git a/packages/llm/test/provider/anthropic-messages.test.ts b/packages/llm/test/provider/anthropic-messages.test.ts
index 263828a0ad..85900a1143 100644
--- a/packages/llm/test/provider/anthropic-messages.test.ts
+++ b/packages/llm/test/provider/anthropic-messages.test.ts
@@ -115,7 +115,7 @@ describe("Anthropic Messages route", () => {
       cacheReadInputTokens: 1,
       totalTokens: 7,
     })
-    expect(response.events.find((event) => event.type === "reasoning-delta" && event.text === "")).toMatchObject({
+    expect(response.events.find((event) => event.type === "reasoning-end")).toMatchObject({
       providerMetadata: { anthropic: { signature: "sig_1" } },
     })
     expect(response.events.at(-1)).toMatchObject({
diff --git a/packages/llm/test/provider/gemini.test.ts b/packages/llm/test/provider/gemini.test.ts
index a80ab740c3..9de4e0dc25 100644
--- a/packages/llm/test/provider/gemini.test.ts
+++ b/packages/llm/test/provider/gemini.test.ts
@@ -204,9 +204,9 @@ describe("Gemini route", () => {
       totalTokens: 7,
     })
     expect(response.events).toEqual([
-      { type: "reasoning-delta", text: "thinking" },
-      { type: "text-delta", text: "Hello" },
-      { type: "text-delta", text: "!" },
+      { type: "reasoning-delta", id: "reasoning-0", text: "thinking" },
+      { type: "text-delta", id: "text-0", text: "Hello" },
+      { type: "text-delta", id: "text-0", text: "!" },
       {
         type: "request-finish",
         reason: "stop",
diff --git a/packages/llm/test/provider/openai-chat.test.ts b/packages/llm/test/provider/openai-chat.test.ts
index 0998401094..8b0dfc2894 100644
--- a/packages/llm/test/provider/openai-chat.test.ts
+++ b/packages/llm/test/provider/openai-chat.test.ts
@@ -225,8 +225,8 @@ describe("OpenAI Chat route", () => {
     expect(response.text).toBe("Hello!")
     expect(response.events).toEqual([
-      { type: "text-delta", text: "Hello" },
-      { type: "text-delta", text: "!" },
+      { type: "text-delta", id: "text-0", text: "Hello" },
+      { type: "text-delta", id: "text-0", text: "!" },
       {
         type: "request-finish",
         reason: "stop",
diff --git a/packages/llm/test/provider/openai-responses.test.ts b/packages/llm/test/provider/openai-responses.test.ts
index 30add06d83..5141b44cc2 100644
--- a/packages/llm/test/provider/openai-responses.test.ts
+++ b/packages/llm/test/provider/openai-responses.test.ts
@@ -336,8 +336,8 @@ describe("OpenAI Responses route", () => {
     expect(response.text).toBe("Hello!")
     expect(response.events).toEqual([
-      { type: "text-delta", id: "msg_1", text: "Hello", providerMetadata: { openai: { itemId: "msg_1" } } },
-      { type: "text-delta", id: "msg_1", text: "!", providerMetadata: { openai: { itemId: "msg_1" } } },
+      { type: "text-delta", id: "msg_1", text: "Hello" },
+      { type: "text-delta", id: "msg_1", text: "!" },
       {
         type: "request-finish",
         reason: "stop",
@@ -394,14 +394,12 @@ describe("OpenAI Responses route", () => {
         id: "call_1",
         name: "lookup",
         text: '{"query"',
-        providerMetadata: { openai: { itemId: "item_1" } },
       },
       {
         type: "tool-input-delta",
         id: "call_1",
         name: "lookup",
         text: ':"weather"}',
-        providerMetadata: { openai: { itemId: "item_1" } },
       },
       {
         type: "tool-call",