mirror of
https://github.com/browseros-ai/BrowserOS.git
synced 2026-05-14 08:03:58 +00:00
Compare commits
17 Commits
fix/browse
...
feat/click
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0f7e2098c4 | ||
|
|
283f76eba7 | ||
|
|
758445fbbe | ||
|
|
9031062676 | ||
|
|
0eddab8499 | ||
|
|
a62f9771a2 | ||
|
|
0816e217e8 | ||
|
|
fc2f669445 | ||
|
|
daeb33fc6c | ||
|
|
b444afa117 | ||
|
|
3c66cbabae | ||
|
|
940ef6dbd5 | ||
|
|
3bc399f94c | ||
|
|
7765d99c73 | ||
|
|
db5e55a174 | ||
|
|
fbae45eb97 | ||
|
|
554fcd7c06 |
@@ -1,5 +1,5 @@
|
||||
import { ArrowLeft } from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef } from 'react'
|
||||
import { ArrowLeft, PanelRight } from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { Navigate, useNavigate, useParams, useSearchParams } from 'react-router'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import type {
|
||||
@@ -16,8 +16,14 @@ import {
|
||||
useUpdateHarnessAgent,
|
||||
} from '@/entrypoints/app/agents/useAgents'
|
||||
import type { AgentEntry } from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import { type ProducedFilesRailGroup, useAgentOutputs } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { AgentRail } from './AgentRail'
|
||||
import { useAgentCommandData } from './agent-command-layout'
|
||||
import {
|
||||
OutputsRail,
|
||||
useOutputsRailOpen,
|
||||
} from './agent-conversation.outputs-rail'
|
||||
import { ClawChat } from './ClawChat'
|
||||
import { ConversationHeader } from './ConversationHeader'
|
||||
import { ConversationInput } from './ConversationInput'
|
||||
@@ -25,6 +31,8 @@ import {
|
||||
buildChatHistoryFromClawMessages,
|
||||
filterTurnsPersistedInHistory,
|
||||
flattenHistoryPages,
|
||||
mapHistoryToProducedFilesGroups,
|
||||
selectStripOnlyTurns,
|
||||
} from './claw-chat-types'
|
||||
import { consumePendingInitialMessage } from './pending-initial-message'
|
||||
import { QueuePanel } from './QueuePanel'
|
||||
@@ -38,6 +46,7 @@ function AgentConversationController({
|
||||
agents,
|
||||
agentPathPrefix,
|
||||
createAgentPath,
|
||||
onOpenOutputsRail,
|
||||
}: {
|
||||
agentId: string
|
||||
initialMessage: string | null
|
||||
@@ -45,6 +54,7 @@ function AgentConversationController({
|
||||
agents: AgentEntry[]
|
||||
agentPathPrefix: string
|
||||
createAgentPath: string
|
||||
onOpenOutputsRail?: ((turnId?: string | null) => void) | null
|
||||
}) {
|
||||
const navigate = useNavigate()
|
||||
const initialMessageSentRef = useRef<string | null>(null)
|
||||
@@ -76,6 +86,15 @@ function AgentConversationController({
|
||||
const harnessAgent = harnessAgents.find((entry) => entry.id === agentId)
|
||||
const queue = harnessAgent?.queue ?? []
|
||||
const activeTurnId = harnessAgent?.activeTurnId ?? null
|
||||
const isOpenClawAgent = harnessAgent?.adapter === 'openclaw'
|
||||
|
||||
// Used to surface produced-files strips on a fresh page load
|
||||
// when there's no optimistic turn to carry the data. Disabled
|
||||
// for non-openclaw adapters since they don't attribute files.
|
||||
const { groups: agentOutputGroups } = useAgentOutputs(
|
||||
agentId,
|
||||
isOpenClawAgent,
|
||||
)
|
||||
|
||||
const { turns, streaming, send } = useAgentConversation(agentId, {
|
||||
runtime: 'agent-harness',
|
||||
@@ -100,6 +119,44 @@ function AgentConversationController({
|
||||
() => filterTurnsPersistedInHistory(turns, historyMessages),
|
||||
[historyMessages, turns],
|
||||
)
|
||||
// Persisted turns that still need to surface their FileCardStrip
|
||||
// — history items don't carry produced-files data, so without
|
||||
// these the strip would vanish on history reload.
|
||||
const stripOnlyTurns = useMemo(
|
||||
() => selectStripOnlyTurns(turns, historyMessages),
|
||||
[historyMessages, turns],
|
||||
)
|
||||
// Two outputs from the per-turn matcher:
|
||||
// - filesByAssistantId → strip rendered directly under the
|
||||
// matching assistant history bubble.
|
||||
// - tailUnmatched → groups with no history pair (orphans);
|
||||
// rendered at the conversation tail.
|
||||
// Both are filtered to exclude turnIds already covered by a
|
||||
// live or strip-only optimistic turn (those carry their own
|
||||
// strip and history hasn't reloaded yet).
|
||||
const { filesByAssistantId, tailStripGroups } = useMemo(() => {
|
||||
if (!isOpenClawAgent) {
|
||||
return {
|
||||
filesByAssistantId: new Map<string, ProducedFilesRailGroup>(),
|
||||
tailStripGroups: [] as ProducedFilesRailGroup[],
|
||||
}
|
||||
}
|
||||
const coveredTurnIds = new Set<string>()
|
||||
for (const turn of turns) {
|
||||
if (turn.turnId) coveredTurnIds.add(turn.turnId)
|
||||
}
|
||||
const eligibleGroups = agentOutputGroups.filter(
|
||||
(group) => !coveredTurnIds.has(group.turnId),
|
||||
)
|
||||
const { byAssistantMessageId, unmatched } = mapHistoryToProducedFilesGroups(
|
||||
historyMessages,
|
||||
eligibleGroups,
|
||||
)
|
||||
return {
|
||||
filesByAssistantId: byAssistantMessageId,
|
||||
tailStripGroups: unmatched,
|
||||
}
|
||||
}, [agentOutputGroups, isOpenClawAgent, historyMessages, turns])
|
||||
onInitialMessageConsumedRef.current = onInitialMessageConsumed
|
||||
|
||||
const disabled = !agent
|
||||
@@ -171,12 +228,16 @@ function AgentConversationController({
|
||||
agentName={agentName}
|
||||
historyMessages={historyMessages}
|
||||
turns={visibleTurns}
|
||||
stripOnlyTurns={stripOnlyTurns}
|
||||
filesByAssistantId={filesByAssistantId}
|
||||
tailStripGroups={tailStripGroups}
|
||||
streaming={streaming}
|
||||
isInitialLoading={harnessHistoryQuery.isLoading}
|
||||
error={error}
|
||||
hasNextPage={false}
|
||||
isFetchingNextPage={false}
|
||||
onFetchNextPage={() => {}}
|
||||
onOpenOutputsRail={onOpenOutputsRail}
|
||||
onRetry={() => {
|
||||
void harnessHistoryQuery.refetch()
|
||||
}}
|
||||
@@ -287,6 +348,45 @@ export const AgentCommandConversation: FC<AgentCommandConversationProps> = ({
|
||||
const isPageVariant = variant === 'page'
|
||||
const backLabel = isPageVariant ? 'Back to agents' : 'Back to home'
|
||||
|
||||
const isOpenClawAgent = harnessAgent?.adapter === 'openclaw'
|
||||
const [outputsRailOpen, setOutputsRailOpen] =
|
||||
useOutputsRailOpen(resolvedAgentId)
|
||||
const railVisible = isOpenClawAgent && outputsRailOpen
|
||||
|
||||
// Deep-link target for the rail. Set when (a) the user clicks
|
||||
// View / +N on an inline file-card strip, or (b) an external nav
|
||||
// arrived with `?outputsTurn=<turnId>`. Cleared by the rail
|
||||
// itself once it has scrolled to + expanded the matching group.
|
||||
const urlOutputsTurn = searchParams.get('outputsTurn')
|
||||
const [focusTurnId, setFocusTurnId] = useState<string | null>(urlOutputsTurn)
|
||||
// If the URL param flips while we're already on this agent, sync.
|
||||
useEffect(() => {
|
||||
if (!urlOutputsTurn) return
|
||||
setFocusTurnId(urlOutputsTurn)
|
||||
if (isOpenClawAgent) setOutputsRailOpen(true)
|
||||
}, [urlOutputsTurn, isOpenClawAgent, setOutputsRailOpen])
|
||||
|
||||
const handleOpenOutputsRail = (turnId?: string | null) => {
|
||||
if (!isOpenClawAgent) return
|
||||
setOutputsRailOpen(true)
|
||||
setFocusTurnId(turnId ?? null)
|
||||
}
|
||||
const handleFocusTurnConsumed = () => {
|
||||
setFocusTurnId(null)
|
||||
if (urlOutputsTurn) {
|
||||
// Drop the URL param so a back-nav doesn't re-trigger the
|
||||
// scroll. `replace: true` keeps history clean.
|
||||
setSearchParams(
|
||||
(prev) => {
|
||||
const next = new URLSearchParams(prev)
|
||||
next.delete('outputsTurn')
|
||||
return next
|
||||
},
|
||||
{ replace: true },
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const adapterHealth = useMemo<AgentAdapterHealth | null>(() => {
|
||||
const adapterId = harnessAgent?.adapter
|
||||
if (!adapterId) return null
|
||||
@@ -346,13 +446,34 @@ export const AgentCommandConversation: FC<AgentCommandConversationProps> = ({
|
||||
onPinToggle={(next) =>
|
||||
handlePinToggle(harnessAgent ?? null, next)
|
||||
}
|
||||
headerExtra={
|
||||
isOpenClawAgent ? (
|
||||
<Button
|
||||
variant={railVisible ? 'secondary' : 'ghost'}
|
||||
size="icon"
|
||||
className="size-8 rounded-xl"
|
||||
onClick={() => setOutputsRailOpen(!railVisible)}
|
||||
title={railVisible ? 'Hide outputs' : 'Show outputs'}
|
||||
>
|
||||
<PanelRight className="size-4" />
|
||||
</Button>
|
||||
) : undefined
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Body grid: rail list + chat. Both columns share the same
|
||||
top edge (the band above) so headers can never drift. */}
|
||||
<div className="grid min-h-0 flex-1 grid-rows-[minmax(0,1fr)] lg:grid-cols-[288px_minmax(0,1fr)]">
|
||||
{/* Body grid: rail list + chat (+ outputs rail when an
|
||||
openclaw agent has it open). Columns share the same top
|
||||
edge as the band above so headers can never drift. */}
|
||||
<div
|
||||
className={cn(
|
||||
'grid min-h-0 flex-1 grid-rows-[minmax(0,1fr)]',
|
||||
railVisible
|
||||
? 'lg:grid-cols-[288px_minmax(0,1fr)_320px]'
|
||||
: 'lg:grid-cols-[288px_minmax(0,1fr)]',
|
||||
)}
|
||||
>
|
||||
<AgentRail
|
||||
agents={harnessAgents}
|
||||
adapters={adapters}
|
||||
@@ -367,13 +488,34 @@ export const AgentCommandConversation: FC<AgentCommandConversationProps> = ({
|
||||
agentId={resolvedAgentId}
|
||||
agents={agents}
|
||||
initialMessage={initialMessage}
|
||||
onInitialMessageConsumed={() =>
|
||||
setSearchParams({}, { replace: true })
|
||||
}
|
||||
onInitialMessageConsumed={() => {
|
||||
// Preserve the outputsTurn deep-link if present —
|
||||
// dropping all params would erase the rail focus
|
||||
// before it had a chance to consume.
|
||||
setSearchParams(
|
||||
(prev) => {
|
||||
const next = new URLSearchParams()
|
||||
const turn = prev.get('outputsTurn')
|
||||
if (turn) next.set('outputsTurn', turn)
|
||||
return next
|
||||
},
|
||||
{ replace: true },
|
||||
)
|
||||
}}
|
||||
agentPathPrefix={agentPathPrefix}
|
||||
createAgentPath={createAgentPath}
|
||||
onOpenOutputsRail={isOpenClawAgent ? handleOpenOutputsRail : null}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{railVisible ? (
|
||||
<OutputsRail
|
||||
agentId={resolvedAgentId}
|
||||
onClose={() => setOutputsRailOpen(false)}
|
||||
focusTurnId={focusTurnId}
|
||||
onFocusTurnConsumed={handleFocusTurnConsumed}
|
||||
/>
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -162,12 +162,16 @@ export const AgentCommandHome: FC = () => {
|
||||
<>
|
||||
<div className="flex flex-col items-center gap-5 pt-[max(10vh,24px)] text-center">
|
||||
<div className="space-y-3">
|
||||
<h1 className="font-semibold text-[clamp(2rem,4vw,3.25rem)] leading-tight tracking-tight">
|
||||
What should your agent work on next?
|
||||
<h1 className="font-semibold text-[clamp(2.25rem,4.5vw,3.5rem)] leading-[1.08] tracking-[-0.025em] [text-wrap:balance]">
|
||||
What should your agent{' '}
|
||||
<span className="font-medium text-[var(--accent-orange)] italic">
|
||||
work on
|
||||
</span>{' '}
|
||||
next?
|
||||
</h1>
|
||||
<p className="mx-auto max-w-2xl text-muted-foreground text-sm leading-6">
|
||||
Start with a task, continue a thread, or switch to another
|
||||
agent without leaving the new tab.
|
||||
<p className="mx-auto max-w-2xl text-muted-foreground text-sm leading-6 [text-wrap:pretty]">
|
||||
Start a task, continue a thread, or hand off to a different
|
||||
agent — all without leaving this tab.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -27,6 +27,14 @@ interface AgentSelectorProps {
|
||||
onSelectAgent: (agent: AgentEntry) => void
|
||||
onCreateAgent?: () => void
|
||||
status?: string
|
||||
/**
|
||||
* `'pill'` renders the filled-pill variant used by the calm
|
||||
* composer on `/home` — bordered, slightly elevated background,
|
||||
* mono agent name, used as the visual anchor on the left of the
|
||||
* footer chip row. Default `'ghost'` keeps the existing flat
|
||||
* shadcn ghost-button trigger used by the chat surface.
|
||||
*/
|
||||
triggerVariant?: 'ghost' | 'pill'
|
||||
}
|
||||
|
||||
function getStatusDot(status?: string) {
|
||||
@@ -42,31 +50,49 @@ export const AgentSelector: FC<AgentSelectorProps> = ({
|
||||
onSelectAgent,
|
||||
onCreateAgent,
|
||||
status,
|
||||
triggerVariant = 'ghost',
|
||||
}) => {
|
||||
const [open, setOpen] = useState(false)
|
||||
const selectedAgent = agents.find(
|
||||
(agent) => agent.agentId === selectedAgentId,
|
||||
)
|
||||
|
||||
const triggerNode =
|
||||
triggerVariant === 'pill' ? (
|
||||
<button
|
||||
type="button"
|
||||
className={cn(
|
||||
'inline-flex h-6 max-w-[180px] items-center gap-1.5 rounded-full border border-border bg-accent/40 pr-2 pl-2.5 text-[11.5px] text-foreground transition-colors',
|
||||
'hover:border-border hover:bg-accent/70 data-[state=open]:border-border data-[state=open]:bg-accent/70',
|
||||
)}
|
||||
>
|
||||
<span className={cn('size-1.5 rounded-full', getStatusDot(status))} />
|
||||
<span className="truncate font-medium font-mono text-[11.5px] tracking-[-0.01em]">
|
||||
{selectedAgent?.name ?? 'Select agent'}
|
||||
</span>
|
||||
<ChevronDown className="size-3 shrink-0 text-muted-foreground" />
|
||||
</button>
|
||||
) : (
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<Bot className="h-4 w-4" />
|
||||
<span className={cn('size-2 rounded-full', getStatusDot(status))} />
|
||||
<span className="max-w-32 truncate">
|
||||
{selectedAgent?.name ?? 'Select agent'}
|
||||
</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
)
|
||||
|
||||
return (
|
||||
<Popover open={open} onOpenChange={setOpen}>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<Bot className="h-4 w-4" />
|
||||
<span className={cn('size-2 rounded-full', getStatusDot(status))} />
|
||||
<span className="max-w-32 truncate">
|
||||
{selectedAgent?.name ?? 'Select agent'}
|
||||
</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
<PopoverTrigger asChild>{triggerNode}</PopoverTrigger>
|
||||
<PopoverContent side="bottom" align="start" className="w-72 p-0">
|
||||
<Command>
|
||||
<CommandInput placeholder="Search agents..." className="h-9" />
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
import { Bot, Loader2, RefreshCw } from 'lucide-react'
|
||||
import { type FC, useEffect, useRef } from 'react'
|
||||
import { type FC, Fragment, useEffect, useRef } from 'react'
|
||||
import {
|
||||
Conversation,
|
||||
ConversationContent,
|
||||
ConversationScrollButton,
|
||||
} from '@/components/ai-elements/conversation'
|
||||
import type { AgentConversationTurn } from '@/lib/agent-conversations/types'
|
||||
import type { ProducedFilesRailGroup } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FileCardStrip } from './agent-conversation.file-card-strip'
|
||||
import { ClawChatMessage } from './ClawChatMessage'
|
||||
import { ConversationMessage } from './ConversationMessage'
|
||||
import type { ClawChatMessage as ClawChatMessageModel } from './claw-chat-types'
|
||||
@@ -15,6 +17,29 @@ interface ClawChatProps {
|
||||
agentName: string
|
||||
historyMessages: ClawChatMessageModel[]
|
||||
turns: AgentConversationTurn[]
|
||||
/**
|
||||
* Persisted turns that still need to render their FileCardStrip
|
||||
* because the history items they were filtered against don't
|
||||
* carry produced-files data. Rendered between history and the
|
||||
* live `turns` so the strip lands at the bottom of the
|
||||
* corresponding assistant turn.
|
||||
*/
|
||||
stripOnlyTurns?: AgentConversationTurn[]
|
||||
/**
|
||||
* Maps each assistant history message id → the produced-files
|
||||
* group that came from its turn. Built by
|
||||
* `mapHistoryToProducedFilesGroups` upstream so the strip
|
||||
* renders directly under the matching message instead of
|
||||
* stacking at the conversation tail.
|
||||
*/
|
||||
filesByAssistantId?: Map<string, ProducedFilesRailGroup>
|
||||
/**
|
||||
* Produced-files groups that didn't match any persisted history
|
||||
* pair (e.g. orphaned turns where history loaded after the
|
||||
* group was attributed). Rendered at the conversation tail as
|
||||
* a fallback so the user can still see them.
|
||||
*/
|
||||
tailStripGroups?: ReadonlyArray<ProducedFilesRailGroup>
|
||||
streaming: boolean
|
||||
isInitialLoading: boolean
|
||||
error: Error | null
|
||||
@@ -22,6 +47,8 @@ interface ClawChatProps {
|
||||
isFetchingNextPage: boolean
|
||||
onFetchNextPage: () => void
|
||||
onRetry: () => void
|
||||
/** Wired through to the inline file-card strip on each assistant turn. */
|
||||
onOpenOutputsRail?: ((turnId?: string | null) => void) | null
|
||||
className?: string
|
||||
}
|
||||
|
||||
@@ -78,6 +105,9 @@ export const ClawChat: FC<ClawChatProps> = ({
|
||||
agentName,
|
||||
historyMessages,
|
||||
turns,
|
||||
stripOnlyTurns,
|
||||
filesByAssistantId,
|
||||
tailStripGroups,
|
||||
streaming,
|
||||
isInitialLoading,
|
||||
error,
|
||||
@@ -85,6 +115,7 @@ export const ClawChat: FC<ClawChatProps> = ({
|
||||
isFetchingNextPage,
|
||||
onFetchNextPage,
|
||||
onRetry,
|
||||
onOpenOutputsRail,
|
||||
className,
|
||||
}) => {
|
||||
const topSentinelRef = useRef<HTMLDivElement>(null)
|
||||
@@ -147,14 +178,44 @@ export const ClawChat: FC<ClawChatProps> = ({
|
||||
Start of conversation
|
||||
</div>
|
||||
) : null}
|
||||
{historyMessages.map((message) => (
|
||||
<ClawChatMessage key={message.id} message={message} />
|
||||
{historyMessages.map((message) => {
|
||||
const matched = filesByAssistantId?.get(message.id)
|
||||
return (
|
||||
<Fragment key={message.id}>
|
||||
<ClawChatMessage message={message} />
|
||||
{matched ? (
|
||||
<FileCardStrip
|
||||
turnId={matched.turnId}
|
||||
files={matched.files}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
) : null}
|
||||
</Fragment>
|
||||
)
|
||||
})}
|
||||
{(tailStripGroups ?? []).map((group) => (
|
||||
<FileCardStrip
|
||||
key={`tail-strip-${group.turnId}`}
|
||||
turnId={group.turnId}
|
||||
files={group.files}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
))}
|
||||
{(stripOnlyTurns ?? []).map((turn) => (
|
||||
<ConversationMessage
|
||||
key={`strip-${turn.id}`}
|
||||
turn={turn}
|
||||
streaming={false}
|
||||
stripOnly
|
||||
onOpenOutputsRail={onOpenOutputsRail}
|
||||
/>
|
||||
))}
|
||||
{turns.map((turn, index) => (
|
||||
<ConversationMessage
|
||||
key={turn.id}
|
||||
turn={turn}
|
||||
streaming={streaming && index === turns.length - 1}
|
||||
onOpenOutputsRail={onOpenOutputsRail}
|
||||
/>
|
||||
))}
|
||||
{error ? (
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { ArrowLeft, Home } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import type { FC, ReactNode } from 'react'
|
||||
import { Badge } from '@/components/ui/badge'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { formatRelativeTime } from '@/entrypoints/app/agents/agent-display.helpers'
|
||||
@@ -20,6 +20,8 @@ interface ConversationHeaderProps {
|
||||
backTarget: 'home' | 'page'
|
||||
onGoHome: () => void
|
||||
onPinToggle: (next: boolean) => void
|
||||
/** Optional trailing slot — currently used for the Outputs rail toggle. */
|
||||
headerExtra?: ReactNode
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -40,6 +42,7 @@ export const ConversationHeader: FC<ConversationHeaderProps> = ({
|
||||
backTarget,
|
||||
onGoHome,
|
||||
onPinToggle,
|
||||
headerExtra,
|
||||
}) => {
|
||||
const BackIcon = backTarget === 'home' ? Home : ArrowLeft
|
||||
const adapter = agent?.adapter ?? fallbackAdapter
|
||||
@@ -90,16 +93,21 @@ export const ConversationHeader: FC<ConversationHeaderProps> = ({
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex shrink-0 flex-col items-end gap-1">
|
||||
<StatusPill
|
||||
status={status}
|
||||
hasActiveTurn={Boolean(agent?.activeTurnId)}
|
||||
/>
|
||||
<div className="flex h-4 items-center text-[11px] text-muted-foreground">
|
||||
<span className="truncate">
|
||||
{metaParts.length > 0 ? metaParts.join(' · ') : '\u00A0'}
|
||||
</span>
|
||||
<div className="flex shrink-0 items-center gap-3">
|
||||
<div className="flex shrink-0 flex-col items-end gap-1">
|
||||
<StatusPill
|
||||
status={status}
|
||||
hasActiveTurn={Boolean(agent?.activeTurnId)}
|
||||
/>
|
||||
<div className="flex h-4 items-center text-[11px] text-muted-foreground">
|
||||
<span className="truncate">
|
||||
{metaParts.length > 0 ? metaParts.join(' · ') : '\u00A0'}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
{headerExtra ? (
|
||||
<div className="flex shrink-0 items-center">{headerExtra}</div>
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -164,7 +164,16 @@ function VoiceButton({
|
||||
)
|
||||
}
|
||||
|
||||
function ContextControls({
|
||||
/**
|
||||
* Calm-composer footer shared by both `/home` (`variant="home"`) and
|
||||
* the chat surface at `/agents/:agentId` (`variant="conversation"`).
|
||||
* Pill-shaped chips on an internal dashed divider, with a right-
|
||||
* aligned keyboard hint. The agent selector is conditional via
|
||||
* `showAgentSelector`: home shows it as a filled pill on the left,
|
||||
* the chat surface hides it (the agent is locked once you're in the
|
||||
* conversation).
|
||||
*/
|
||||
function CalmContextControls({
|
||||
agents,
|
||||
onCreateAgent,
|
||||
onSelectAgent,
|
||||
@@ -201,110 +210,128 @@ function ContextControls({
|
||||
)?.is_authenticated
|
||||
})
|
||||
|
||||
const showApps = supports(Feature.MANAGED_MCP_SUPPORT)
|
||||
const showWorkspace = supports(Feature.WORKSPACE_FOLDER_SUPPORT)
|
||||
|
||||
return (
|
||||
<div className="flex items-center justify-between border-border/40 border-t px-4 py-2.5">
|
||||
<div className="flex items-center gap-1">
|
||||
{showAgentSelector ? (
|
||||
<div className="mx-3 flex items-center gap-1 border-border/60 border-t border-dashed py-2">
|
||||
{showAgentSelector ? (
|
||||
<>
|
||||
<AgentSelector
|
||||
agents={agents}
|
||||
selectedAgentId={selectedAgentId}
|
||||
onSelectAgent={onSelectAgent}
|
||||
onCreateAgent={onCreateAgent}
|
||||
status={status}
|
||||
triggerVariant="pill"
|
||||
/>
|
||||
) : null}
|
||||
{supports(Feature.WORKSPACE_FOLDER_SUPPORT) ? (
|
||||
<WorkspaceSelector>
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<Folder className="h-4 w-4" />
|
||||
<span>{selectedFolder?.name || 'Add workspace'}</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
</WorkspaceSelector>
|
||||
) : null}
|
||||
<TabPickerPopover
|
||||
variant="selector"
|
||||
selectedTabs={selectedTabs}
|
||||
onToggleTab={onToggleTab}
|
||||
>
|
||||
<Button
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
selectedTabs.length > 0
|
||||
? 'bg-[var(--accent-orange)]! text-white shadow-sm'
|
||||
: 'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
<span
|
||||
aria-hidden="true"
|
||||
className="mx-1 inline-block h-3.5 w-px shrink-0 bg-border"
|
||||
/>
|
||||
</>
|
||||
) : null}
|
||||
{showWorkspace ? (
|
||||
<WorkspaceSelector>
|
||||
<button
|
||||
type="button"
|
||||
className="inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] text-muted-foreground transition-colors hover:bg-accent hover:text-foreground data-[state=open]:bg-accent data-[state=open]:text-foreground"
|
||||
>
|
||||
<Layers className="h-4 w-4" />
|
||||
<span>Tabs</span>
|
||||
</Button>
|
||||
</TabPickerPopover>
|
||||
<Button
|
||||
<Folder className="size-3" />
|
||||
<span>Workspace</span>
|
||||
<span className="font-mono text-[10.5px] text-muted-foreground/70">
|
||||
{selectedFolder?.name ?? 'none'}
|
||||
</span>
|
||||
</button>
|
||||
</WorkspaceSelector>
|
||||
) : null}
|
||||
<TabPickerPopover
|
||||
variant="selector"
|
||||
selectedTabs={selectedTabs}
|
||||
onToggleTab={onToggleTab}
|
||||
>
|
||||
<button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
onClick={onAttachClick}
|
||||
disabled={attachDisabled || !attachmentsEnabled}
|
||||
title="Attach files"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] transition-colors data-[state=open]:bg-accent data-[state=open]:text-foreground',
|
||||
selectedTabs.length > 0
|
||||
? 'bg-[var(--accent-orange)] text-white hover:bg-[var(--accent-orange)]/90'
|
||||
: 'text-muted-foreground hover:bg-accent hover:text-foreground',
|
||||
)}
|
||||
>
|
||||
<Paperclip className="h-4 w-4" />
|
||||
<span>Attach</span>
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{supports(Feature.MANAGED_MCP_SUPPORT) ? (
|
||||
<div className="ml-auto flex items-center gap-1.5">
|
||||
<AppSelector side="bottom">
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<div className="flex items-center -space-x-1.5">
|
||||
<Layers className="size-3" />
|
||||
<span>Tabs</span>
|
||||
<span
|
||||
className={cn(
|
||||
'font-mono text-[10.5px]',
|
||||
selectedTabs.length > 0
|
||||
? 'text-white/80'
|
||||
: 'text-muted-foreground/70',
|
||||
)}
|
||||
>
|
||||
{selectedTabs.length}
|
||||
</span>
|
||||
</button>
|
||||
</TabPickerPopover>
|
||||
<button
|
||||
type="button"
|
||||
onClick={onAttachClick}
|
||||
disabled={attachDisabled || !attachmentsEnabled}
|
||||
title="Attach files"
|
||||
className="inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] text-muted-foreground transition-colors hover:bg-accent hover:text-foreground disabled:cursor-not-allowed disabled:opacity-50"
|
||||
>
|
||||
<Paperclip className="size-3" />
|
||||
<span>Attach</span>
|
||||
</button>
|
||||
{showApps ? (
|
||||
<AppSelector side="bottom">
|
||||
<button
|
||||
type="button"
|
||||
className="inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] text-muted-foreground transition-colors hover:bg-accent hover:text-foreground data-[state=open]:bg-accent data-[state=open]:text-foreground"
|
||||
>
|
||||
{connectedManagedServers.length > 0 ? (
|
||||
<span className="flex items-center -space-x-1.5">
|
||||
{connectedManagedServers.slice(0, 4).map((server) => (
|
||||
<div
|
||||
<span
|
||||
key={server.id}
|
||||
className="rounded-full ring-2 ring-card"
|
||||
>
|
||||
<McpServerIcon
|
||||
serverName={server.managedServerName ?? ''}
|
||||
size={16}
|
||||
size={12}
|
||||
/>
|
||||
</div>
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
{connectedManagedServers.length > 4 ? (
|
||||
<span className="text-xs">
|
||||
+{connectedManagedServers.length - 4}
|
||||
</span>
|
||||
) : null}
|
||||
<span>Apps</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
</AppSelector>
|
||||
</div>
|
||||
</span>
|
||||
) : (
|
||||
<FileText className="size-3" />
|
||||
)}
|
||||
<span>Apps</span>
|
||||
<ChevronDown className="size-3" />
|
||||
</button>
|
||||
</AppSelector>
|
||||
) : null}
|
||||
<div className="ml-auto inline-flex shrink-0 items-center gap-1.5 text-[11px] text-muted-foreground/70">
|
||||
<kbd className="inline-flex h-4 min-w-4 items-center justify-center rounded border border-border bg-accent/30 px-1 font-mono text-[10px] text-muted-foreground">
|
||||
↵
|
||||
</kbd>
|
||||
<span>to run</span>
|
||||
<span className="text-muted-foreground/40">·</span>
|
||||
<kbd className="inline-flex h-4 min-w-4 items-center justify-center rounded border border-border bg-accent/30 px-1 font-mono text-[10px] text-muted-foreground">
|
||||
⇧
|
||||
</kbd>
|
||||
<kbd className="inline-flex h-4 min-w-4 items-center justify-center rounded border border-border bg-accent/30 px-1 font-mono text-[10px] text-muted-foreground">
|
||||
↵
|
||||
</kbd>
|
||||
<span>new line</span>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function HomeShell({ children }: { children: ReactNode }) {
|
||||
return (
|
||||
<div className="overflow-hidden rounded-[1.55rem] border border-border/60 bg-card/95 shadow-sm">
|
||||
<div className="overflow-hidden rounded-[1.55rem] border border-border/60 bg-card/95 shadow-sm transition-[border-color,box-shadow] duration-150 focus-within:border-[var(--accent-orange)]/40 focus-within:shadow-[0_0_0_4px_color-mix(in_oklch,var(--accent-orange)_15%,transparent),0_1px_2px_rgba(15,23,42,0.04)]">
|
||||
{children}
|
||||
</div>
|
||||
)
|
||||
@@ -312,7 +339,7 @@ function HomeShell({ children }: { children: ReactNode }) {
|
||||
|
||||
function ConversationShell({ children }: { children: ReactNode }) {
|
||||
return (
|
||||
<div className="overflow-hidden rounded-[1.35rem] border border-border/50 bg-background/95 shadow-[0_10px_30px_rgba(15,23,42,0.06)] backdrop-blur-md">
|
||||
<div className="overflow-hidden rounded-[1.35rem] border border-border/50 bg-background/95 shadow-[0_10px_30px_rgba(15,23,42,0.06)] backdrop-blur-md transition-[border-color,box-shadow] duration-150 focus-within:border-[var(--accent-orange)]/40 focus-within:shadow-[0_0_0_4px_color-mix(in_oklch,var(--accent-orange)_15%,transparent),0_10px_30px_rgba(15,23,42,0.06)]">
|
||||
{children}
|
||||
</div>
|
||||
)
|
||||
@@ -542,7 +569,7 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
}
|
||||
disabled={disabled || voice.isTranscribing}
|
||||
className={cn(
|
||||
'resize-none border-none bg-transparent px-0 text-[15px] shadow-none focus-visible:ring-0',
|
||||
'resize-none border-none bg-transparent px-0 text-[15px] shadow-none focus-visible:ring-0 dark:bg-transparent',
|
||||
'[field-sizing:fixed]',
|
||||
variant === 'home'
|
||||
? 'min-h-[40px] py-2 leading-6'
|
||||
@@ -583,7 +610,7 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
{voice.error}
|
||||
</div>
|
||||
) : null}
|
||||
<ContextControls
|
||||
<CalmContextControls
|
||||
agents={agents}
|
||||
onCreateAgent={onCreateAgent}
|
||||
onSelectAgent={onSelectAgent}
|
||||
|
||||
@@ -22,10 +22,26 @@ import type {
|
||||
AgentConversationTurn,
|
||||
ToolEntry,
|
||||
} from '@/lib/agent-conversations/types'
|
||||
import { FileCardStrip } from './agent-conversation.file-card-strip'
|
||||
|
||||
interface ConversationMessageProps {
|
||||
turn: AgentConversationTurn
|
||||
streaming: boolean
|
||||
/**
|
||||
* Forwarded to the inline file-card strip's "View" / "+N"
|
||||
* button. Wired up by AgentCommandConversation so the strip can
|
||||
* deep-link straight into the Outputs rail at the matching turn
|
||||
* group. `null` here disables the strip's deep-link affordance
|
||||
* — the cards still open the preview Sheet directly.
|
||||
*/
|
||||
onOpenOutputsRail?: ((turnId?: string | null) => void) | null
|
||||
/**
|
||||
* Render only the trailing FileCardStrip for this turn — used
|
||||
* when the turn's user / assistant text is already rendered
|
||||
* elsewhere (e.g. by `ClawChatMessage` from persisted history)
|
||||
* but the produced-files affordance would otherwise be lost.
|
||||
*/
|
||||
stripOnly?: boolean
|
||||
}
|
||||
|
||||
interface RenderEntry {
|
||||
@@ -88,9 +104,22 @@ function ToolStatusIcon({ status }: { status: ToolEntry['status'] }) {
|
||||
export const ConversationMessage: FC<ConversationMessageProps> = ({
|
||||
turn,
|
||||
streaming,
|
||||
onOpenOutputsRail,
|
||||
stripOnly,
|
||||
}) => {
|
||||
const entries = useMemo(() => buildRenderEntries(turn), [turn])
|
||||
|
||||
if (stripOnly) {
|
||||
if (!turn.producedFiles || turn.producedFiles.length === 0) return null
|
||||
return (
|
||||
<FileCardStrip
|
||||
turnId={turn.turnId ?? null}
|
||||
files={turn.producedFiles}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-3">
|
||||
<Message from="user">
|
||||
@@ -185,6 +214,14 @@ export const ConversationMessage: FC<ConversationMessageProps> = ({
|
||||
</Message>
|
||||
)}
|
||||
|
||||
{turn.producedFiles && turn.producedFiles.length > 0 ? (
|
||||
<FileCardStrip
|
||||
turnId={turn.turnId ?? null}
|
||||
files={turn.producedFiles}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
) : null}
|
||||
|
||||
{!turn.done && turn.parts.length === 0 && streaming && (
|
||||
<div className="flex gap-2">
|
||||
<div className="flex size-7 shrink-0 items-center justify-center rounded-full bg-[var(--accent-orange)] text-white">
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* @deprecated Replaced by `FileCardStrip` in
|
||||
* `agent-conversation.file-card-strip.tsx`. Kept temporarily so
|
||||
* any in-flight callers don't fail to import; remove in a
|
||||
* follow-up once nothing external references it.
|
||||
*
|
||||
* Compact "Files produced" card rendered under an assistant turn.
|
||||
*/
|
||||
|
||||
import { FileText, Image as ImageIcon, Paperclip } from 'lucide-react'
|
||||
import { type FC, useMemo, useState } from 'react'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { basenameOf, formatFileSize, inferFileKind } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FilePreviewSheet } from './agent-conversation.file-preview-sheet'
|
||||
|
||||
export interface ProducedFileLike {
|
||||
id: string
|
||||
path: string
|
||||
size: number
|
||||
}
|
||||
|
||||
interface ArtifactCardProps {
|
||||
files: ReadonlyArray<ProducedFileLike>
|
||||
className?: string
|
||||
}
|
||||
|
||||
const MAX_INLINE_ROWS = 4
|
||||
|
||||
export const ArtifactCard: FC<ArtifactCardProps> = ({ files, className }) => {
  // Which file's preview Sheet is open; null means the Sheet is closed.
  const [openFileId, setOpenFileId] = useState<string | null>(null)
  // One-way toggle: once the user reveals the overflow rows there is no
  // affordance here to collapse them again.
  const [expanded, setExpanded] = useState(false)

  // Stable alphabetical order by full path, independent of arrival order.
  const sortedFiles = useMemo(
    () => [...files].sort((a, b) => a.path.localeCompare(b.path)),
    [files],
  )

  // Early return AFTER all hooks so hook order stays stable across renders.
  if (sortedFiles.length === 0) return null

  const visible = expanded ? sortedFiles : sortedFiles.slice(0, MAX_INLINE_ROWS)
  const hiddenCount = sortedFiles.length - visible.length
  // Re-derive the open file from the current list so a stale id (file list
  // refreshed while the Sheet was open) degrades to "closed" data, not a crash.
  const openFile = sortedFiles.find((file) => file.id === openFileId) ?? null

  return (
    <div
      className={cn(
        'rounded-xl border border-border/60 bg-card/50 px-3 py-2.5',
        className,
      )}
    >
      <div className="mb-2 flex items-center gap-2 text-muted-foreground text-xs">
        <Paperclip className="size-3.5" />
        <span className="font-medium text-foreground">
          {sortedFiles.length === 1
            ? '1 file produced'
            : `${sortedFiles.length} files produced`}
        </span>
      </div>

      <ul className="flex flex-col gap-1">
        {visible.map((file) => (
          <li key={file.id}>
            <ArtifactRow file={file} onOpen={() => setOpenFileId(file.id)} />
          </li>
        ))}
      </ul>

      {hiddenCount > 0 ? (
        <Button
          type="button"
          variant="ghost"
          size="sm"
          className="mt-1.5 h-7 px-2 text-xs"
          onClick={() => setExpanded(true)}
        >
          Show {hiddenCount} more
        </Button>
      ) : null}

      <FilePreviewSheet
        fileId={openFile?.id ?? null}
        filePath={openFile?.path ?? null}
        open={Boolean(openFileId)}
        onOpenChange={(next) => {
          if (!next) setOpenFileId(null)
        }}
      />
    </div>
  )
}
|
||||
|
||||
/** Single row in the deprecated ArtifactCard list: kind icon, basename, size. */
function ArtifactRow({
  file,
  onOpen,
}: {
  file: ProducedFileLike
  onOpen: () => void
}) {
  const fileName = basenameOf(file.path)
  // Images get the thumbnail glyph; every other kind falls back to a document.
  const RowIcon = inferFileKind(file.path) === 'image' ? ImageIcon : FileText

  const rowClasses = cn(
    'flex w-full items-center gap-2 rounded-md px-2 py-1.5 text-left text-sm transition-colors',
    'hover:bg-accent/60 focus:bg-accent/60 focus:outline-hidden',
  )

  return (
    <button type="button" onClick={onOpen} className={rowClasses}>
      <RowIcon className="size-3.5 shrink-0 text-muted-foreground" />
      <span className="min-w-0 flex-1 truncate font-medium">{fileName}</span>
      <span className="shrink-0 text-muted-foreground text-xs tabular-nums">
        {formatFileSize(file.size)}
      </span>
    </button>
  )
}
|
||||
@@ -0,0 +1,163 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* "Files produced" strip rendered at the bottom of any assistant
|
||||
* turn that produced files (openclaw only). Replaces Phase 5.3's
|
||||
* row-list ArtifactCard with small horizontal cards for a lighter
|
||||
* visual treatment.
|
||||
*
|
||||
* Click semantics:
|
||||
* - Card → opens FilePreviewSheet directly (preview + download).
|
||||
* - View → emits onOpenRail(turnId); the parent opens the rail
|
||||
* and scrolls to the matching turn group.
|
||||
* - +N → same as View (the user is asking to see what was
|
||||
* overflowed).
|
||||
*/
|
||||
|
||||
import { ChevronRight, FileText, Image as ImageIcon } from 'lucide-react'
|
||||
import { type FC, useMemo, useState } from 'react'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { basenameOf, formatFileSize, inferFileKind } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FilePreviewSheet } from './agent-conversation.file-preview-sheet'
|
||||
|
||||
export interface CardStripFile {
|
||||
id: string
|
||||
path: string
|
||||
size: number
|
||||
}
|
||||
|
||||
interface FileCardStripProps {
|
||||
/**
|
||||
* The turn id that produced these files. Forwarded to
|
||||
* `onOpenRail` so the rail can scroll/expand the matching group.
|
||||
* Optional because the live `produced_files` event lands before
|
||||
* the harness has stamped a server-issued turn id on the
|
||||
* optimistic turn — in that brief window, View falls back to
|
||||
* just opening the rail at the top.
|
||||
*/
|
||||
turnId?: string | null
|
||||
files: ReadonlyArray<CardStripFile>
|
||||
/** Caller wires this to `setOutputsRailOpen(true)` + deep-link. */
|
||||
onOpenRail: (turnId?: string | null) => void
|
||||
className?: string
|
||||
}
|
||||
|
||||
const MAX_VISIBLE = 4
|
||||
|
||||
export const FileCardStrip: FC<FileCardStripProps> = ({
  turnId,
  files,
  onOpenRail,
  className,
}) => {
  // Which card's preview Sheet is open; null means closed.
  const [openFileId, setOpenFileId] = useState<string | null>(null)

  // Stable alphabetical order by full path, independent of arrival order.
  const sortedFiles = useMemo(
    () => [...files].sort((a, b) => a.path.localeCompare(b.path)),
    [files],
  )

  // Early return AFTER all hooks so hook order stays stable across renders.
  if (sortedFiles.length === 0) return null

  // Overflow beyond MAX_VISIBLE is represented by the "+N" tile, which
  // deep-links into the Outputs rail rather than expanding inline.
  const visible = sortedFiles.slice(0, MAX_VISIBLE)
  const hiddenCount = sortedFiles.length - visible.length
  // Re-derive from the current list so a stale id degrades to null safely.
  const openFile = sortedFiles.find((file) => file.id === openFileId) ?? null

  return (
    <div
      className={cn(
        'rounded-xl border border-border/60 bg-card/50 px-3 py-2.5',
        className,
      )}
    >
      <div className="mb-2 flex items-center gap-2">
        <span className="text-muted-foreground text-xs">
          {sortedFiles.length === 1
            ? 'File produced'
            : `Files produced (${sortedFiles.length})`}
        </span>
        <Button
          type="button"
          variant="ghost"
          size="sm"
          className="ml-auto h-7 gap-1 px-2 text-xs"
          onClick={() => onOpenRail(turnId ?? null)}
        >
          View
          <ChevronRight className="size-3" />
        </Button>
      </div>

      <div className="flex flex-wrap gap-2">
        {visible.map((file) => (
          <FileCard
            key={file.id}
            file={file}
            onOpen={() => setOpenFileId(file.id)}
          />
        ))}
        {hiddenCount > 0 ? (
          <button
            type="button"
            onClick={() => onOpenRail(turnId ?? null)}
            className={cn(
              'flex h-[56px] min-w-[56px] shrink-0 items-center justify-center rounded-lg border border-border/60 px-3 text-muted-foreground text-xs',
              'transition-colors hover:border-border hover:bg-accent/40 hover:text-foreground',
              'focus:outline-hidden focus-visible:ring-2 focus-visible:ring-[var(--accent-orange)]',
            )}
            title={`See ${hiddenCount} more in the Outputs rail`}
          >
            +{hiddenCount}
          </button>
        ) : null}
      </div>

      <FilePreviewSheet
        fileId={openFile?.id ?? null}
        filePath={openFile?.path ?? null}
        open={Boolean(openFileId)}
        onOpenChange={(next) => {
          if (!next) setOpenFileId(null)
        }}
      />
    </div>
  )
}
|
||||
|
||||
function FileCard({
|
||||
file,
|
||||
onOpen,
|
||||
}: {
|
||||
file: CardStripFile
|
||||
onOpen: () => void
|
||||
}) {
|
||||
const name = basenameOf(file.path)
|
||||
const kind = inferFileKind(file.path)
|
||||
const Icon = kind === 'image' ? ImageIcon : FileText
|
||||
|
||||
return (
|
||||
<button
|
||||
type="button"
|
||||
onClick={onOpen}
|
||||
title={file.path}
|
||||
className={cn(
|
||||
'flex h-[56px] w-[140px] shrink-0 flex-col justify-between rounded-lg border border-border/60 bg-background px-2.5 py-1.5 text-left',
|
||||
'transition-colors hover:border-border hover:bg-accent/40',
|
||||
'focus:outline-hidden focus-visible:ring-2 focus-visible:ring-[var(--accent-orange)]',
|
||||
)}
|
||||
>
|
||||
<div className="flex min-w-0 items-center gap-1.5">
|
||||
<Icon className="size-3.5 shrink-0 text-muted-foreground" />
|
||||
<span className="min-w-0 flex-1 truncate font-medium text-xs">
|
||||
{name}
|
||||
</span>
|
||||
</div>
|
||||
<span className="text-[11px] text-muted-foreground tabular-nums">
|
||||
{formatFileSize(file.size)}
|
||||
</span>
|
||||
</button>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,283 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Shared preview drawer used by the inline artifact card AND the
|
||||
* Outputs rail. Branches on the FilePreview discriminated union and
|
||||
* renders the appropriate body. Always opens via a controlled
|
||||
* `open`/`onOpenChange` pair so the parent owns the selected file.
|
||||
*/
|
||||
|
||||
import { Download, FileWarning, Loader2 } from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef } from 'react'
|
||||
import { toast } from 'sonner'
|
||||
import { MessageResponse } from '@/components/ai-elements/message'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { ScrollArea } from '@/components/ui/scroll-area'
|
||||
import {
|
||||
Sheet,
|
||||
SheetContent,
|
||||
SheetDescription,
|
||||
SheetHeader,
|
||||
SheetTitle,
|
||||
} from '@/components/ui/sheet'
|
||||
import { Skeleton } from '@/components/ui/skeleton'
|
||||
import {
|
||||
basenameOf,
|
||||
buildFileDownloadUrl,
|
||||
extensionOf,
|
||||
type FilePreview,
|
||||
formatFileSize,
|
||||
useFilePreview,
|
||||
} from '@/lib/agent-files'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import { cn } from '@/lib/utils'
|
||||
|
||||
interface FilePreviewSheetProps {
|
||||
fileId: string | null
|
||||
filePath: string | null
|
||||
open: boolean
|
||||
onOpenChange: (open: boolean) => void
|
||||
}
|
||||
|
||||
const MARKDOWN_EXTENSIONS = new Set(['md', 'markdown', 'mdx'])
|
||||
|
||||
export const FilePreviewSheet: FC<FilePreviewSheetProps> = ({
  fileId,
  filePath,
  open,
  onOpenChange,
}) => {
  const { baseUrl } = useAgentServerUrl()
  // Preview fetch is gated on `open` so a closed sheet does no work.
  const { preview, loading, error } = useFilePreview(fileId, open)

  const fileName = filePath ? basenameOf(filePath) : 'File preview'
  const downloadUrl = useMemo(() => {
    // No URL while disconnected or with no selection — handleDownload
    // uses this to show a reconnect toast instead of a dead link.
    if (!baseUrl || !fileId) return null
    return buildFileDownloadUrl(baseUrl, fileId)
  }, [baseUrl, fileId])

  // Surface preview-load failures in a toast in addition to the
  // inline error block — the inline UI lives at the bottom of the
  // sheet and is easy to miss when scrolled into the body.
  // The ref dedupes: at most one toast per file id per open cycle.
  const lastToastedFileIdRef = useRef<string | null>(null)
  useEffect(() => {
    if (!open) {
      // Reset on close so reopening the same file can toast again.
      lastToastedFileIdRef.current = null
      return
    }
    if (!error || !fileId) return
    if (lastToastedFileIdRef.current === fileId) return
    lastToastedFileIdRef.current = fileId
    toast.error('Could not load preview', { description: error.message })
  }, [open, error, fileId])

  const handleDownload = () => {
    if (!downloadUrl) {
      toast.error("Couldn't reach the agent server", {
        description: 'Reconnect to BrowserOS and try again.',
      })
      return
    }
    // Manually trigger the download so any future failure (e.g. the
    // server returns 404 because the file was removed) can be
    // surfaced via toast — the bare <a download> path swallows
    // these errors silently.
    // NOTE(review): no failure handling is wired here yet; the anchor
    // click itself cannot report errors — confirm intended follow-up.
    const link = document.createElement('a')
    link.href = downloadUrl
    link.download = fileName
    link.rel = 'noopener'
    document.body.appendChild(link)
    link.click()
    link.remove()
  }

  return (
    <Sheet open={open} onOpenChange={onOpenChange}>
      <SheetContent
        side="right"
        className="flex w-full flex-col gap-0 p-0 sm:max-w-xl"
      >
        <SheetHeader className="border-border/60 border-b px-5 py-4">
          <SheetTitle className="truncate pr-8">{fileName}</SheetTitle>
          <SheetDescription className="truncate">
            {filePath ?? ''}
          </SheetDescription>
        </SheetHeader>

        <ScrollArea className="min-h-0 flex-1">
          <div className="px-5 py-4">
            {loading ? (
              <PreviewSkeleton />
            ) : error ? (
              <PreviewError message={error.message} />
            ) : preview ? (
              <PreviewBody
                preview={preview}
                filePath={filePath}
                downloadUrl={downloadUrl}
              />
            ) : null}
          </div>
        </ScrollArea>

        {fileId ? (
          <div className="border-border/60 border-t bg-background/90 px-5 py-3 backdrop-blur">
            <Button
              type="button"
              size="sm"
              className="w-full gap-2"
              onClick={handleDownload}
            >
              <Download className="size-3.5" />
              Download
            </Button>
          </div>
        ) : null}
      </SheetContent>
    </Sheet>
  )
}
|
||||
|
||||
function PreviewSkeleton() {
|
||||
return (
|
||||
<div className="flex flex-col gap-2">
|
||||
<div className="flex items-center gap-2 text-muted-foreground text-xs">
|
||||
<Loader2 className="size-3.5 animate-spin" />
|
||||
Loading preview...
|
||||
</div>
|
||||
<Skeleton className="h-4 w-3/4" />
|
||||
<Skeleton className="h-4 w-full" />
|
||||
<Skeleton className="h-4 w-5/6" />
|
||||
<Skeleton className="h-4 w-2/3" />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
/** Inline error block for preview-load failures (a toast is also shown
 * by the sheet itself, since this block can be scrolled out of view). */
function PreviewError({ message }: { message: string }) {
  return (
    <div className="flex flex-col items-start gap-2 rounded-lg border border-destructive/30 bg-destructive/5 px-3 py-2 text-destructive text-sm">
      <div className="flex items-center gap-2 font-medium">
        <FileWarning className="size-4" />
        Could not load preview
      </div>
      <p className="text-destructive/80 text-xs">{message}</p>
    </div>
  )
}
|
||||
|
||||
function PreviewBody({
|
||||
preview,
|
||||
filePath,
|
||||
downloadUrl,
|
||||
}: {
|
||||
preview: FilePreview
|
||||
filePath: string | null
|
||||
downloadUrl: string | null
|
||||
}) {
|
||||
if (preview.kind === 'missing') {
|
||||
return (
|
||||
<div className="rounded-lg border border-border/60 bg-muted/40 px-4 py-6 text-center text-muted-foreground text-sm">
|
||||
This file is no longer in the workspace. The agent may have moved or
|
||||
deleted it after the turn finished.
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (preview.kind === 'image') {
|
||||
return (
|
||||
<div className="flex flex-col gap-3">
|
||||
<PreviewMeta preview={preview} />
|
||||
<div className="overflow-hidden rounded-lg border border-border/60 bg-muted/30">
|
||||
<img
|
||||
src={preview.dataUrl}
|
||||
alt={filePath ?? 'preview'}
|
||||
className="block max-h-[60vh] w-full object-contain"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (preview.kind === 'pdf') {
|
||||
return (
|
||||
<div className="flex flex-col gap-3">
|
||||
<PreviewMeta preview={preview} />
|
||||
<div className="rounded-lg border border-border/60 bg-muted/40 px-4 py-6 text-center text-muted-foreground text-sm">
|
||||
PDF previews aren't supported inline yet. Use Download to open this
|
||||
file in your default PDF viewer.
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (preview.kind === 'binary') {
|
||||
return (
|
||||
<div className="flex flex-col gap-3">
|
||||
<PreviewMeta preview={preview} />
|
||||
<div className="rounded-lg border border-border/60 bg-muted/40 px-4 py-6 text-center text-muted-foreground text-sm">
|
||||
No inline preview for this file type.
|
||||
{downloadUrl ? ' Use Download to save it locally.' : null}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return <TextPreviewBody preview={preview} filePath={filePath} />
|
||||
}
|
||||
|
||||
function TextPreviewBody({
|
||||
preview,
|
||||
filePath,
|
||||
}: {
|
||||
preview: Extract<FilePreview, { kind: 'text' }>
|
||||
filePath: string | null
|
||||
}) {
|
||||
const ext = filePath ? extensionOf(filePath).toLowerCase() : ''
|
||||
const renderAsMarkdown = MARKDOWN_EXTENSIONS.has(ext)
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-3">
|
||||
<PreviewMeta preview={preview} />
|
||||
{renderAsMarkdown ? (
|
||||
<div
|
||||
className={cn(
|
||||
'prose prose-sm dark:prose-invert max-w-none break-words rounded-lg border border-border/60 bg-muted/30 px-4 py-3',
|
||||
"[&_[data-streamdown='code-block']]:!w-full [&_[data-streamdown='code-block']]:overflow-x-auto",
|
||||
)}
|
||||
>
|
||||
<MessageResponse mode="static" parseIncompleteMarkdown={false}>
|
||||
{preview.snippet}
|
||||
</MessageResponse>
|
||||
</div>
|
||||
) : (
|
||||
<pre className="overflow-x-auto rounded-lg border border-border/60 bg-muted/30 px-3 py-2 text-xs leading-relaxed">
|
||||
<code className="font-mono text-foreground">{preview.snippet}</code>
|
||||
</pre>
|
||||
)}
|
||||
{preview.truncated ? (
|
||||
<div className="text-muted-foreground text-xs">
|
||||
Showing the first part of this file. Download to see the full
|
||||
contents.
|
||||
</div>
|
||||
) : null}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
/** Size · mime-type line shown above every non-missing preview body. */
function PreviewMeta({
  preview,
}: {
  preview: Exclude<FilePreview, { kind: 'missing' }>
}) {
  // Empty mime string means the server could not sniff a type.
  const mimeLabel = preview.mimeType || 'unknown'

  return (
    <div className="flex flex-wrap items-center gap-x-3 gap-y-1 text-muted-foreground text-xs">
      <span className="font-medium text-foreground">
        {formatFileSize(preview.size)}
      </span>
      <span>·</span>
      <span className="font-mono">{mimeLabel}</span>
    </div>
  )
}
|
||||
@@ -0,0 +1,338 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Per-agent right-side "Outputs" panel. Lists every file the harness
|
||||
* has attributed to this agent, grouped by the turn that produced
|
||||
* them. Click a row to open the shared preview Sheet.
|
||||
*
|
||||
* Lifecycle:
|
||||
* - Open/closed state is controlled by the parent and persisted via
|
||||
* `useOutputsRailOpen(agentId)` so each agent remembers its
|
||||
* preference independently.
|
||||
* - Data refreshes whenever a turn finishes (the conversation hook
|
||||
* fires `useInvalidateAgentOutputs` from its finally block).
|
||||
* - Manual "Refresh" button is wired to `useRefreshAgentOutputs`
|
||||
* for users who navigate in mid-turn.
|
||||
*/
|
||||
|
||||
import {
|
||||
ChevronDown,
|
||||
ChevronRight,
|
||||
FileText,
|
||||
Image as ImageIcon,
|
||||
Inbox,
|
||||
Loader2,
|
||||
PanelRightClose,
|
||||
RefreshCw,
|
||||
} from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { toast } from 'sonner'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import {
|
||||
Collapsible,
|
||||
CollapsibleContent,
|
||||
CollapsibleTrigger,
|
||||
} from '@/components/ui/collapsible'
|
||||
import { ScrollArea } from '@/components/ui/scroll-area'
|
||||
import { Skeleton } from '@/components/ui/skeleton'
|
||||
import {
|
||||
basenameOf,
|
||||
formatFileSize,
|
||||
inferFileKind,
|
||||
type ProducedFilesRailGroup,
|
||||
useAgentOutputs,
|
||||
useRefreshAgentOutputs,
|
||||
} from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FilePreviewSheet } from './agent-conversation.file-preview-sheet'
|
||||
|
||||
interface OutputsRailProps {
|
||||
agentId: string
|
||||
onClose: () => void
|
||||
/**
|
||||
* When set, the rail scrolls the matching `RailTurnGroup` into
|
||||
* view and force-opens its `Collapsible`. Used by the inline
|
||||
* file-card strip's "View" / "+N" deep-link path. Cleared by
|
||||
* the parent (via `onFocusTurnConsumed`) once the rail has
|
||||
* acknowledged the deep-link so subsequent renders don't keep
|
||||
* re-scrolling the same group.
|
||||
*/
|
||||
focusTurnId?: string | null
|
||||
onFocusTurnConsumed?: () => void
|
||||
}
|
||||
|
||||
const RAIL_LOCAL_STORAGE_PREFIX = 'browseros:outputs-rail:'
|
||||
|
||||
/**
|
||||
* Controlled open/close state with per-agent localStorage memory.
|
||||
* Returns a tuple compatible with React's useState shape so the
|
||||
* parent can pass it straight into the rail without an extra effect.
|
||||
*/
|
||||
export function useOutputsRailOpen(
|
||||
agentId: string,
|
||||
): [boolean, (next: boolean) => void] {
|
||||
const [open, setOpen] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
if (typeof window === 'undefined' || !agentId) return
|
||||
try {
|
||||
const stored = window.localStorage.getItem(
|
||||
`${RAIL_LOCAL_STORAGE_PREFIX}${agentId}`,
|
||||
)
|
||||
setOpen(stored === '1')
|
||||
} catch {
|
||||
// localStorage may be unavailable (private mode, locked-down
|
||||
// contexts) — fall back to closed.
|
||||
}
|
||||
}, [agentId])
|
||||
|
||||
const update = (next: boolean) => {
|
||||
setOpen(next)
|
||||
if (typeof window === 'undefined' || !agentId) return
|
||||
try {
|
||||
window.localStorage.setItem(
|
||||
`${RAIL_LOCAL_STORAGE_PREFIX}${agentId}`,
|
||||
next ? '1' : '0',
|
||||
)
|
||||
} catch {
|
||||
// Best-effort persistence.
|
||||
}
|
||||
}
|
||||
|
||||
return [open, update]
|
||||
}
|
||||
|
||||
export const OutputsRail: FC<OutputsRailProps> = ({
  agentId,
  onClose,
  focusTurnId,
  onFocusTurnConsumed,
}) => {
  const { groups, loading, error } = useAgentOutputs(agentId)
  const refresh = useRefreshAgentOutputs(agentId)

  // File currently open in the shared preview Sheet; null = closed.
  const [openFile, setOpenFile] = useState<{
    id: string
    path: string
  } | null>(null)

  // Header badge count — total files across every turn group.
  const totalFiles = useMemo(
    () => groups.reduce((sum, group) => sum + group.files.length, 0),
    [groups],
  )

  return (
    <aside className="flex h-full min-h-0 w-full flex-col border-border/50 border-l bg-background">
      <header className="flex shrink-0 items-center gap-2 border-border/50 border-b px-3 py-3">
        <span className="font-semibold text-[13px] uppercase tracking-wide">
          Outputs
        </span>
        {totalFiles > 0 ? (
          <span className="text-muted-foreground text-xs tabular-nums">
            {totalFiles}
          </span>
        ) : null}
        <div className="ml-auto flex items-center gap-1">
          <Button
            type="button"
            variant="ghost"
            size="icon"
            className="size-7"
            onClick={() =>
              refresh.mutate(undefined, {
                onError: (err) =>
                  toast.error('Refresh failed', {
                    description:
                      err instanceof Error ? err.message : String(err),
                  }),
              })
            }
            disabled={refresh.isPending}
            title="Refresh"
          >
            {refresh.isPending ? (
              <Loader2 className="size-3.5 animate-spin" />
            ) : (
              <RefreshCw className="size-3.5" />
            )}
          </Button>
          <Button
            type="button"
            variant="ghost"
            size="icon"
            className="size-7"
            onClick={onClose}
            title="Hide outputs"
          >
            <PanelRightClose className="size-3.5" />
          </Button>
        </div>
      </header>

      <ScrollArea className="min-h-0 flex-1">
        <div className="px-2 py-2">
          {/* Skeleton only on the very first load — later refreshes keep
              the stale groups visible instead of flashing. */}
          {loading && groups.length === 0 ? (
            <RailSkeleton />
          ) : error ? (
            <RailError message={error.message} />
          ) : groups.length === 0 ? (
            <RailEmpty />
          ) : (
            <ul className="flex flex-col gap-2">
              {groups.map((group) => (
                <li key={group.turnId}>
                  <RailTurnGroup
                    group={group}
                    focused={
                      Boolean(focusTurnId) && focusTurnId === group.turnId
                    }
                    onFocusConsumed={onFocusTurnConsumed}
                    onOpenFile={(file) =>
                      setOpenFile({ id: file.id, path: file.path })
                    }
                  />
                </li>
              ))}
            </ul>
          )}
        </div>
      </ScrollArea>

      <FilePreviewSheet
        fileId={openFile?.id ?? null}
        filePath={openFile?.path ?? null}
        open={Boolean(openFile)}
        onOpenChange={(next) => {
          if (!next) setOpenFile(null)
        }}
      />
    </aside>
  )
}
|
||||
|
||||
/** Collapsible group of files produced by one turn, headed by the prompt. */
function RailTurnGroup({
  group,
  focused,
  onFocusConsumed,
  onOpenFile,
}: {
  group: ProducedFilesRailGroup
  focused: boolean
  onFocusConsumed?: () => void
  onOpenFile: (file: { id: string; path: string }) => void
}) {
  // Groups start expanded; the user can collapse per group.
  const [open, setOpen] = useState(true)
  // Fall back to a generic label when the turn prompt is all whitespace.
  const headerLabel = group.turnPrompt.trim() || 'Turn'
  const containerRef = useRef<HTMLDivElement>(null)

  // Deep-link consumption: when the parent passes `focused=true`,
  // expand the collapsible (in case the user had collapsed it
  // earlier) and scroll into view. Fire `onFocusConsumed` so the
  // parent can drop the URL param and we don't re-scroll on every
  // render after that.
  useEffect(() => {
    if (!focused) return
    setOpen(true)
    containerRef.current?.scrollIntoView({
      behavior: 'smooth',
      block: 'nearest',
    })
    onFocusConsumed?.()
  }, [focused, onFocusConsumed])

  return (
    <div ref={containerRef}>
      <Collapsible open={open} onOpenChange={setOpen}>
        <CollapsibleTrigger
          className={cn(
            'flex w-full items-center gap-1.5 rounded-md px-1.5 py-1 text-left text-muted-foreground text-xs',
            'transition-colors hover:bg-accent/40 hover:text-foreground',
          )}
        >
          {open ? (
            <ChevronDown className="size-3 shrink-0" />
          ) : (
            <ChevronRight className="size-3 shrink-0" />
          )}
          <span className="min-w-0 flex-1 truncate font-medium">
            {headerLabel}
          </span>
          <span className="shrink-0 tabular-nums">{group.files.length}</span>
        </CollapsibleTrigger>
        <CollapsibleContent>
          <ul className="mt-1 ml-1 flex flex-col gap-0.5 border-border/40 border-l pl-2">
            {group.files.map((file) => (
              <li key={file.id}>
                <RailFileRow file={file} onOpen={() => onOpenFile(file)} />
              </li>
            ))}
          </ul>
        </CollapsibleContent>
      </Collapsible>
    </div>
  )
}
|
||||
|
||||
/** Compact file row inside a rail turn group: icon, basename, size. */
function RailFileRow({
  file,
  onOpen,
}: {
  file: ProducedFilesRailGroup['files'][number]
  onOpen: () => void
}) {
  const fileName = basenameOf(file.path)
  // Images get the thumbnail glyph; every other kind falls back to a document.
  const RowIcon = inferFileKind(file.path) === 'image' ? ImageIcon : FileText

  const rowClasses = cn(
    'flex w-full items-center gap-2 rounded-md px-1.5 py-1 text-left text-xs transition-colors',
    'hover:bg-accent/60 focus:bg-accent/60 focus:outline-hidden',
  )

  return (
    <button
      type="button"
      onClick={onOpen}
      className={rowClasses}
      // Full path in the tooltip since the row only shows the basename.
      title={file.path}
    >
      <RowIcon className="size-3 shrink-0 text-muted-foreground" />
      <span className="min-w-0 flex-1 truncate">{fileName}</span>
      <span className="shrink-0 text-muted-foreground tabular-nums">
        {formatFileSize(file.size)}
      </span>
    </button>
  )
}
|
||||
|
||||
function RailSkeleton() {
|
||||
return (
|
||||
<div className="flex flex-col gap-2 px-1.5 py-1">
|
||||
<Skeleton className="h-4 w-1/2" />
|
||||
<Skeleton className="h-4 w-3/4" />
|
||||
<Skeleton className="h-4 w-2/3" />
|
||||
<Skeleton className="h-4 w-5/6" />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
/** Dashed empty-state shown when this agent has produced no files yet. */
function RailEmpty() {
  return (
    <div className="mx-2 my-3 flex flex-col items-center gap-1.5 rounded-lg border border-border/60 border-dashed bg-muted/20 px-3 py-6 text-center text-muted-foreground text-xs">
      <Inbox className="size-4" />
      <p className="font-medium">No outputs yet</p>
      <p className="text-[11px] text-muted-foreground/70 leading-snug">
        Files this agent creates will appear here, grouped by the turn that made
        them.
      </p>
    </div>
  )
}
|
||||
|
||||
/** Inline error block for a failed outputs fetch (message comes from the hook). */
function RailError({ message }: { message: string }) {
  return (
    <div className="mx-2 my-3 rounded-lg border border-destructive/30 bg-destructive/5 px-3 py-2 text-destructive text-xs">
      {message}
    </div>
  )
}
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { OpenClawChatHistoryMessage } from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import type { AgentConversationTurn } from '@/lib/agent-conversations/types'
|
||||
import type { ProducedFilesRailGroup } from '@/lib/agent-files'
|
||||
|
||||
export type ClawChatRole = 'user' | 'assistant'
|
||||
|
||||
@@ -234,6 +235,30 @@ export function filterTurnsPersistedInHistory(
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Persisted turns that still carry `producedFiles` — once history
|
||||
* reloads, the assistant text is rendered by `ClawChatMessage` and
|
||||
* the optimistic turn is filtered out by
|
||||
* `filterTurnsPersistedInHistory`. The historical message has no
|
||||
* `producedFiles` field (history items don't carry that), so the
|
||||
* inline file-card strip would vanish on history reload.
|
||||
*
|
||||
* Returning these here lets the caller render a strip-only entry
|
||||
* after the corresponding history bubble — full message stays as
|
||||
* the persisted history pair, but the produced-files affordance
|
||||
* survives.
|
||||
*/
|
||||
export function selectStripOnlyTurns(
|
||||
turns: AgentConversationTurn[],
|
||||
historyMessages: ClawChatMessage[],
|
||||
): AgentConversationTurn[] {
|
||||
return turns.filter(
|
||||
(turn) =>
|
||||
Boolean(turn.producedFiles && turn.producedFiles.length > 0) &&
|
||||
isTurnPersistedInHistory(turn, historyMessages),
|
||||
)
|
||||
}
|
||||
|
||||
function isTurnPersistedInHistory(
|
||||
turn: AgentConversationTurn,
|
||||
historyMessages: ClawChatMessage[],
|
||||
@@ -285,3 +310,59 @@ function getClawMessageText(message: ClawChatMessage): string {
|
||||
.join('')
|
||||
.trim()
|
||||
}
|
||||
|
||||
function firstNonBlankLine(value: string): string {
|
||||
for (const raw of value.split('\n')) {
|
||||
const trimmed = raw.trim()
|
||||
if (trimmed) return trimmed
|
||||
}
|
||||
return ''
|
||||
}
|
||||
|
||||
/**
|
||||
* Map each assistant history message to the produced-files group
|
||||
* that came from its turn. Match key is `group.turnPrompt` (first
|
||||
* non-blank line of the user prompt that initiated the turn) vs.
|
||||
* the first non-blank line of the user message that immediately
|
||||
* preceded this assistant message — the same shape the server
|
||||
* emits when storing turnPrompt.
|
||||
*
|
||||
* Walks history forward (oldest-first per `flattenHistoryPages`)
|
||||
* and consumes groups in chronological order. A group can only
|
||||
* match once — if two turns share the same prompt the earlier
|
||||
* one wins, and the later assistant message stays unassociated
|
||||
* (those land back in `tailStripGroups` at the conversation tail).
|
||||
*/
|
||||
export function mapHistoryToProducedFilesGroups(
|
||||
historyMessages: ClawChatMessage[],
|
||||
groups: ReadonlyArray<ProducedFilesRailGroup>,
|
||||
): {
|
||||
byAssistantMessageId: Map<string, ProducedFilesRailGroup>
|
||||
unmatched: ProducedFilesRailGroup[]
|
||||
} {
|
||||
const byAssistantMessageId = new Map<string, ProducedFilesRailGroup>()
|
||||
if (groups.length === 0) {
|
||||
return { byAssistantMessageId, unmatched: [] }
|
||||
}
|
||||
// Oldest-first so the iteration order matches history.
|
||||
const remaining = [...groups].sort((a, b) => a.createdAt - b.createdAt)
|
||||
|
||||
let pendingPrompt: string | null = null
|
||||
for (const message of historyMessages) {
|
||||
if (message.role === 'user') {
|
||||
pendingPrompt = firstNonBlankLine(getClawMessageText(message))
|
||||
continue
|
||||
}
|
||||
if (message.role !== 'assistant' || !pendingPrompt) continue
|
||||
const matchIndex = remaining.findIndex(
|
||||
(group) => group.turnPrompt === pendingPrompt,
|
||||
)
|
||||
if (matchIndex >= 0) {
|
||||
const [match] = remaining.splice(matchIndex, 1)
|
||||
byAssistantMessageId.set(message.id, match)
|
||||
}
|
||||
pendingPrompt = null
|
||||
}
|
||||
|
||||
return { byAssistantMessageId, unmatched: remaining }
|
||||
}
|
||||
|
||||
@@ -10,9 +10,11 @@ import type { OpenClawChatHistoryMessage } from '@/entrypoints/app/agents/useOpe
|
||||
import type {
|
||||
AgentConversationTurn,
|
||||
AssistantPart,
|
||||
ConversationTurnFile,
|
||||
ToolEntry,
|
||||
UserAttachmentPreview,
|
||||
} from '@/lib/agent-conversations/types'
|
||||
import { useInvalidateAgentOutputs } from '@/lib/agent-files'
|
||||
import type { ServerAttachmentPayload } from '@/lib/attachments'
|
||||
import { consumeSSEStream } from '@/lib/sse'
|
||||
import { buildToolLabel } from '@/lib/tool-labels'
|
||||
@@ -53,6 +55,12 @@ export function useAgentConversation(
|
||||
) {
|
||||
const [turns, setTurns] = useState<AgentConversationTurn[]>([])
|
||||
const [streaming, setStreaming] = useState(false)
|
||||
const invalidateAgentOutputs = useInvalidateAgentOutputs()
|
||||
// Stable ref so the resume effect doesn't re-subscribe on every
|
||||
// render (the hook's returned callable is freshly closured each
|
||||
// time, but the underlying queryClient is stable).
|
||||
const invalidateAgentOutputsRef = useRef(invalidateAgentOutputs)
|
||||
invalidateAgentOutputsRef.current = invalidateAgentOutputs
|
||||
const sessionKeyRef = useRef(options.sessionKey ?? '')
|
||||
const historyRef = useRef<OpenClawChatHistoryMessage[]>(options.history ?? [])
|
||||
const textAccRef = useRef('')
|
||||
@@ -152,6 +160,17 @@ export function useAgentConversation(
|
||||
})
|
||||
}
|
||||
|
||||
const setProducedFilesOnCurrentTurn = (files: ConversationTurnFile[]) => {
|
||||
setTurns((prev) => {
|
||||
const last = prev[prev.length - 1]
|
||||
if (!last) return prev
|
||||
// Replace, don't merge: the server's diff is authoritative for
|
||||
// the just-completed turn — duplicate events shouldn't grow the
|
||||
// list, and a re-attribution should overwrite an earlier one.
|
||||
return [...prev.slice(0, -1), { ...last, producedFiles: files }]
|
||||
})
|
||||
}
|
||||
|
||||
const upsertAgentHarnessTool = (event: AgentHarnessStreamEvent) => {
|
||||
if (event.type !== 'tool_call') return
|
||||
const rawName = event.title || event.rawType || 'tool call'
|
||||
@@ -208,6 +227,9 @@ export function useAgentConversation(
|
||||
case 'tool_call':
|
||||
upsertAgentHarnessTool(event)
|
||||
break
|
||||
case 'produced_files':
|
||||
setProducedFilesOnCurrentTurn(event.files)
|
||||
break
|
||||
case 'done':
|
||||
markCurrentTurnDone()
|
||||
break
|
||||
@@ -259,6 +281,7 @@ export function useAgentConversation(
|
||||
...prev,
|
||||
{
|
||||
id: crypto.randomUUID(),
|
||||
turnId: active.turnId,
|
||||
userText: active.prompt ?? '',
|
||||
parts: [],
|
||||
done: false,
|
||||
@@ -304,9 +327,14 @@ export function useAgentConversation(
|
||||
// When `cancelled` is true the next run will set these
|
||||
// itself, so resetting here would only cause a brief flicker.
|
||||
if (!cancelled && weStartedStream) {
|
||||
const finishedTurnId = turnIdRef.current
|
||||
turnIdRef.current = null
|
||||
lastSeqRef.current = null
|
||||
setStreaming(false)
|
||||
void invalidateAgentOutputsRef.current(
|
||||
agentId,
|
||||
finishedTurnId ?? undefined,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -318,6 +346,60 @@ export function useAgentConversation(
|
||||
}
|
||||
}, [agentId, activeTurnIdDep])
|
||||
|
||||
/**
|
||||
* Send the chat request and follow the 409-active-turn redirect
|
||||
* once. Pulled out of `send` to keep its cognitive complexity in
|
||||
* check — the retry adds a branch that biome counts heavily.
|
||||
*/
|
||||
const openSendStream = async (
|
||||
targetAgentId: string,
|
||||
text: string,
|
||||
attachments: ServerAttachmentPayload[],
|
||||
signal: AbortSignal,
|
||||
): Promise<Response> => {
|
||||
const initial = await chatWithHarnessAgent(
|
||||
targetAgentId,
|
||||
text,
|
||||
signal,
|
||||
attachments,
|
||||
)
|
||||
if (initial.status !== 409) return initial
|
||||
// 409 means the server already has an active turn for this agent
|
||||
// (a previous tab kicked one off and we're a fresh mount that
|
||||
// missed the resume window). Attach to it instead of double-sending.
|
||||
const body = (await initial.json()) as { turnId?: string }
|
||||
if (!body.turnId) return initial
|
||||
return attachToHarnessTurn(targetAgentId, {
|
||||
turnId: body.turnId,
|
||||
signal,
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Pull session-key / turn-id off response headers and propagate to
|
||||
* refs + the optimistic turn. Stamping `turnId` here lets the
|
||||
* inline artifact card fall back to /files/turn/<id> on a resumed
|
||||
* mount that missed the live `produced_files` event.
|
||||
*/
|
||||
const applyResponseHeadersToTurn = (response: Response) => {
|
||||
const responseSessionKey =
|
||||
response.headers.get('X-Session-Key') ??
|
||||
response.headers.get('X-Session-Id')
|
||||
if (responseSessionKey) {
|
||||
sessionKeyRef.current = responseSessionKey
|
||||
onSessionKeyChangeRef.current?.(responseSessionKey)
|
||||
}
|
||||
const responseTurnId = response.headers.get('X-Turn-Id')
|
||||
if (!responseTurnId) return
|
||||
turnIdRef.current = responseTurnId
|
||||
lastSeqRef.current = null
|
||||
setTurns((prev) => {
|
||||
const last = prev[prev.length - 1]
|
||||
if (!last) return prev
|
||||
return [...prev.slice(0, -1), { ...last, turnId: responseTurnId }]
|
||||
})
|
||||
}
|
||||
|
||||
const send = async (input: string | SendInput) => {
|
||||
const normalized: SendInput =
|
||||
typeof input === 'string' ? { text: input } : input
|
||||
@@ -346,37 +428,13 @@ export function useAgentConversation(
|
||||
streamAbortRef.current = abortController
|
||||
|
||||
try {
|
||||
let response = await chatWithHarnessAgent(
|
||||
const response = await openSendStream(
|
||||
agentId,
|
||||
trimmed,
|
||||
abortController.signal,
|
||||
attachments,
|
||||
abortController.signal,
|
||||
)
|
||||
// 409 means the server already has an active turn for this
|
||||
// agent (e.g. a previous tab kicked one off and we're a fresh
|
||||
// mount that missed the resume window). Attach to it instead of
|
||||
// double-sending.
|
||||
if (response.status === 409) {
|
||||
const body = (await response.json()) as { turnId?: string }
|
||||
if (body.turnId) {
|
||||
response = await attachToHarnessTurn(agentId, {
|
||||
turnId: body.turnId,
|
||||
signal: abortController.signal,
|
||||
})
|
||||
}
|
||||
}
|
||||
const responseSessionKey =
|
||||
response.headers.get('X-Session-Key') ??
|
||||
response.headers.get('X-Session-Id')
|
||||
if (responseSessionKey) {
|
||||
sessionKeyRef.current = responseSessionKey
|
||||
onSessionKeyChangeRef.current?.(responseSessionKey)
|
||||
}
|
||||
const responseTurnId = response.headers.get('X-Turn-Id')
|
||||
if (responseTurnId) {
|
||||
turnIdRef.current = responseTurnId
|
||||
lastSeqRef.current = null
|
||||
}
|
||||
applyResponseHeadersToTurn(response)
|
||||
if (!response.ok) {
|
||||
const err = await response.text()
|
||||
updateCurrentTurnParts((parts) => [
|
||||
@@ -404,10 +462,15 @@ export function useAgentConversation(
|
||||
if (streamAbortRef.current === abortController) {
|
||||
streamAbortRef.current = null
|
||||
}
|
||||
// Capture before nulling — the invalidation needs the turn id so
|
||||
// useAgentTurnFiles consumers also flush, not just the agent-wide
|
||||
// rail query.
|
||||
const finishedTurnId = turnIdRef.current
|
||||
turnIdRef.current = null
|
||||
lastSeqRef.current = null
|
||||
onCompleteRef.current?.()
|
||||
setStreaming(false)
|
||||
void invalidateAgentOutputs(agentId, finishedTurnId ?? undefined)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,21 @@ import type { AgentEntry } from './useOpenClaw'
|
||||
|
||||
export type HarnessAgentAdapter = 'claude' | 'codex' | 'openclaw'
|
||||
|
||||
/**
 * One file the harness attributed to the assistant turn that just
 * finished. Mirrors the server-side `ProducedFileEventEntry` shape so
 * the inline artifact card can render alongside the streamed text the
 * user just watched complete. Only present for openclaw adapter
 * turns; claude / codex don't produce these events in v1.
 */
export interface HarnessProducedFile {
  // Server-issued file id — presumably the same id the preview /
  // download endpoints take; confirm against the files API.
  id: string
  /** Workspace-relative POSIX path. */
  path: string
  // File size (bytes, per how `formatFileSize` treats sizes).
  size: number
  // Last-modified time, epoch milliseconds.
  mtimeMs: number
}
|
||||
|
||||
export type AgentHarnessStreamEvent =
|
||||
| {
|
||||
type: 'text_delta'
|
||||
@@ -22,6 +37,10 @@ export type AgentHarnessStreamEvent =
|
||||
text: string
|
||||
rawType?: string
|
||||
}
|
||||
| {
|
||||
type: 'produced_files'
|
||||
files: HarnessProducedFile[]
|
||||
}
|
||||
| {
|
||||
type: 'done'
|
||||
text?: string
|
||||
|
||||
@@ -25,12 +25,18 @@ interface HarnessAgentsResponse {
|
||||
|
||||
export type { AgentHarnessStreamEvent }
|
||||
|
||||
const AGENT_QUERY_KEYS = {
|
||||
export const AGENT_QUERY_KEYS = {
|
||||
adapters: 'agent-harness-adapters',
|
||||
agents: 'agent-harness-agents',
|
||||
/** Outputs-rail data for one agent — `[agentOutputs, baseUrl, agentId]`. */
|
||||
agentOutputs: 'agent-harness-agent-outputs',
|
||||
/** Per-turn artifact-card files — `[agentTurnFiles, baseUrl, agentId, turnId]`. */
|
||||
agentTurnFiles: 'agent-harness-agent-turn-files',
|
||||
/** Single-file preview payload — `[filePreview, baseUrl, fileId]`. */
|
||||
filePreview: 'agent-harness-file-preview',
|
||||
} as const
|
||||
|
||||
async function agentsFetch<T>(
|
||||
export async function agentsFetch<T>(
|
||||
baseUrl: string,
|
||||
path: string,
|
||||
init?: RequestInit,
|
||||
|
||||
@@ -42,11 +42,34 @@ export interface UserAttachmentPreview {
|
||||
dataUrl?: string
|
||||
}
|
||||
|
||||
/**
 * Files attributed to this turn by the harness's per-turn workspace
 * diff. Populated either via the live `produced_files` SSE event or
 * (on resume) the `useAgentTurnFiles` fallback. Mirrors the wire
 * shape from `agent-harness-types.HarnessProducedFile` minus the
 * stream-only fields the inline card doesn't need.
 */
export interface ConversationTurnFile {
  id: string
  // Workspace-relative path — mirrors `HarnessProducedFile.path`.
  path: string
  // File size in bytes.
  size: number
  // Last-modified time, epoch milliseconds.
  mtimeMs: number
}

export interface AgentConversationTurn {
  // Client-generated key for the optimistic turn (distinct from the
  // server-issued `turnId` below).
  id: string
  /**
   * Server-issued turn id, set as soon as the response headers arrive
   * (`X-Turn-Id`) for fresh sends, or from the active-turn payload on
   * resume. Required for the historic-files fallback fetch; absent on
   * the brief optimistic window before the first header.
   */
  turnId?: string | null
  // The user prompt text that started this turn.
  userText: string
  // Previews of attachments the user sent with the prompt.
  userAttachments?: UserAttachmentPreview[]
  // Assistant output accumulated while the turn streams.
  parts: AssistantPart[]
  /** Files produced during this turn (openclaw only in v1). */
  producedFiles?: ConversationTurnFile[]
  // True once the turn's stream has finished.
  done: boolean
  // Turn creation time (epoch ms).
  timestamp: number
}
|
||||
|
||||
@@ -0,0 +1,126 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Pure helpers used by the artifact card and the Outputs rail.
|
||||
* Display formatting only — no React, no fetch, no DOM. Anything
|
||||
* stateful belongs in `./useAgentOutputs` or `./useFilePreview`.
|
||||
*/
|
||||
|
||||
import { buildAgentApiUrl } from '@/entrypoints/app/agents/agent-api-url'
|
||||
|
||||
/**
|
||||
* Coarse classification of a file's intended preview / icon path.
|
||||
* Mirrors the server-side `FilePreviewKind` minus `missing` — the
|
||||
* client only ever computes a kind for a row it already has.
|
||||
*/
|
||||
export type FileKind = 'text' | 'image' | 'pdf' | 'binary'
|
||||
|
||||
const TEXT_EXTENSIONS = new Set([
|
||||
'txt',
|
||||
'md',
|
||||
'markdown',
|
||||
'json',
|
||||
'jsonl',
|
||||
'csv',
|
||||
'tsv',
|
||||
'xml',
|
||||
'yaml',
|
||||
'yml',
|
||||
'toml',
|
||||
'ini',
|
||||
'log',
|
||||
'html',
|
||||
'htm',
|
||||
'css',
|
||||
'js',
|
||||
'mjs',
|
||||
'cjs',
|
||||
'ts',
|
||||
'tsx',
|
||||
'jsx',
|
||||
'py',
|
||||
'rb',
|
||||
'go',
|
||||
'rs',
|
||||
'java',
|
||||
'kt',
|
||||
'swift',
|
||||
'c',
|
||||
'h',
|
||||
'cpp',
|
||||
'hpp',
|
||||
'sh',
|
||||
'zsh',
|
||||
'bash',
|
||||
'sql',
|
||||
'svg',
|
||||
])
|
||||
|
||||
const IMAGE_EXTENSIONS = new Set([
|
||||
'png',
|
||||
'jpg',
|
||||
'jpeg',
|
||||
'gif',
|
||||
'webp',
|
||||
'bmp',
|
||||
'ico',
|
||||
'heic',
|
||||
'heif',
|
||||
])
|
||||
|
||||
/** Best-effort kind based on extension only. Server's preview API
|
||||
* is the source of truth for actual rendering — this is just for
|
||||
* picking an icon / sort hint without a network round-trip. */
|
||||
export function inferFileKind(path: string): FileKind {
|
||||
const ext = extensionOf(path).toLowerCase()
|
||||
if (ext === 'pdf') return 'pdf'
|
||||
if (IMAGE_EXTENSIONS.has(ext)) return 'image'
|
||||
if (TEXT_EXTENSIONS.has(ext)) return 'text'
|
||||
return 'binary'
|
||||
}
|
||||
|
||||
/** Plain extension without the leading dot. Empty string when none. */
|
||||
export function extensionOf(path: string): string {
|
||||
const dot = path.lastIndexOf('.')
|
||||
if (dot === -1) return ''
|
||||
const slash = path.lastIndexOf('/')
|
||||
if (dot < slash) return ''
|
||||
return path.slice(dot + 1)
|
||||
}
|
||||
|
||||
/** File name (final path segment), no directory prefix. */
|
||||
export function basenameOf(path: string): string {
|
||||
const slash = path.lastIndexOf('/')
|
||||
return slash === -1 ? path : path.slice(slash + 1)
|
||||
}
|
||||
|
||||
const SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB'] as const
|
||||
|
||||
/** "2.4 MB" / "340 KB" / "78 B" — for the artifact card's right-side
|
||||
* metadata. Not localised; the rail uses one space + the unit. */
|
||||
export function formatFileSize(bytes: number): string {
|
||||
if (!Number.isFinite(bytes) || bytes < 0) return '—'
|
||||
if (bytes < 1024) return `${bytes} ${SIZE_UNITS[0]}`
|
||||
let value = bytes
|
||||
let unit = 0
|
||||
while (value >= 1024 && unit < SIZE_UNITS.length - 1) {
|
||||
value /= 1024
|
||||
unit += 1
|
||||
}
|
||||
// 1-digit precision below 10, integer above — feels less noisy.
|
||||
const formatted = value < 10 ? value.toFixed(1) : Math.round(value).toString()
|
||||
return `${formatted} ${SIZE_UNITS[unit]}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the per-file download URL using the same agent-api root the
|
||||
* rest of the harness hits. Returned URL is already absolute.
|
||||
*/
|
||||
export function buildFileDownloadUrl(baseUrl: string, fileId: string): string {
|
||||
return buildAgentApiUrl(
|
||||
baseUrl,
|
||||
`/files/${encodeURIComponent(fileId)}/download`,
|
||||
)
|
||||
}
|
||||
32
packages/browseros-agent/apps/agent/lib/agent-files/index.ts
Normal file
32
packages/browseros-agent/apps/agent/lib/agent-files/index.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
export {
|
||||
basenameOf,
|
||||
buildFileDownloadUrl,
|
||||
extensionOf,
|
||||
type FileKind,
|
||||
formatFileSize,
|
||||
inferFileKind,
|
||||
} from './file-helpers'
|
||||
export type {
|
||||
BinaryFilePreview,
|
||||
FilePreview,
|
||||
FilePreviewKind,
|
||||
ImageFilePreview,
|
||||
MissingFilePreview,
|
||||
PdfFilePreview,
|
||||
ProducedFile,
|
||||
ProducedFilesRailGroup,
|
||||
TextFilePreview,
|
||||
} from './types'
|
||||
export {
|
||||
useAgentOutputs,
|
||||
useAgentTurnFiles,
|
||||
useInvalidateAgentOutputs,
|
||||
useRefreshAgentOutputs,
|
||||
} from './useAgentOutputs'
|
||||
export { useFilePreview } from './useFilePreview'
|
||||
75
packages/browseros-agent/apps/agent/lib/agent-files/types.ts
Normal file
75
packages/browseros-agent/apps/agent/lib/agent-files/types.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Wire types shared by the inline artifact card and the per-agent
|
||||
* Outputs rail. These mirror `ProducedFileEntry` /
|
||||
* `ProducedFilesRailGroup` on the server and the `FilePreview`
|
||||
* discriminated union from `apps/server/src/api/services/openclaw/file-preview.ts`.
|
||||
*
|
||||
* The schema mirror is deliberate (vs sharing a workspace package)
|
||||
* because the server keeps the on-disk row shape — `agentDefinitionId`,
|
||||
* `sessionKey` — out of the wire payload. Dropping those columns at the
|
||||
* type boundary keeps the client honest about what it can refer to.
|
||||
*/
|
||||
|
||||
export interface ProducedFile {
  // Server-issued file id — used as the path segment for the
  // preview / download endpoints.
  id: string
  /** Workspace-relative POSIX path. */
  path: string
  // File size in bytes.
  size: number
  // Last-modified time, epoch milliseconds.
  mtimeMs: number
  /** Server clock when the file was first attributed to its turn. */
  createdAt: number
  // How the server attributed the file to a turn: workspace diff or
  // a tool event.
  detectedBy: 'diff' | 'tool'
}

// One Outputs-rail group: all files attributed to a single turn.
export interface ProducedFilesRailGroup {
  turnId: string
  /** First non-blank line of the user prompt that initiated this turn. */
  turnPrompt: string
  // Server clock for the turn — groups are sorted by this field.
  createdAt: number
  files: ProducedFile[]
}

// Discriminant for the FilePreview union below. Unlike client-side
// FileKind, this includes 'missing' (file gone from disk).
export type FilePreviewKind = 'text' | 'image' | 'pdf' | 'binary' | 'missing'

// Fields shared by every present-on-disk preview variant.
interface BasePreview {
  kind: FilePreviewKind
  mimeType: string
  size: number
  mtimeMs: number
}

export interface TextFilePreview extends BasePreview {
  kind: 'text'
  // Leading slice of the file's text content.
  snippet: string
  /** True when the on-disk file is larger than the server's snippet cap. */
  truncated: boolean
}

export interface ImageFilePreview extends BasePreview {
  kind: 'image'
  /** Base64 data URL (incl. `data:` prefix). Suitable for `<img src>`. */
  dataUrl: string
}

// PDF: no inline payload — render via the download URL.
export interface PdfFilePreview extends BasePreview {
  kind: 'pdf'
}

// Binary: no preview payload; metadata only.
export interface BinaryFilePreview extends BasePreview {
  kind: 'binary'
}

// File no longer exists on disk; carries no metadata.
export interface MissingFilePreview {
  kind: 'missing'
}

// Discriminated union — switch on `kind`.
export type FilePreview =
  | TextFilePreview
  | ImageFilePreview
  | PdfFilePreview
  | BinaryFilePreview
  | MissingFilePreview
|
||||
@@ -0,0 +1,166 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* React Query hooks backing the per-agent Outputs rail and the
|
||||
* inline artifact card.
|
||||
*
|
||||
* Live updates: the consumer of `useAgentConversation` (see Phase 5)
|
||||
* is expected to call `useInvalidateAgentOutputs(agentId)` whenever
|
||||
* an assistant turn completes, so the rail picks up the new
|
||||
* `produced_files` rows the server attributed during that turn.
|
||||
* No SSE channel here — invalidation off the existing chat-stream
|
||||
* completion is enough for v1.
|
||||
*/
|
||||
|
||||
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
|
||||
import {
|
||||
AGENT_QUERY_KEYS,
|
||||
agentsFetch,
|
||||
} from '@/entrypoints/app/agents/useAgents'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import type { ProducedFile, ProducedFilesRailGroup } from './types'
|
||||
|
||||
// Wire shape of `GET /:agentId/files` — per-turn rail groups.
interface OutputsResponse {
  groups: ProducedFilesRailGroup[]
}

// Wire shape of `GET /:agentId/files/turn/:turnId`.
interface TurnFilesResponse {
  files: ProducedFile[]
}
|
||||
|
||||
export function useAgentOutputs(agentId: string, enabled = true) {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: urlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useQuery<ProducedFilesRailGroup[], Error>({
|
||||
queryKey: [AGENT_QUERY_KEYS.agentOutputs, baseUrl, agentId],
|
||||
queryFn: async () => {
|
||||
const data = await agentsFetch<OutputsResponse>(
|
||||
baseUrl as string,
|
||||
`/${encodeURIComponent(agentId)}/files`,
|
||||
)
|
||||
return data.groups ?? []
|
||||
},
|
||||
enabled: Boolean(baseUrl) && !urlLoading && enabled && Boolean(agentId),
|
||||
})
|
||||
|
||||
return {
|
||||
groups: query.data ?? [],
|
||||
loading: query.isLoading || urlLoading,
|
||||
error: query.error ?? urlError,
|
||||
refetch: query.refetch,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-turn fetch for the inline artifact card. Used both as the
|
||||
* fallback when an SSE `produced_files` event was missed, and to
|
||||
* rehydrate a turn the user scrolled back to.
|
||||
*/
|
||||
export function useAgentTurnFiles(
|
||||
agentId: string,
|
||||
turnId: string | null,
|
||||
enabled = true,
|
||||
) {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: urlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useQuery<ProducedFile[], Error>({
|
||||
queryKey: [AGENT_QUERY_KEYS.agentTurnFiles, baseUrl, agentId, turnId],
|
||||
queryFn: async () => {
|
||||
const data = await agentsFetch<TurnFilesResponse>(
|
||||
baseUrl as string,
|
||||
`/${encodeURIComponent(agentId)}/files/turn/${encodeURIComponent(
|
||||
turnId as string,
|
||||
)}`,
|
||||
)
|
||||
return data.files ?? []
|
||||
},
|
||||
enabled:
|
||||
Boolean(baseUrl) &&
|
||||
!urlLoading &&
|
||||
enabled &&
|
||||
Boolean(agentId) &&
|
||||
Boolean(turnId),
|
||||
})
|
||||
|
||||
return {
|
||||
files: query.data ?? [],
|
||||
loading: query.isLoading || urlLoading,
|
||||
error: query.error ?? urlError,
|
||||
refetch: query.refetch,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a callable that invalidates outputs / turn-files queries
|
||||
* for one agent across any baseUrl. Call after an assistant turn
|
||||
* completes so the rail (and the inline file-card strip) pick up
|
||||
* the new attributed rows. Cheap when the queries aren't mounted
|
||||
* — react-query just marks the cached value stale.
|
||||
*
|
||||
* Implementation note: react-query's `invalidateQueries({ queryKey })`
|
||||
* does positional partial-match, so passing `undefined` as the
|
||||
* baseUrl placeholder does NOT match a cached `[…, baseUrl, …]`
|
||||
* key — the cache stayed stale. Use a predicate so we ignore the
|
||||
* baseUrl position entirely.
|
||||
*/
|
||||
export function useInvalidateAgentOutputs() {
|
||||
const queryClient = useQueryClient()
|
||||
return async (agentId: string, turnId?: string) => {
|
||||
await Promise.all([
|
||||
queryClient.invalidateQueries({
|
||||
predicate: (query) => {
|
||||
const key = query.queryKey
|
||||
return (
|
||||
Array.isArray(key) &&
|
||||
key[0] === AGENT_QUERY_KEYS.agentOutputs &&
|
||||
key[2] === agentId
|
||||
)
|
||||
},
|
||||
}),
|
||||
queryClient.invalidateQueries({
|
||||
predicate: (query) => {
|
||||
const key = query.queryKey
|
||||
if (
|
||||
!Array.isArray(key) ||
|
||||
key[0] !== AGENT_QUERY_KEYS.agentTurnFiles ||
|
||||
key[2] !== agentId
|
||||
) {
|
||||
return false
|
||||
}
|
||||
// When a turnId was supplied, scope to just that turn's
|
||||
// entry. Otherwise flush every cached turn for this agent.
|
||||
return turnId ? key[3] === turnId : true
|
||||
},
|
||||
}),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tiny mutation wrapper so the Outputs rail's "Refresh" button can
|
||||
* surface an `isPending` indicator while the new query is in flight.
|
||||
* No body — just triggers `refetch` on the rail's query for this
|
||||
* agent and resolves when it settles.
|
||||
*/
|
||||
export function useRefreshAgentOutputs(agentId: string) {
|
||||
const queryClient = useQueryClient()
|
||||
const { baseUrl } = useAgentServerUrl()
|
||||
return useMutation({
|
||||
mutationFn: async () => {
|
||||
await queryClient.refetchQueries({
|
||||
queryKey: [AGENT_QUERY_KEYS.agentOutputs, baseUrl, agentId],
|
||||
exact: true,
|
||||
})
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Single-file preview hook used by the inline artifact card and the
|
||||
* Outputs rail's preview Sheet. Always opt-in (`enabled`) — the
|
||||
* preview is fetched only when the user clicks a row, never
|
||||
* eagerly.
|
||||
*/
|
||||
|
||||
import { useQuery } from '@tanstack/react-query'
|
||||
import {
|
||||
AGENT_QUERY_KEYS,
|
||||
agentsFetch,
|
||||
} from '@/entrypoints/app/agents/useAgents'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import type { FilePreview } from './types'
|
||||
|
||||
export function useFilePreview(fileId: string | null, enabled = true) {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: urlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useQuery<FilePreview, Error>({
|
||||
queryKey: [AGENT_QUERY_KEYS.filePreview, baseUrl, fileId],
|
||||
queryFn: async () => {
|
||||
return agentsFetch<FilePreview>(
|
||||
baseUrl as string,
|
||||
`/files/${encodeURIComponent(fileId as string)}/preview`,
|
||||
)
|
||||
},
|
||||
enabled: Boolean(baseUrl) && !urlLoading && enabled && Boolean(fileId),
|
||||
// Previews are immutable for a given fileId — once loaded, never
|
||||
// refetch on focus / reconnect. They go stale only when the
|
||||
// underlying file is removed (rare in v1; no rename / delete).
|
||||
staleTime: Infinity,
|
||||
gcTime: 5 * 60 * 1000,
|
||||
})
|
||||
|
||||
return {
|
||||
preview: query.data ?? null,
|
||||
loading: query.isLoading || urlLoading,
|
||||
error: query.error ?? urlError,
|
||||
refetch: query.refetch,
|
||||
}
|
||||
}
|
||||
@@ -36,6 +36,7 @@ const VERB_OVERRIDES: Record<string, string> = {
|
||||
hover: 'Hovered',
|
||||
hover_at: 'Hovered at coordinates',
|
||||
type_at: 'Typed at coordinates',
|
||||
type_text: 'Typed text',
|
||||
drag_at: 'Dragged',
|
||||
focus: 'Focused element',
|
||||
fill: 'Filled field',
|
||||
@@ -186,8 +187,8 @@ const SUBJECT_EXTRACTORS: Record<string, SubjectExtractor> = {
|
||||
find_files: (i) => quote(stringField(i, 'pattern', 'query')),
|
||||
|
||||
// Element interactions
|
||||
click: (i) => stringField(i, 'element'),
|
||||
hover: (i) => stringField(i, 'element'),
|
||||
click: (i) => stringField(i, 'prompt'),
|
||||
hover: (i) => stringField(i, 'prompt', 'element'),
|
||||
focus: (i) => stringField(i, 'element'),
|
||||
clear: (i) => stringField(i, 'element'),
|
||||
check: (i) => stringField(i, 'element'),
|
||||
@@ -199,6 +200,7 @@ const SUBJECT_EXTRACTORS: Record<string, SubjectExtractor> = {
|
||||
return target ?? truncate(text, 40)
|
||||
},
|
||||
press_key: (i) => stringField(i, 'key'),
|
||||
type_text: (i) => truncate(stringField(i, 'text'), 40),
|
||||
|
||||
// Coordinate-based input
|
||||
click_at: (i) => coords(i.x, i.y),
|
||||
|
||||
26
packages/browseros-agent/apps/eval/configs/legacy/agisdk-real-gpt-5-5-openrouter.json
vendored
Normal file
26
packages/browseros-agent/apps/eval/configs/legacy/agisdk-real-gpt-5-5-openrouter.json
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"agent": {
|
||||
"type": "single",
|
||||
"provider": "openai-compatible",
|
||||
"model": "openai/gpt-5.5",
|
||||
"apiKey": "OPENROUTER_API_KEY",
|
||||
"baseUrl": "https://openrouter.ai/api/v1",
|
||||
"supportsImages": true
|
||||
},
|
||||
"dataset": "../../data/agisdk-real.jsonl",
|
||||
"num_workers": 20,
|
||||
"restart_server_per_task": true,
|
||||
"browseros": {
|
||||
"server_url": "http://127.0.0.1:9110",
|
||||
"base_cdp_port": 9010,
|
||||
"base_server_port": 9110,
|
||||
"base_extension_port": 9310,
|
||||
"load_extensions": false,
|
||||
"headless": false
|
||||
},
|
||||
"captcha": {
|
||||
"api_key_env": "NOPECHA_API_KEY"
|
||||
},
|
||||
"graders": ["agisdk_state_diff"],
|
||||
"timeout_ms": 1800000
|
||||
}
|
||||
@@ -8,7 +8,7 @@
|
||||
"supportsImages": true
|
||||
},
|
||||
"dataset": "../../data/agisdk-real-smoke.jsonl",
|
||||
"num_workers": 1,
|
||||
"num_workers": 20,
|
||||
"restart_server_per_task": true,
|
||||
"browseros": {
|
||||
"server_url": "http://127.0.0.1:9110",
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"supportsImages": true
|
||||
},
|
||||
"dataset": "../../data/agisdk-real.jsonl",
|
||||
"num_workers": 4,
|
||||
"num_workers": 20,
|
||||
"restart_server_per_task": true,
|
||||
"browseros": {
|
||||
"server_url": "http://127.0.0.1:9110",
|
||||
|
||||
@@ -1,15 +1,21 @@
|
||||
{
|
||||
"agent": {
|
||||
"type": "single",
|
||||
"provider": "bedrock",
|
||||
"model": "global.anthropic.claude-opus-4-6-v1",
|
||||
"region": "AWS_REGION",
|
||||
"accessKeyId": "AWS_ACCESS_KEY_ID",
|
||||
"secretAccessKey": "AWS_SECRET_ACCESS_KEY",
|
||||
"supportsImages": true
|
||||
"provider": "openrouter",
|
||||
"model": "anthropic/claude-opus-4.6",
|
||||
"apiKey": "OPENROUTER_API_KEY",
|
||||
"supportsImages": true,
|
||||
"reasoning": {
|
||||
"enabled": true
|
||||
},
|
||||
"verbosity": "high",
|
||||
"providerRouting": {
|
||||
"only": ["amazon-bedrock"],
|
||||
"allowFallbacks": false
|
||||
}
|
||||
},
|
||||
"dataset": "../../data/agisdk-real.jsonl",
|
||||
"num_workers": 2,
|
||||
"num_workers": 10,
|
||||
"restart_server_per_task": true,
|
||||
"browseros": {
|
||||
"server_url": "http://127.0.0.1:9110",
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"type": "single"
|
||||
},
|
||||
"graders": ["agisdk_state_diff"],
|
||||
"workers": 1,
|
||||
"workers": 20,
|
||||
"restartBrowserPerTask": true,
|
||||
"timeoutMs": 1800000,
|
||||
"browseros": {
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"type": "single"
|
||||
},
|
||||
"graders": ["agisdk_state_diff"],
|
||||
"workers": 1,
|
||||
"workers": 20,
|
||||
"restartBrowserPerTask": true,
|
||||
"timeoutMs": 1800000,
|
||||
"browseros": {
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"type": "single"
|
||||
},
|
||||
"graders": ["agisdk_state_diff"],
|
||||
"workers": 1,
|
||||
"workers": 20,
|
||||
"restartBrowserPerTask": true,
|
||||
"timeoutMs": 1800000,
|
||||
"browseros": {
|
||||
|
||||
@@ -9,11 +9,52 @@ import { CdpBackend } from '@browseros/server/browser/backends/cdp'
|
||||
import { registry } from '@browseros/server/tools/registry'
|
||||
import { CaptchaWaiter } from '../capture/captcha-waiter'
|
||||
import { DEFAULT_TIMEOUT_MS } from '../constants'
|
||||
import type { TaskMetadata } from '../types'
|
||||
import type { TaskMetadata, UIMessageStreamEvent } from '../types'
|
||||
import {
|
||||
isProviderExecutionError,
|
||||
retryProviderErrors,
|
||||
} from '../utils/provider-error-retry'
|
||||
import { resolveProviderConfig } from '../utils/resolve-provider-config'
|
||||
import { withEvalTimeout } from '../utils/with-eval-timeout'
|
||||
import type { AgentContext, AgentEvaluator, AgentResult } from './types'
|
||||
|
||||
const EMPTY_TOOL_RESULT_STOP_CONTINUATION_LIMIT = 2
|
||||
|
||||
interface ToolLoopResultShape {
|
||||
text: string
|
||||
finishReason: string
|
||||
toolCalls: readonly unknown[]
|
||||
steps: ReadonlyArray<{
|
||||
toolResults: readonly unknown[]
|
||||
}>
|
||||
}
|
||||
|
||||
export function shouldContinueAfterEmptyToolResultStop(
|
||||
result: ToolLoopResultShape,
|
||||
): boolean {
|
||||
const previousStep = result.steps.at(-2)
|
||||
|
||||
return (
|
||||
result.finishReason === 'stop' &&
|
||||
result.text.trim().length === 0 &&
|
||||
result.toolCalls.length === 0 &&
|
||||
(previousStep?.toolResults.length ?? 0) > 0
|
||||
)
|
||||
}
|
||||
|
||||
export function buildEmptyToolResultStopContinuationPrompt(
|
||||
taskQuery: string,
|
||||
): string {
|
||||
return [
|
||||
'Continue the eval task from the current browser state.',
|
||||
'',
|
||||
'The previous model response stopped immediately after a tool result without issuing another tool call or a final answer. Do not stop after routine tool results. If the requested workflow is complete, respond with a brief completion message. Otherwise, inspect the page if needed and continue using tools.',
|
||||
'',
|
||||
'Original task:',
|
||||
taskQuery,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
export class SingleAgentEvaluator implements AgentEvaluator {
|
||||
constructor(private ctx: AgentContext) {}
|
||||
|
||||
@@ -89,87 +130,128 @@ export class SingleAgentEvaluator implements AgentEvaluator {
|
||||
capture,
|
||||
async (signal) => {
|
||||
if (!agent) throw new Error('Agent was not initialized')
|
||||
// Format prompt with browser context so the agent knows what page it's on
|
||||
// (same formatting as chat-service.ts → formatUserMessage)
|
||||
const prompt = formatUserMessage(task.query, browserContext)
|
||||
const result = await agent.toolLoopAgent.generate({
|
||||
prompt,
|
||||
abortSignal: signal,
|
||||
const activeAgent = agent
|
||||
|
||||
experimental_onToolCallStart: ({ toolCall }) => {
|
||||
const input = toolCall.input as
|
||||
| Record<string, unknown>
|
||||
| undefined
|
||||
if (input && typeof input.page === 'number') {
|
||||
capture.setActivePageId(input.page)
|
||||
}
|
||||
},
|
||||
let continuationCount = 0
|
||||
let currentQuery = task.query
|
||||
|
||||
experimental_onToolCallFinish: async () => {
|
||||
try {
|
||||
if (captchaWaiter) {
|
||||
await captchaWaiter.waitIfCaptchaPresent(
|
||||
browser,
|
||||
capture.getActivePageId(),
|
||||
)
|
||||
}
|
||||
const screenshotNum = await capture.screenshot.capture(
|
||||
capture.getActivePageId(),
|
||||
)
|
||||
capture.emitEvent(task.query_id, {
|
||||
type: 'screenshot-captured',
|
||||
screenshot: screenshotNum,
|
||||
})
|
||||
} catch {
|
||||
// Screenshot failures are non-fatal
|
||||
}
|
||||
},
|
||||
for (;;) {
|
||||
// Format prompt with browser context so the agent knows what page it's on
|
||||
// (same formatting as chat-service.ts → formatUserMessage)
|
||||
const prompt = formatUserMessage(currentQuery, browserContext)
|
||||
const result = await retryProviderErrors(
|
||||
() =>
|
||||
activeAgent.toolLoopAgent.generate({
|
||||
prompt,
|
||||
abortSignal: signal,
|
||||
|
||||
onStepFinish: async ({ toolCalls, toolResults, text }) => {
|
||||
if (toolCalls) {
|
||||
for (const tc of toolCalls) {
|
||||
const inputEvent = {
|
||||
type: 'tool-input-available',
|
||||
toolCallId: tc.toolCallId,
|
||||
toolName: tc.toolName,
|
||||
input: tc.input,
|
||||
} as any
|
||||
await capture.messageLogger.logStreamEvent(inputEvent)
|
||||
capture.emitEvent(task.query_id, inputEvent)
|
||||
}
|
||||
}
|
||||
experimental_onToolCallStart: ({ toolCall }) => {
|
||||
const input = toolCall.input as
|
||||
| Record<string, unknown>
|
||||
| undefined
|
||||
if (input && typeof input.page === 'number') {
|
||||
capture.setActivePageId(input.page)
|
||||
}
|
||||
},
|
||||
|
||||
if (toolResults) {
|
||||
for (const tr of toolResults) {
|
||||
const outputEvent = {
|
||||
type: 'tool-output-available',
|
||||
toolCallId: tr.toolCallId,
|
||||
output: tr.output,
|
||||
} as any
|
||||
await capture.messageLogger.logStreamEvent(outputEvent)
|
||||
capture.emitEvent(task.query_id, outputEvent)
|
||||
}
|
||||
}
|
||||
experimental_onToolCallFinish: async () => {
|
||||
try {
|
||||
if (captchaWaiter) {
|
||||
await captchaWaiter.waitIfCaptchaPresent(
|
||||
browser,
|
||||
capture.getActivePageId(),
|
||||
)
|
||||
}
|
||||
const screenshotNum = await capture.screenshot.capture(
|
||||
capture.getActivePageId(),
|
||||
)
|
||||
capture.emitEvent(task.query_id, {
|
||||
type: 'screenshot-captured',
|
||||
screenshot: screenshotNum,
|
||||
})
|
||||
} catch {
|
||||
// Screenshot failures are non-fatal
|
||||
}
|
||||
},
|
||||
|
||||
if (text) {
|
||||
const textId = randomUUID()
|
||||
const startEvent = { type: 'text-start', id: textId } as any
|
||||
const deltaEvent = {
|
||||
type: 'text-delta',
|
||||
id: textId,
|
||||
delta: text,
|
||||
} as any
|
||||
const endEvent = { type: 'text-end', id: textId } as any
|
||||
await capture.messageLogger.logStreamEvent(startEvent)
|
||||
await capture.messageLogger.logStreamEvent(deltaEvent)
|
||||
await capture.messageLogger.logStreamEvent(endEvent)
|
||||
capture.emitEvent(task.query_id, deltaEvent)
|
||||
}
|
||||
},
|
||||
})
|
||||
onStepFinish: async ({ toolCalls, toolResults, text }) => {
|
||||
if (toolCalls) {
|
||||
for (const tc of toolCalls) {
|
||||
const inputEvent: UIMessageStreamEvent = {
|
||||
type: 'tool-input-available',
|
||||
toolCallId: tc.toolCallId,
|
||||
toolName: tc.toolName,
|
||||
input: tc.input,
|
||||
}
|
||||
await capture.messageLogger.logStreamEvent(inputEvent)
|
||||
capture.emitEvent(task.query_id, inputEvent)
|
||||
}
|
||||
}
|
||||
|
||||
finalText = result.text || null
|
||||
if (toolResults) {
|
||||
for (const tr of toolResults) {
|
||||
const outputEvent: UIMessageStreamEvent = {
|
||||
type: 'tool-output-available',
|
||||
toolCallId: tr.toolCallId,
|
||||
output: tr.output,
|
||||
}
|
||||
await capture.messageLogger.logStreamEvent(outputEvent)
|
||||
capture.emitEvent(task.query_id, outputEvent)
|
||||
}
|
||||
}
|
||||
|
||||
if (text) {
|
||||
const textId = randomUUID()
|
||||
const startEvent: UIMessageStreamEvent = {
|
||||
type: 'text-start',
|
||||
id: textId,
|
||||
}
|
||||
const deltaEvent: UIMessageStreamEvent = {
|
||||
type: 'text-delta',
|
||||
id: textId,
|
||||
delta: text,
|
||||
}
|
||||
const endEvent: UIMessageStreamEvent = {
|
||||
type: 'text-end',
|
||||
id: textId,
|
||||
}
|
||||
await capture.messageLogger.logStreamEvent(startEvent)
|
||||
await capture.messageLogger.logStreamEvent(deltaEvent)
|
||||
await capture.messageLogger.logStreamEvent(endEvent)
|
||||
capture.emitEvent(task.query_id, deltaEvent)
|
||||
}
|
||||
},
|
||||
}),
|
||||
{
|
||||
label: `single-agent ${task.query_id}`,
|
||||
signal,
|
||||
},
|
||||
)
|
||||
|
||||
if (!shouldContinueAfterEmptyToolResultStop(result)) {
|
||||
finalText = result.text || null
|
||||
break
|
||||
}
|
||||
|
||||
if (
|
||||
continuationCount >= EMPTY_TOOL_RESULT_STOP_CONTINUATION_LIMIT
|
||||
) {
|
||||
throw new Error(
|
||||
`Model stopped with empty output immediately after a tool result ${continuationCount + 1} times`,
|
||||
)
|
||||
}
|
||||
|
||||
continuationCount++
|
||||
capture.addWarning(
|
||||
'agent_execution',
|
||||
`Model stopped with empty output immediately after a tool result; continuing task (${continuationCount}/${EMPTY_TOOL_RESULT_STOP_CONTINUATION_LIMIT})`,
|
||||
)
|
||||
currentQuery = buildEmptyToolResultStopContinuationPrompt(
|
||||
task.query,
|
||||
)
|
||||
}
|
||||
},
|
||||
{ rethrowError: isProviderExecutionError },
|
||||
)
|
||||
|
||||
const endTime = Date.now()
|
||||
|
||||
@@ -18,6 +18,9 @@ export interface SuiteCliArgs {
|
||||
apiKey?: string
|
||||
baseUrl?: string
|
||||
publishTarget?: PublishTarget
|
||||
query?: string
|
||||
startUrl?: string
|
||||
outputDir?: string
|
||||
}
|
||||
|
||||
export interface RunCliArgs
|
||||
@@ -83,6 +86,9 @@ function parseSuiteLikeArgs(
|
||||
'api-key': { type: 'string' },
|
||||
'base-url': { type: 'string' },
|
||||
publish: { type: 'string' },
|
||||
query: { type: 'string' },
|
||||
'start-url': { type: 'string' },
|
||||
'output-dir': { type: 'string' },
|
||||
},
|
||||
})
|
||||
|
||||
@@ -104,6 +110,12 @@ function parseSuiteLikeArgs(
|
||||
if (apiKey) parsed.apiKey = apiKey
|
||||
const baseUrl = stringValue(values['base-url'])
|
||||
if (baseUrl) parsed.baseUrl = baseUrl
|
||||
const query = stringValue(values.query)
|
||||
if (query) parsed.query = query
|
||||
const startUrl = stringValue(values['start-url'])
|
||||
if (startUrl) parsed.startUrl = startUrl
|
||||
const outputDir = stringValue(values['output-dir'])
|
||||
if (outputDir) parsed.outputDir = outputDir
|
||||
|
||||
if (command === 'suite') {
|
||||
const target = publishTarget(stringValue(values.publish))
|
||||
|
||||
@@ -15,6 +15,9 @@ export async function runRunCommand(
|
||||
model: args.model,
|
||||
apiKey: args.apiKey,
|
||||
baseUrl: args.baseUrl,
|
||||
query: args.query,
|
||||
startUrl: args.startUrl,
|
||||
outputDir: args.outputDir,
|
||||
},
|
||||
deps,
|
||||
)
|
||||
|
||||
@@ -21,6 +21,9 @@ export interface SuiteCommandOptions {
|
||||
apiKey?: string
|
||||
baseUrl?: string
|
||||
publishTarget?: PublishTarget
|
||||
query?: string
|
||||
startUrl?: string
|
||||
outputDir?: string
|
||||
env?: Env
|
||||
}
|
||||
|
||||
@@ -179,11 +182,19 @@ export async function runSuiteCommand(
|
||||
const resolved = await resolveSuiteCommand(options)
|
||||
const runOptions: RunEvalOptions =
|
||||
resolved.kind === 'config'
|
||||
? { configPath: resolved.configPath }
|
||||
? {
|
||||
configPath: resolved.configPath,
|
||||
query: options.query,
|
||||
startUrl: options.startUrl,
|
||||
outputDir: options.outputDir,
|
||||
}
|
||||
: {
|
||||
configPath: resolved.suitePath,
|
||||
dataPath: resolved.datasetPath,
|
||||
config: resolved.evalConfig,
|
||||
query: options.query,
|
||||
startUrl: options.startUrl,
|
||||
outputDir: options.outputDir,
|
||||
}
|
||||
|
||||
const result = await runEval(runOptions)
|
||||
|
||||
@@ -15,6 +15,7 @@ Usage:
|
||||
bun run eval suite --suite <suite.json> --variant <id> [--publish r2]
|
||||
bun run eval run --config <config.json>
|
||||
bun run eval run --suite <suite.json> --variant <id>
|
||||
bun run eval run --config <config.json> --query "..." --start-url <url>
|
||||
bun run eval grade --run <results/run-dir>
|
||||
bun run eval publish --run <results/run-dir> --target r2
|
||||
bun run eval -c <config.json>
|
||||
|
||||
@@ -31,6 +31,16 @@ interface AgisdkEvaluatorOutput {
|
||||
per_criterion: unknown[]
|
||||
}
|
||||
|
||||
interface FailedAgisdkCriterion {
|
||||
index: number
|
||||
detail: string
|
||||
expected?: unknown
|
||||
actual?: unknown
|
||||
}
|
||||
|
||||
const MAX_REASONING_CRITERIA = 8
|
||||
const MAX_REASONING_DETAIL_CHARS = 700
|
||||
|
||||
export class AgisdkStateDiffGrader implements Grader {
|
||||
name = 'agisdk_state_diff'
|
||||
|
||||
@@ -99,15 +109,23 @@ export class AgisdkStateDiffGrader implements Grader {
|
||||
'stderr.txt',
|
||||
evaluation.stderr,
|
||||
)
|
||||
const failedCriteria = this.extractFailedCriteria(result.per_criterion)
|
||||
if (failedCriteria.length > 0) {
|
||||
await writeGraderJsonArtifact(
|
||||
input,
|
||||
this.name,
|
||||
'failed-criteria.json',
|
||||
failedCriteria,
|
||||
)
|
||||
}
|
||||
return {
|
||||
score: result.reward,
|
||||
pass: result.pass,
|
||||
reasoning:
|
||||
result.message ||
|
||||
(result.pass ? 'All criteria passed' : 'Some criteria failed'),
|
||||
reasoning: this.buildReasoning(result, failedCriteria),
|
||||
details: {
|
||||
reward: result.reward,
|
||||
per_criterion: result.per_criterion,
|
||||
failed_criteria: failedCriteria,
|
||||
origin,
|
||||
agisdk_task_id: taskId,
|
||||
},
|
||||
@@ -148,6 +166,69 @@ export class AgisdkStateDiffGrader implements Grader {
|
||||
return null
|
||||
}
|
||||
|
||||
private buildReasoning(
|
||||
result: AgisdkEvaluatorOutput,
|
||||
failedCriteria: FailedAgisdkCriterion[],
|
||||
): string {
|
||||
const base =
|
||||
result.message ||
|
||||
(result.pass ? 'All criteria passed' : 'Some criteria failed')
|
||||
|
||||
if (result.pass || failedCriteria.length === 0) return base
|
||||
|
||||
const shown = failedCriteria.slice(0, MAX_REASONING_CRITERIA)
|
||||
const lines = shown.map(
|
||||
(criterion) =>
|
||||
`${criterion.index + 1}. ${this.formatFailedCriterion(criterion)}`,
|
||||
)
|
||||
const remaining = failedCriteria.length - shown.length
|
||||
if (remaining > 0) {
|
||||
lines.push(`... ${remaining} more failed criteria`)
|
||||
}
|
||||
|
||||
return `${base}\nFailed criteria:\n${lines.join('\n')}`
|
||||
}
|
||||
|
||||
private extractFailedCriteria(
|
||||
perCriterion: unknown[],
|
||||
): FailedAgisdkCriterion[] {
|
||||
return perCriterion.flatMap((criterion, index) => {
|
||||
if (!criterion || typeof criterion !== 'object') return []
|
||||
const record = criterion as Record<string, unknown>
|
||||
if (record.passed === true) return []
|
||||
|
||||
const detail =
|
||||
typeof record.detail === 'string'
|
||||
? record.detail
|
||||
: this.stringifyCriterionValue(record.raw_detail ?? record)
|
||||
const failed: FailedAgisdkCriterion = {
|
||||
index,
|
||||
detail,
|
||||
}
|
||||
if ('expected_value' in record) failed.expected = record.expected_value
|
||||
if ('actual_value' in record) failed.actual = record.actual_value
|
||||
return [failed]
|
||||
})
|
||||
}
|
||||
|
||||
private formatFailedCriterion(criterion: FailedAgisdkCriterion): string {
|
||||
const parts = [criterion.detail]
|
||||
if ('expected' in criterion) {
|
||||
parts.push(`expected=${this.stringifyCriterionValue(criterion.expected)}`)
|
||||
}
|
||||
if ('actual' in criterion) {
|
||||
parts.push(`actual=${this.stringifyCriterionValue(criterion.actual)}`)
|
||||
}
|
||||
|
||||
const text = parts.join(' | ')
|
||||
if (text.length <= MAX_REASONING_DETAIL_CHARS) return text
|
||||
return `${text.slice(0, MAX_REASONING_DETAIL_CHARS)}... (+${text.length - MAX_REASONING_DETAIL_CHARS} chars)`
|
||||
}
|
||||
|
||||
private stringifyCriterionValue(value: unknown): string {
|
||||
return typeof value === 'string' ? value : JSON.stringify(value)
|
||||
}
|
||||
|
||||
private async fetchFinishState(
|
||||
origin: string,
|
||||
mcpEndpoint: string,
|
||||
|
||||
@@ -26,6 +26,18 @@ import sys
|
||||
_STRICT = os.environ.get("AGISDK_STRICT_STRINGS", "").lower() in ("1", "true", "yes")
|
||||
|
||||
|
||||
def _json_safe(value: object) -> object:
|
||||
try:
|
||||
json.dumps(value)
|
||||
return value
|
||||
except TypeError:
|
||||
if isinstance(value, dict):
|
||||
return {str(k): _json_safe(v) for k, v in value.items()}
|
||||
if isinstance(value, (list, tuple)):
|
||||
return [_json_safe(v) for v in value]
|
||||
return str(value)
|
||||
|
||||
|
||||
def _soft_string_match(detail: object) -> bool:
|
||||
"""Return True iff `detail` is `{actual_value, expected_value}` with both
|
||||
strings and a non-empty `expected_value` that is contained in `actual_value`
|
||||
@@ -87,7 +99,16 @@ def main():
|
||||
for r in results:
|
||||
passed = bool(r[0])
|
||||
detail = r[1] if len(r) > 1 else ""
|
||||
entry: dict = {"passed": passed, "detail": str(detail)}
|
||||
entry: dict = {
|
||||
"passed": passed,
|
||||
"detail": str(detail),
|
||||
"raw_detail": _json_safe(detail),
|
||||
}
|
||||
if isinstance(detail, dict):
|
||||
if "actual_value" in detail:
|
||||
entry["actual_value"] = _json_safe(detail.get("actual_value"))
|
||||
if "expected_value" in detail:
|
||||
entry["expected_value"] = _json_safe(detail.get("expected_value"))
|
||||
if not _STRICT and not passed and _soft_string_match(detail):
|
||||
entry["passed"] = True
|
||||
entry["softened"] = True
|
||||
|
||||
@@ -62,6 +62,12 @@ const CAPTCHA_EXT_DIR = join(
|
||||
'../../extensions/nopecha',
|
||||
)
|
||||
|
||||
export function resolveServerStartScript(
|
||||
env: Record<string, string | undefined> = process.env,
|
||||
): string {
|
||||
return env.BROWSEROS_EVAL_SERVER_START_SCRIPT || 'start:ci'
|
||||
}
|
||||
|
||||
export class BrowserOSAppManager {
|
||||
private ports: EvalPorts
|
||||
private chromeProc: Subprocess | null = null
|
||||
@@ -215,9 +221,10 @@ export class BrowserOSAppManager {
|
||||
}
|
||||
this.serverLogFd = logFd
|
||||
|
||||
// `start:ci` skips `--watch` (no file-watcher overhead in CI). Falls back
|
||||
// to the regular `start` script outside CI for the dev-watch experience.
|
||||
const startScript = process.env.CI ? 'start:ci' : 'start'
|
||||
// Eval servers must not use `start` because it runs Bun in watch mode; a
|
||||
// source edit during a long eval can restart the worker server before the
|
||||
// grader fetches /finish. Keep an escape hatch for local debugging.
|
||||
const startScript = resolveServerStartScript()
|
||||
this.serverProc = spawn({
|
||||
cmd: ['bun', 'run', '--filter', '@browseros/server', startScript],
|
||||
cwd: MONOREPO_ROOT,
|
||||
|
||||
@@ -256,10 +256,27 @@ function printTaskProgress(
|
||||
for (const [name, gr] of Object.entries(result.graderResults)) {
|
||||
const icon = gr.pass ? 'PASS' : 'FAIL'
|
||||
console.log(` ${name}: ${icon}`)
|
||||
if (!gr.pass && gr.reasoning) {
|
||||
printIndentedReasoning(gr.reasoning, ' ')
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function printIndentedReasoning(
|
||||
reasoning: string,
|
||||
indent: string,
|
||||
maxLines = 12,
|
||||
): void {
|
||||
const lines = reasoning.trim().split('\n')
|
||||
for (const line of lines.slice(0, maxLines)) {
|
||||
console.log(`${indent}${line}`)
|
||||
}
|
||||
if (lines.length > maxLines) {
|
||||
console.log(`${indent}... ${lines.length - maxLines} more lines`)
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Summary
|
||||
// ============================================================================
|
||||
|
||||
211
packages/browseros-agent/apps/eval/src/utils/provider-error-retry.ts
vendored
Normal file
211
packages/browseros-agent/apps/eval/src/utils/provider-error-retry.ts
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
import { sleep } from './sleep'
|
||||
|
||||
const DEFAULT_PROVIDER_ERROR_RETRIES = 5
|
||||
const DEFAULT_PROVIDER_ERROR_RETRY_WINDOW_MS = 10_000
|
||||
const DEFAULT_RATE_LIMIT_RETRIES = 8
|
||||
const DEFAULT_RATE_LIMIT_RETRY_WINDOW_MS = 120_000
|
||||
const PROVIDER_ERROR_LOG_MAX_STRING_CHARS = 10_000
|
||||
const PROVIDER_ERROR_LOG_MAX_DEPTH = 5
|
||||
|
||||
const REDACTED_KEYS = /authorization|api[-_]?key|token|secret|cookie/i
|
||||
|
||||
export interface ProviderErrorRetryEvent {
|
||||
retryNumber: number
|
||||
maxRetries: number
|
||||
delayMs: number
|
||||
error: unknown
|
||||
}
|
||||
|
||||
export interface ProviderErrorRetryOptions {
|
||||
label: string
|
||||
signal?: AbortSignal
|
||||
retries?: number
|
||||
windowMs?: number
|
||||
rateLimitRetries?: number
|
||||
rateLimitWindowMs?: number
|
||||
onRetry?: (event: ProviderErrorRetryEvent) => void
|
||||
}
|
||||
|
||||
function readStringProperty(value: unknown, key: string): string | undefined {
|
||||
if (!value || typeof value !== 'object') return undefined
|
||||
const raw = (value as Record<string, unknown>)[key]
|
||||
return typeof raw === 'string' ? raw : undefined
|
||||
}
|
||||
|
||||
function readArrayProperty(value: unknown, key: string): unknown[] {
|
||||
if (!value || typeof value !== 'object') return []
|
||||
const raw = (value as Record<string, unknown>)[key]
|
||||
return Array.isArray(raw) ? raw : []
|
||||
}
|
||||
|
||||
function errorMarkers(error: unknown, seen = new Set<unknown>()): string[] {
|
||||
if (!error || seen.has(error)) return []
|
||||
seen.add(error)
|
||||
|
||||
const markers = [
|
||||
readStringProperty(error, 'name'),
|
||||
error instanceof Error ? error.message : undefined,
|
||||
].filter((value): value is string => !!value)
|
||||
|
||||
if (error && typeof error === 'object') {
|
||||
const record = error as Record<string, unknown>
|
||||
if ('isRetryable' in record) markers.push('isRetryable')
|
||||
if ('statusCode' in record) markers.push('statusCode')
|
||||
if ('statusCode' in record) markers.push(String(record.statusCode))
|
||||
if ('responseBody' in record) markers.push('responseBody')
|
||||
if ('responseBody' in record) markers.push(String(record.responseBody))
|
||||
if ('cause' in record) {
|
||||
markers.push(...errorMarkers(record.cause, seen))
|
||||
}
|
||||
}
|
||||
|
||||
for (const nestedError of readArrayProperty(error, 'errors')) {
|
||||
markers.push(...errorMarkers(nestedError, seen))
|
||||
}
|
||||
|
||||
return markers
|
||||
}
|
||||
|
||||
export function isProviderExecutionError(error: unknown): boolean {
|
||||
const markerText = errorMarkers(error).join('\n')
|
||||
return (
|
||||
isProviderRateLimitError(error) ||
|
||||
markerText.includes('Provider returned error') ||
|
||||
markerText.includes('APICallError') ||
|
||||
markerText.includes('AI_RetryError') ||
|
||||
markerText.includes('RetryError') ||
|
||||
markerText.includes('isRetryable') ||
|
||||
markerText.includes('statusCode') ||
|
||||
markerText.includes('responseBody')
|
||||
)
|
||||
}
|
||||
|
||||
export function isProviderRateLimitError(error: unknown): boolean {
|
||||
const markerText = errorMarkers(error).join('\n').toLowerCase()
|
||||
return (
|
||||
markerText.includes('rate limit') ||
|
||||
markerText.includes('rate-limit') ||
|
||||
markerText.includes('too many requests') ||
|
||||
markerText.includes('statuscode\n429') ||
|
||||
markerText.includes('\n429\n')
|
||||
)
|
||||
}
|
||||
|
||||
function errorMessage(error: unknown): string {
|
||||
return error instanceof Error ? error.message : String(error)
|
||||
}
|
||||
|
||||
function truncateString(value: string): string {
|
||||
if (value.length <= PROVIDER_ERROR_LOG_MAX_STRING_CHARS) return value
|
||||
return `${value.slice(0, PROVIDER_ERROR_LOG_MAX_STRING_CHARS)}... (+${value.length - PROVIDER_ERROR_LOG_MAX_STRING_CHARS} chars)`
|
||||
}
|
||||
|
||||
function serializeForLog(
|
||||
value: unknown,
|
||||
depth = 0,
|
||||
seen = new Set<unknown>(),
|
||||
): unknown {
|
||||
if (typeof value === 'string') return truncateString(value)
|
||||
if (value === null || typeof value !== 'object') return value
|
||||
if (seen.has(value)) return '[Circular]'
|
||||
if (depth >= PROVIDER_ERROR_LOG_MAX_DEPTH) return '[MaxDepth]'
|
||||
|
||||
seen.add(value)
|
||||
|
||||
if (value instanceof Error) {
|
||||
const serialized: Record<string, unknown> = {
|
||||
name: value.name,
|
||||
message: value.message,
|
||||
stack: value.stack,
|
||||
}
|
||||
|
||||
for (const key of Object.getOwnPropertyNames(value)) {
|
||||
if (key in serialized) continue
|
||||
serialized[key] = REDACTED_KEYS.test(key)
|
||||
? '[Redacted]'
|
||||
: serializeForLog(
|
||||
(value as unknown as Record<string, unknown>)[key],
|
||||
depth + 1,
|
||||
seen,
|
||||
)
|
||||
}
|
||||
|
||||
if ('cause' in value) {
|
||||
serialized.cause = serializeForLog(value.cause, depth + 1, seen)
|
||||
}
|
||||
|
||||
return serialized
|
||||
}
|
||||
|
||||
if (Array.isArray(value)) {
|
||||
return value.map((item) => serializeForLog(item, depth + 1, seen))
|
||||
}
|
||||
|
||||
const serialized: Record<string, unknown> = {}
|
||||
for (const [key, item] of Object.entries(value)) {
|
||||
serialized[key] = REDACTED_KEYS.test(key)
|
||||
? '[Redacted]'
|
||||
: serializeForLog(item, depth + 1, seen)
|
||||
}
|
||||
return serialized
|
||||
}
|
||||
|
||||
function logFinalProviderError(
|
||||
label: string,
|
||||
error: unknown,
|
||||
attempts: number,
|
||||
): void {
|
||||
console.error(
|
||||
`[provider-retry] ${label}: provider error persisted after ${attempts} attempts. Final error:\n${JSON.stringify(
|
||||
serializeForLog(error),
|
||||
null,
|
||||
2,
|
||||
)}`,
|
||||
)
|
||||
}
|
||||
|
||||
export async function retryProviderErrors<T>(
|
||||
operation: () => Promise<T>,
|
||||
options: ProviderErrorRetryOptions,
|
||||
): Promise<T> {
|
||||
const providerRetries = options.retries ?? DEFAULT_PROVIDER_ERROR_RETRIES
|
||||
const providerWindowMs =
|
||||
options.windowMs ?? DEFAULT_PROVIDER_ERROR_RETRY_WINDOW_MS
|
||||
const rateLimitRetries =
|
||||
options.rateLimitRetries ?? DEFAULT_RATE_LIMIT_RETRIES
|
||||
const rateLimitWindowMs =
|
||||
options.rateLimitWindowMs ?? DEFAULT_RATE_LIMIT_RETRY_WINDOW_MS
|
||||
|
||||
for (let attempt = 0; ; attempt++) {
|
||||
try {
|
||||
return await operation()
|
||||
} catch (error) {
|
||||
const isProviderError = isProviderExecutionError(error)
|
||||
if (options.signal?.aborted || !isProviderError) {
|
||||
throw error
|
||||
}
|
||||
|
||||
const isRateLimit = isProviderRateLimitError(error)
|
||||
const maxRetries = isRateLimit ? rateLimitRetries : providerRetries
|
||||
const windowMs = isRateLimit ? rateLimitWindowMs : providerWindowMs
|
||||
const delayMs = maxRetries > 0 ? Math.floor(windowMs / maxRetries) : 0
|
||||
|
||||
if (attempt >= maxRetries) {
|
||||
logFinalProviderError(options.label, error, attempt + 1)
|
||||
throw error
|
||||
}
|
||||
|
||||
const event = {
|
||||
retryNumber: attempt + 1,
|
||||
maxRetries,
|
||||
delayMs,
|
||||
error,
|
||||
}
|
||||
options.onRetry?.(event)
|
||||
console.warn(
|
||||
`[provider-retry] ${options.label}: retry ${event.retryNumber}/${maxRetries} in ${delayMs}ms after ${isRateLimit ? 'rate limit' : 'provider error'}: ${errorMessage(error)}`,
|
||||
)
|
||||
await sleep(delayMs, options.signal)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -13,10 +13,15 @@ export interface TimeoutResult<T> {
|
||||
terminationReason: TerminationReason
|
||||
}
|
||||
|
||||
export interface EvalTimeoutOptions {
|
||||
rethrowError?: (error: Error) => boolean
|
||||
}
|
||||
|
||||
export async function withEvalTimeout<T>(
|
||||
timeoutMs: number,
|
||||
capture: CaptureContext,
|
||||
fn: (signal: AbortSignal) => Promise<T>,
|
||||
options: EvalTimeoutOptions = {},
|
||||
): Promise<TimeoutResult<T>> {
|
||||
const abortController = new AbortController()
|
||||
const timeoutHandle = setTimeout(() => abortController.abort(), timeoutMs)
|
||||
@@ -39,6 +44,9 @@ export async function withEvalTimeout<T>(
|
||||
capture.addError('agent_execution', error.message, {
|
||||
stack: error.stack,
|
||||
})
|
||||
if (options.rethrowError?.(error)) {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
return { terminationReason }
|
||||
|
||||
50
packages/browseros-agent/apps/eval/tests/agents/single-agent.test.ts
vendored
Normal file
50
packages/browseros-agent/apps/eval/tests/agents/single-agent.test.ts
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import {
|
||||
buildEmptyToolResultStopContinuationPrompt,
|
||||
shouldContinueAfterEmptyToolResultStop,
|
||||
} from '../../src/agents/single-agent'
|
||||
|
||||
describe('single-agent empty tool-result stop handling', () => {
|
||||
it('continues when the model emits an empty stop after a tool result', () => {
|
||||
expect(
|
||||
shouldContinueAfterEmptyToolResultStop({
|
||||
text: '',
|
||||
finishReason: 'stop',
|
||||
toolCalls: [],
|
||||
steps: [{ toolResults: [{ ok: true }] }, { toolResults: [] }],
|
||||
}),
|
||||
).toBe(true)
|
||||
})
|
||||
|
||||
it('does not continue for normal final text', () => {
|
||||
expect(
|
||||
shouldContinueAfterEmptyToolResultStop({
|
||||
text: 'Done',
|
||||
finishReason: 'stop',
|
||||
toolCalls: [],
|
||||
steps: [{ toolResults: [{ ok: true }] }, { toolResults: [] }],
|
||||
}),
|
||||
).toBe(false)
|
||||
})
|
||||
|
||||
it('does not continue when there was no previous tool result', () => {
|
||||
expect(
|
||||
shouldContinueAfterEmptyToolResultStop({
|
||||
text: '',
|
||||
finishReason: 'stop',
|
||||
toolCalls: [],
|
||||
steps: [{ toolResults: [] }],
|
||||
}),
|
||||
).toBe(false)
|
||||
})
|
||||
|
||||
it('builds a continuation prompt with the original task', () => {
|
||||
const prompt = buildEmptyToolResultStopContinuationPrompt(
|
||||
'Delete the target email.',
|
||||
)
|
||||
|
||||
expect(prompt).toContain('Continue the eval task')
|
||||
expect(prompt).toContain('Do not stop after routine tool results')
|
||||
expect(prompt).toContain('Delete the target email.')
|
||||
})
|
||||
})
|
||||
@@ -52,6 +52,28 @@ describe('parseEvalCliArgs', () => {
|
||||
})
|
||||
})
|
||||
|
||||
it('parses one-off query overrides for a single eval run', () => {
|
||||
expect(
|
||||
parseEvalCliArgs([
|
||||
'run',
|
||||
'--config',
|
||||
'configs/legacy/gui-click-amazon-smoke.json',
|
||||
'--query',
|
||||
'open the cart',
|
||||
'--start-url',
|
||||
'https://www.amazon.com/',
|
||||
'--output-dir',
|
||||
'/tmp/gui-click-eval',
|
||||
]),
|
||||
).toEqual({
|
||||
command: 'run',
|
||||
configPath: 'configs/legacy/gui-click-amazon-smoke.json',
|
||||
query: 'open the cart',
|
||||
startUrl: 'https://www.amazon.com/',
|
||||
outputDir: '/tmp/gui-click-eval',
|
||||
})
|
||||
})
|
||||
|
||||
it('rejects missing required command options with targeted errors', () => {
|
||||
expect(() => parseEvalCliArgs(['run'])).toThrow(
|
||||
'run requires --config or --suite',
|
||||
|
||||
@@ -116,6 +116,9 @@ describe('suite command', () => {
|
||||
suitePath,
|
||||
model: 'moonshotai/kimi-k2.5',
|
||||
provider: 'openai-compatible',
|
||||
query: 'Open Amazon cart',
|
||||
startUrl: 'https://www.amazon.com/',
|
||||
outputDir: '/tmp/gui-click-eval',
|
||||
env: {},
|
||||
},
|
||||
{
|
||||
@@ -132,5 +135,8 @@ describe('suite command', () => {
|
||||
expect(basename(calls[1].configPath)).toBe('agisdk-daily-10.json')
|
||||
expect(calls[1].config).toBeDefined()
|
||||
expect(calls[1].dataPath?.endsWith('tasks.jsonl')).toBe(true)
|
||||
expect(calls[1].query).toBe('Open Amazon cart')
|
||||
expect(calls[1].startUrl).toBe('https://www.amazon.com/')
|
||||
expect(calls[1].outputDir).toBe('/tmp/gui-click-eval')
|
||||
})
|
||||
})
|
||||
|
||||
@@ -31,7 +31,14 @@ describe('AgisdkStateDiffGrader artifacts', () => {
|
||||
reward: 0,
|
||||
pass: false,
|
||||
message: 'Missing entree',
|
||||
per_criterion: [{ passed: false, detail: 'entree missing' }],
|
||||
per_criterion: [
|
||||
{
|
||||
passed: false,
|
||||
detail: 'cart item mismatch',
|
||||
expected_value: 'Entree',
|
||||
actual_value: 'Soup',
|
||||
},
|
||||
],
|
||||
},
|
||||
stderr: 'criterion log',
|
||||
})
|
||||
@@ -53,6 +60,17 @@ describe('AgisdkStateDiffGrader artifacts', () => {
|
||||
const result = await grader.grade(input)
|
||||
|
||||
expect(result.pass).toBe(false)
|
||||
expect(result.reasoning).toContain('Failed criteria:')
|
||||
expect(result.reasoning).toContain('expected=Entree')
|
||||
expect(result.reasoning).toContain('actual=Soup')
|
||||
expect(result.details?.failed_criteria).toEqual([
|
||||
{
|
||||
index: 0,
|
||||
detail: 'cart item mismatch',
|
||||
expected: 'Entree',
|
||||
actual: 'Soup',
|
||||
},
|
||||
])
|
||||
expect(
|
||||
JSON.parse(
|
||||
await readFile(
|
||||
@@ -69,6 +87,21 @@ describe('AgisdkStateDiffGrader artifacts', () => {
|
||||
),
|
||||
),
|
||||
).toMatchObject({ message: 'Missing entree' })
|
||||
expect(
|
||||
JSON.parse(
|
||||
await readFile(
|
||||
join(dir, 'grader-artifacts/agisdk_state_diff/failed-criteria.json'),
|
||||
'utf-8',
|
||||
),
|
||||
),
|
||||
).toEqual([
|
||||
{
|
||||
index: 0,
|
||||
detail: 'cart item mismatch',
|
||||
expected: 'Entree',
|
||||
actual: 'Soup',
|
||||
},
|
||||
])
|
||||
expect(
|
||||
await readFile(
|
||||
join(dir, 'grader-artifacts/agisdk_state_diff/stderr.txt'),
|
||||
|
||||
@@ -42,7 +42,7 @@ describe('adaptEvalConfigFile', () => {
|
||||
const kimi = await adaptEvalConfigFile(
|
||||
'apps/eval/configs/legacy/browseros-agent-kimi-k2-5-agisdk-real.json',
|
||||
)
|
||||
const opus = await adaptEvalConfigFile(
|
||||
const openrouterBedrockOpus = await adaptEvalConfigFile(
|
||||
'apps/eval/configs/legacy/browseros-agent-opus-4-6-agisdk-real.json',
|
||||
)
|
||||
|
||||
@@ -54,16 +54,22 @@ describe('adaptEvalConfigFile', () => {
|
||||
})
|
||||
expect(kimi.evalConfig.num_workers).toBe(3)
|
||||
|
||||
expect(opus.suite.id).toBe('browseros-agent-opus-4-6-agisdk-real')
|
||||
expect(opus.evalConfig.agent).toMatchObject({
|
||||
expect(openrouterBedrockOpus.suite.id).toBe(
|
||||
'browseros-agent-opus-4-6-agisdk-real',
|
||||
)
|
||||
expect(openrouterBedrockOpus.evalConfig.agent).toMatchObject({
|
||||
type: 'single',
|
||||
provider: 'bedrock',
|
||||
model: 'global.anthropic.claude-opus-4-6-v1',
|
||||
region: 'AWS_REGION',
|
||||
accessKeyId: 'AWS_ACCESS_KEY_ID',
|
||||
secretAccessKey: 'AWS_SECRET_ACCESS_KEY',
|
||||
provider: 'openrouter',
|
||||
model: 'anthropic/claude-opus-4.6',
|
||||
apiKey: 'OPENROUTER_API_KEY',
|
||||
reasoning: { enabled: true },
|
||||
verbosity: 'high',
|
||||
providerRouting: {
|
||||
only: ['amazon-bedrock'],
|
||||
allowFallbacks: false,
|
||||
},
|
||||
})
|
||||
expect(opus.evalConfig.num_workers).toBe(2)
|
||||
expect(openrouterBedrockOpus.evalConfig.num_workers).toBe(2)
|
||||
})
|
||||
|
||||
it('adapts claude-code configs without provider credentials', async () => {
|
||||
|
||||
137
packages/browseros-agent/apps/eval/tests/utils/provider-error-retry.test.ts
vendored
Normal file
137
packages/browseros-agent/apps/eval/tests/utils/provider-error-retry.test.ts
vendored
Normal file
@@ -0,0 +1,137 @@
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import {
|
||||
isProviderExecutionError,
|
||||
isProviderRateLimitError,
|
||||
retryProviderErrors,
|
||||
} from '../../src/utils/provider-error-retry'
|
||||
|
||||
function providerError(message = 'Provider returned error'): Error {
|
||||
const error = new Error(message)
|
||||
error.name = 'APICallError'
|
||||
;(error as unknown as Record<string, unknown>).statusCode = 500
|
||||
;(error as unknown as Record<string, unknown>).responseBody =
|
||||
'{"error":"upstream failed"}'
|
||||
return error
|
||||
}
|
||||
|
||||
function rateLimitError(): Error {
|
||||
const error = new Error('rate limit exceeded, please try again later')
|
||||
error.name = 'AI_APICallError'
|
||||
;(error as unknown as Record<string, unknown>).statusCode = 429
|
||||
return error
|
||||
}
|
||||
|
||||
async function withoutRetryWarnings<T>(fn: () => Promise<T>): Promise<T> {
|
||||
const originalWarn = console.warn
|
||||
const originalError = console.error
|
||||
console.warn = () => {}
|
||||
console.error = () => {}
|
||||
try {
|
||||
return await fn()
|
||||
} finally {
|
||||
console.warn = originalWarn
|
||||
console.error = originalError
|
||||
}
|
||||
}
|
||||
|
||||
describe('provider error retries', () => {
|
||||
it('detects provider errors from SDK-style markers', () => {
|
||||
expect(isProviderExecutionError(providerError())).toBe(true)
|
||||
expect(isProviderExecutionError(rateLimitError())).toBe(true)
|
||||
expect(isProviderRateLimitError(rateLimitError())).toBe(true)
|
||||
expect(
|
||||
isProviderExecutionError(
|
||||
new Error('rate limit exceeded, please try again later'),
|
||||
),
|
||||
).toBe(true)
|
||||
expect(isProviderExecutionError(new Error('regular tool failure'))).toBe(
|
||||
false,
|
||||
)
|
||||
})
|
||||
|
||||
it('retries provider errors and returns a later success', async () => {
|
||||
await withoutRetryWarnings(async () => {
|
||||
let calls = 0
|
||||
const result = await retryProviderErrors(
|
||||
async () => {
|
||||
calls++
|
||||
if (calls <= 3) throw providerError()
|
||||
return 'ok'
|
||||
},
|
||||
{ label: 'test', retries: 5, windowMs: 0 },
|
||||
)
|
||||
|
||||
expect(result).toBe('ok')
|
||||
expect(calls).toBe(4)
|
||||
})
|
||||
})
|
||||
|
||||
it('uses the rate-limit retry policy for provider rate limits', async () => {
|
||||
await withoutRetryWarnings(async () => {
|
||||
let calls = 0
|
||||
const result = await retryProviderErrors(
|
||||
async () => {
|
||||
calls++
|
||||
if (calls <= 2) throw rateLimitError()
|
||||
return 'ok'
|
||||
},
|
||||
{
|
||||
label: 'test',
|
||||
retries: 0,
|
||||
windowMs: 0,
|
||||
rateLimitRetries: 2,
|
||||
rateLimitWindowMs: 0,
|
||||
},
|
||||
)
|
||||
|
||||
expect(result).toBe('ok')
|
||||
expect(calls).toBe(3)
|
||||
})
|
||||
})
|
||||
|
||||
it('throws the final provider error after retries are exhausted', async () => {
|
||||
const originalWarn = console.warn
|
||||
const originalError = console.error
|
||||
const errorLogs: string[] = []
|
||||
console.warn = () => {}
|
||||
console.error = (message?: unknown) => {
|
||||
errorLogs.push(String(message))
|
||||
}
|
||||
|
||||
try {
|
||||
let calls = 0
|
||||
await expect(
|
||||
retryProviderErrors(
|
||||
async () => {
|
||||
calls++
|
||||
throw providerError()
|
||||
},
|
||||
{ label: 'test', retries: 5, windowMs: 0 },
|
||||
),
|
||||
).rejects.toThrow('Provider returned error')
|
||||
expect(calls).toBe(6)
|
||||
expect(errorLogs.join('\n')).toContain(
|
||||
'provider error persisted after 6 attempts',
|
||||
)
|
||||
expect(errorLogs.join('\n')).toContain('responseBody')
|
||||
expect(errorLogs.join('\n')).toContain('upstream failed')
|
||||
} finally {
|
||||
console.warn = originalWarn
|
||||
console.error = originalError
|
||||
}
|
||||
})
|
||||
|
||||
it('does not retry non-provider errors', async () => {
|
||||
let calls = 0
|
||||
await expect(
|
||||
retryProviderErrors(
|
||||
async () => {
|
||||
calls++
|
||||
throw new Error('tool failed')
|
||||
},
|
||||
{ label: 'test', retries: 5, windowMs: 0 },
|
||||
),
|
||||
).rejects.toThrow('tool failed')
|
||||
expect(calls).toBe(1)
|
||||
})
|
||||
})
|
||||
@@ -32,6 +32,11 @@ import { buildMemoryToolSet } from '../tools/memory/build-toolset'
|
||||
import type { ToolRegistry } from '../tools/tool-registry'
|
||||
import { CHAT_MODE_ALLOWED_TOOLS } from './chat-mode'
|
||||
import { createCompactionPrepareStep, type StepWithUsage } from './compaction'
|
||||
import {
|
||||
GUI_CLICK_ONLY_BROWSER_TOOL_NAMES,
|
||||
GUI_CLICK_ONLY_MODE,
|
||||
isGuiClickOnlyBrowserToolAllowed,
|
||||
} from './gui-click-only'
|
||||
import { buildMcpServerSpecs, createMcpClients } from './mcp-builder'
|
||||
import {
|
||||
getMessageNormalizationOptions,
|
||||
@@ -101,6 +106,7 @@ export class AiSdkAgent {
|
||||
session: {
|
||||
origin: config.resolvedConfig.origin,
|
||||
originPageId,
|
||||
suppressSnapshotOutputs: GUI_CLICK_ONLY_MODE,
|
||||
},
|
||||
aclRules: config.aclRules,
|
||||
}
|
||||
@@ -109,32 +115,48 @@ export class AiSdkAgent {
|
||||
toolContext,
|
||||
config.resolvedConfig.toolApprovalConfig,
|
||||
)
|
||||
const browserTools = config.resolvedConfig.chatMode
|
||||
? Object.fromEntries(
|
||||
Object.entries(allBrowserTools).filter(([name]) =>
|
||||
CHAT_MODE_ALLOWED_TOOLS.has(name),
|
||||
),
|
||||
)
|
||||
: allBrowserTools
|
||||
let browserTools = allBrowserTools
|
||||
if (GUI_CLICK_ONLY_MODE) {
|
||||
browserTools = Object.fromEntries(
|
||||
Object.entries(allBrowserTools).filter(([name]) =>
|
||||
isGuiClickOnlyBrowserToolAllowed(name),
|
||||
),
|
||||
)
|
||||
} else if (config.resolvedConfig.chatMode) {
|
||||
browserTools = Object.fromEntries(
|
||||
Object.entries(allBrowserTools).filter(([name]) =>
|
||||
CHAT_MODE_ALLOWED_TOOLS.has(name),
|
||||
),
|
||||
)
|
||||
}
|
||||
if (config.resolvedConfig.chatMode) {
|
||||
logger.info('Chat mode enabled, restricting to read-only browser tools', {
|
||||
allowedTools: Array.from(CHAT_MODE_ALLOWED_TOOLS),
|
||||
})
|
||||
}
|
||||
if (GUI_CLICK_ONLY_MODE) {
|
||||
logger.info('GUI click-only mode enabled, restricting browser tools', {
|
||||
allowedTools: Array.from(GUI_CLICK_ONLY_BROWSER_TOOL_NAMES),
|
||||
})
|
||||
}
|
||||
|
||||
// Get Klavis tools from shared background handle (no per-session connection).
|
||||
// Only expose when user has enabled servers — matches old per-session gating.
|
||||
const klavisTools =
|
||||
!GUI_CLICK_ONLY_MODE &&
|
||||
config.klavisRef?.handle &&
|
||||
config.browserContext?.enabledMcpServers?.length
|
||||
? buildKlavisToolSet(config.klavisRef.handle)
|
||||
: {}
|
||||
|
||||
// Connect custom (non-Klavis) MCP servers per-session
|
||||
const specs = await buildMcpServerSpecs({
|
||||
browserContext: config.browserContext,
|
||||
})
|
||||
const { clients, tools: customMcpTools } = await createMcpClients(specs)
|
||||
const { clients, tools: customMcpTools } = GUI_CLICK_ONLY_MODE
|
||||
? { clients: [] as Array<{ close(): Promise<void> }>, tools: {} }
|
||||
: await createMcpClients(
|
||||
await buildMcpServerSpecs({
|
||||
browserContext: config.browserContext,
|
||||
}),
|
||||
)
|
||||
const collidingToolNames = Object.keys(customMcpTools).filter(
|
||||
(name) => name in klavisTools,
|
||||
)
|
||||
@@ -183,12 +205,15 @@ export class AiSdkAgent {
|
||||
|
||||
// Add filesystem tools — skip in chat mode (read-only) and when no workspace is selected
|
||||
const filesystemTools =
|
||||
!config.resolvedConfig.chatMode && config.resolvedConfig.workingDir
|
||||
!GUI_CLICK_ONLY_MODE &&
|
||||
!config.resolvedConfig.chatMode &&
|
||||
config.resolvedConfig.workingDir
|
||||
? buildFilesystemToolSet(config.resolvedConfig.workingDir)
|
||||
: {}
|
||||
const memoryTools = config.resolvedConfig.chatMode
|
||||
? {}
|
||||
: buildMemoryToolSet()
|
||||
const memoryTools =
|
||||
config.resolvedConfig.chatMode || GUI_CLICK_ONLY_MODE
|
||||
? {}
|
||||
: buildMemoryToolSet()
|
||||
const tools = {
|
||||
...browserTools,
|
||||
...externalMcpTools,
|
||||
@@ -212,6 +237,15 @@ export class AiSdkAgent {
|
||||
) {
|
||||
excludeSections.push('nudges')
|
||||
}
|
||||
if (GUI_CLICK_ONLY_MODE) {
|
||||
excludeSections.push(
|
||||
'external-integrations',
|
||||
'memory-and-identity',
|
||||
'workspace',
|
||||
'skills',
|
||||
'nudges',
|
||||
)
|
||||
}
|
||||
const soulContent = await readSoul()
|
||||
const isBootstrap = await isSoulBootstrap()
|
||||
|
||||
@@ -233,6 +267,8 @@ export class AiSdkAgent {
|
||||
declinedApps: config.resolvedConfig.declinedApps,
|
||||
skillsCatalog,
|
||||
origin: config.resolvedConfig.origin,
|
||||
guiClickOnly: GUI_CLICK_ONLY_MODE,
|
||||
evalMode: config.resolvedConfig.evalMode,
|
||||
})
|
||||
|
||||
// Configure compaction for context window management
|
||||
@@ -267,6 +303,45 @@ export class AiSdkAgent {
|
||||
tools,
|
||||
stopWhen: [stepCountIs(AGENT_LIMITS.MAX_TURNS)],
|
||||
prepareStep,
|
||||
onFinish: (event) => {
|
||||
const previousStep = event.steps.at(-2)
|
||||
const totalToolCalls = event.steps.reduce(
|
||||
(sum, step) => sum + step.toolCalls.length,
|
||||
0,
|
||||
)
|
||||
const totalToolResults = event.steps.reduce(
|
||||
(sum, step) => sum + step.toolResults.length,
|
||||
0,
|
||||
)
|
||||
|
||||
logger.info('Agent tool loop finished', {
|
||||
conversationId: config.resolvedConfig.conversationId,
|
||||
provider: config.resolvedConfig.provider,
|
||||
model: config.resolvedConfig.model,
|
||||
finishReason: event.finishReason,
|
||||
rawFinishReason: event.rawFinishReason,
|
||||
stepNumber: event.stepNumber,
|
||||
stepCount: event.steps.length,
|
||||
finalTextLength: event.text.length,
|
||||
emptyFinalText: event.text.trim().length === 0,
|
||||
lastStepToolCallCount: event.toolCalls.length,
|
||||
lastStepToolResultCount: event.toolResults.length,
|
||||
previousStepFinishReason: previousStep?.finishReason,
|
||||
previousStepToolCallCount: previousStep?.toolCalls.length,
|
||||
previousStepToolResultCount: previousStep?.toolResults.length,
|
||||
endedAfterToolResult:
|
||||
event.toolCalls.length === 0 &&
|
||||
(previousStep?.toolResults.length ?? 0) > 0,
|
||||
totalToolCalls,
|
||||
totalToolResults,
|
||||
totalInputTokens: event.totalUsage.inputTokens,
|
||||
totalOutputTokens: event.totalUsage.outputTokens,
|
||||
totalTokens: event.totalUsage.totalTokens,
|
||||
finalStepInputTokens: event.usage.inputTokens,
|
||||
finalStepOutputTokens: event.usage.outputTokens,
|
||||
finalStepTotalTokens: event.usage.totalTokens,
|
||||
})
|
||||
},
|
||||
...(isChatGPTPro && {
|
||||
providerOptions: {
|
||||
openai: {
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
export const GUI_CLICK_ONLY_MODE = true
|
||||
|
||||
export const GUI_CLICK_ONLY_BROWSER_TOOL_NAMES = new Set([
|
||||
'click',
|
||||
'hover',
|
||||
'scroll',
|
||||
'type_text',
|
||||
'take_screenshot',
|
||||
'get_active_page',
|
||||
'list_pages',
|
||||
'navigate_page',
|
||||
'new_page',
|
||||
'close_page',
|
||||
])
|
||||
|
||||
export function isGuiClickOnlyBrowserToolAllowed(name: string): boolean {
|
||||
return GUI_CLICK_ONLY_BROWSER_TOOL_NAMES.has(name)
|
||||
}
|
||||
@@ -19,6 +19,10 @@ type ToolResultContentPart = Extract<
|
||||
type UserMessagePart = Exclude<UserContent, string>[number]
|
||||
type UserMediaPart = Extract<UserMessagePart, ImagePart | FilePart>
|
||||
|
||||
const MAX_SCREENSHOTS_IN_MODEL_HISTORY = 3
|
||||
const SCREENSHOT_HISTORY_PLACEHOLDER =
|
||||
'<screenshot omitted from context: keeping latest 3 screenshots>'
|
||||
|
||||
export interface MessageNormalizationOptions {
|
||||
supportsImages: boolean
|
||||
supportsMediaInToolResults: boolean
|
||||
@@ -113,6 +117,90 @@ function toolResultContentPartToUserMedia(
|
||||
}
|
||||
}
|
||||
|
||||
function isScreenshotToolResult(part: ToolResultPart): boolean {
|
||||
return (
|
||||
part.type === 'tool-result' &&
|
||||
typeof part.toolName === 'string' &&
|
||||
(part.toolName.includes('screenshot') || part.toolName === 'snapshot')
|
||||
)
|
||||
}
|
||||
|
||||
function isImageToolResultContentPart(part: ToolResultContentPart): boolean {
|
||||
switch (part.type) {
|
||||
case 'media':
|
||||
case 'image-data':
|
||||
case 'file-data':
|
||||
return part.mediaType.startsWith('image/')
|
||||
case 'image-url':
|
||||
case 'image-file-id':
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function pruneScreenshotHistory(messages: ModelMessage[]): ModelMessage[] {
|
||||
let remainingScreenshots = MAX_SCREENSHOTS_IN_MODEL_HISTORY
|
||||
let changed = false
|
||||
const pruned = [...messages]
|
||||
|
||||
for (
|
||||
let messageIndex = messages.length - 1;
|
||||
messageIndex >= 0;
|
||||
messageIndex--
|
||||
) {
|
||||
const message = messages[messageIndex]
|
||||
if (message.role !== 'tool') continue
|
||||
|
||||
let messageChanged = false
|
||||
const content = [...message.content]
|
||||
|
||||
for (let partIndex = content.length - 1; partIndex >= 0; partIndex--) {
|
||||
const part = content[partIndex]
|
||||
if (
|
||||
part.type !== 'tool-result' ||
|
||||
part.output.type !== 'content' ||
|
||||
!isScreenshotToolResult(part)
|
||||
) {
|
||||
continue
|
||||
}
|
||||
|
||||
let partChanged = false
|
||||
const value = [...part.output.value]
|
||||
|
||||
for (let valueIndex = value.length - 1; valueIndex >= 0; valueIndex--) {
|
||||
if (!isImageToolResultContentPart(value[valueIndex])) continue
|
||||
|
||||
if (remainingScreenshots > 0) {
|
||||
remainingScreenshots--
|
||||
continue
|
||||
}
|
||||
|
||||
value[valueIndex] = {
|
||||
type: 'text',
|
||||
text: SCREENSHOT_HISTORY_PLACEHOLDER,
|
||||
}
|
||||
partChanged = true
|
||||
}
|
||||
|
||||
if (!partChanged) continue
|
||||
|
||||
content[partIndex] = {
|
||||
...part,
|
||||
output: { ...part.output, value },
|
||||
}
|
||||
messageChanged = true
|
||||
}
|
||||
|
||||
if (!messageChanged) continue
|
||||
|
||||
pruned[messageIndex] = { ...message, content }
|
||||
changed = true
|
||||
}
|
||||
|
||||
return changed ? pruned : messages
|
||||
}
|
||||
|
||||
function normalizeToolMessageForModel(
|
||||
message: ToolModelMessage,
|
||||
supportsImages: boolean,
|
||||
@@ -178,14 +266,16 @@ export function normalizeMessagesForModel(
|
||||
messages: ModelMessage[],
|
||||
options: MessageNormalizationOptions,
|
||||
): ModelMessage[] {
|
||||
const screenshotPrunedMessages = pruneScreenshotHistory(messages)
|
||||
|
||||
if (options.supportsMediaInToolResults) {
|
||||
return messages
|
||||
return screenshotPrunedMessages
|
||||
}
|
||||
|
||||
let changed = false
|
||||
const normalized: ModelMessage[] = []
|
||||
|
||||
for (const message of messages) {
|
||||
for (const message of screenshotPrunedMessages) {
|
||||
if (message.role !== 'tool') {
|
||||
normalized.push(message)
|
||||
continue
|
||||
@@ -201,5 +291,5 @@ export function normalizeMessagesForModel(
|
||||
normalized.push(...replacement)
|
||||
}
|
||||
|
||||
return changed ? normalized : messages
|
||||
return changed ? normalized : screenshotPrunedMessages
|
||||
}
|
||||
|
||||
@@ -31,6 +31,14 @@ function getRoleAndMode(
|
||||
_exclude: Set<string>,
|
||||
options?: BuildSystemPromptOptions,
|
||||
): string {
|
||||
if (options?.guiClickOnly) {
|
||||
return `<role>
|
||||
You are BrowserOS running in an experimental GUI click model mode. Page clicks are mediated through the \`click\` tool, which uses a visual model to choose coordinates from the current screenshot.
|
||||
|
||||
Your tool surface is intentionally small: open or manage pages, then interact with visible page targets through GUI-backed clicks. You cannot read page content or inspect elements through DOM, accessibility-tree, snapshot, page-text, link-extraction, or script-evaluation tools in this mode.
|
||||
</role>`
|
||||
}
|
||||
|
||||
const hasWorkspace = !!options?.workspaceDir
|
||||
|
||||
let role: string
|
||||
@@ -124,6 +132,23 @@ function getCapabilities(
|
||||
): string {
|
||||
const hasWorkspace = !!options?.workspaceDir
|
||||
|
||||
if (options?.guiClickOnly) {
|
||||
return `<capabilities>
|
||||
## Your Capabilities
|
||||
|
||||
### Browser Control
|
||||
Use these browser tools under the GUI click model constraint:
|
||||
- \`click\` captures the current page screenshot internally, asks the GUI click model where to click based on your prompt, then executes that coordinate click. Make sure to be brief, concise and capture the semantic essence of where you want to click.
|
||||
- \`hover\` captures the current page screenshot internally, asks the GUI model where to hover based on your prompt, then moves the cursor there.
|
||||
- \`type_text\` types into the currently focused element. Use it after \`click\` focuses a text field.
|
||||
- \`scroll\` scrolls the page viewport.
|
||||
- \`take_screenshot\` returns a visual screenshot for feedback. It does not expose DOM, accessibility tree, page text, links, or scripts.
|
||||
- \`get_active_page\`, \`list_pages\`, \`navigate_page\`, \`new_page\`, and \`close_page\` are available for opening and managing pages.
|
||||
|
||||
You cannot inspect the DOM, accessibility tree, snapshots, page text, links, or scripts. Use the Page ID from Browser Context directly and issue concise visual click prompts for page targets.
|
||||
</capabilities>`
|
||||
}
|
||||
|
||||
let capabilities = `<capabilities>
|
||||
## Your Capabilities
|
||||
|
||||
@@ -195,6 +220,22 @@ function getExecution(
|
||||
_exclude: Set<string>,
|
||||
options?: BuildSystemPromptOptions,
|
||||
): string {
|
||||
if (options?.guiClickOnly) {
|
||||
return `<execution>
|
||||
## Execution
|
||||
|
||||
- Use \`click\` for visible page targets. It is the only click path that should choose page coordinates.
|
||||
- Use \`hover\` for visible hover targets, \`type_text\` after focusing a field, and \`scroll\` to move the viewport.
|
||||
- Use \`take_screenshot\` when you need explicit visual feedback about the current page before choosing the next action.
|
||||
- After each \`click\` or \`hover\`, inspect the returned \`hitElement\` before choosing the next action. If it is null or does not match the intended target, use \`take_screenshot\` or retry with a more specific visual prompt.
|
||||
- Use \`new_page\` or \`navigate_page\` to open websites. Use \`get_active_page\`, \`list_pages\`, and \`close_page\` only when needed for page management.
|
||||
- Use the Page ID from Browser Context directly.
|
||||
- Do not try to observe the page with snapshots, DOM, accessibility trees, scripts, link extraction, or text extraction.
|
||||
- You are blind to page content except for explicit \`take_screenshot\` results. Make one concise visual click prompt at a time, then continue from your best estimate of the resulting page state.
|
||||
- If the task clearly cannot proceed without page observation, say what blocked you.
|
||||
</execution>`
|
||||
}
|
||||
|
||||
const isNewTab = options?.origin === 'newtab'
|
||||
|
||||
let executionContent = `<execution>
|
||||
@@ -283,6 +324,21 @@ function getToolSelection(
|
||||
_exclude: Set<string>,
|
||||
options?: BuildSystemPromptOptions,
|
||||
): string {
|
||||
if (options?.guiClickOnly) {
|
||||
return `<tool_selection>
|
||||
## Tool Selection
|
||||
|
||||
- Use \`click\` for visible page targets.
|
||||
- Use \`hover\` for hover menus or targets that reveal content.
|
||||
- Use \`type_text\` only after a prior GUI click likely focused a text input. Include a newline in \`text\` when you need to submit with Enter.
|
||||
- Use \`scroll\` to move the page viewport when the target is likely below or above the visible area.
|
||||
- Use \`take_screenshot\` when you need visual feedback before deciding what to click, type, hover, or scroll next.
|
||||
- The \`prompt\` argument should describe the visible target to click, for example: "click the search box", "click the Add to Cart button", or "click the first product result".
|
||||
- Use page-opening and page-management tools only to get to the website or manage tabs; they do not replace visual page clicking.
|
||||
- Do not request or rely on element IDs, snapshots, DOM nodes, page text, scripts, link extraction, or coordinate click tools.
|
||||
</tool_selection>`
|
||||
}
|
||||
|
||||
const isNewTab = options?.origin === 'newtab'
|
||||
|
||||
const navTable = isNewTab
|
||||
@@ -413,6 +469,19 @@ function getErrorRecovery(
|
||||
): string {
|
||||
const hasWorkspace = !!options?.workspaceDir
|
||||
|
||||
if (options?.guiClickOnly) {
|
||||
return `<error_recovery>
|
||||
## Error Recovery
|
||||
|
||||
### Browser interaction errors
|
||||
- If a click does not appear to make progress, try one more click with a more specific visual prompt.
|
||||
- After 2 failed attempts, describe the blocker and ask the user for guidance.
|
||||
|
||||
### Page errors
|
||||
- If you infer that a site is blocked by login, CAPTCHA, 2FA, geo-blocking, or payment confirmation, pause and ask the user to handle it.
|
||||
</error_recovery>`
|
||||
}
|
||||
|
||||
let recovery = `<error_recovery>
|
||||
## Error Recovery
|
||||
|
||||
@@ -455,6 +524,28 @@ function getErrorRecovery(
|
||||
return recovery
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// section: eval-mode
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
function getEvalMode(
|
||||
_exclude: Set<string>,
|
||||
options?: BuildSystemPromptOptions,
|
||||
): string {
|
||||
if (!options?.evalMode) return ''
|
||||
|
||||
return `<eval_mode>
|
||||
## Eval Mode
|
||||
|
||||
You are running inside a benchmark environment. The user's task is pre-authorized for the eval clone site.
|
||||
|
||||
- Do not ask the user for confirmation before clicking final submit, place-order, book, send, save, create, or similar completion buttons required by the task.
|
||||
- If the task asks you to complete a workflow and the visible page asks for fields the user did not specify, choose reasonable dummy values.
|
||||
- Complete the task end-to-end using the available tools, then report the outcome.
|
||||
- Only pause for credentials, CAPTCHA, 2FA, real payment confirmation, or blocked site access.
|
||||
</eval_mode>`
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// section: memory-and-identity
|
||||
// -----------------------------------------------------------------------------
|
||||
@@ -601,6 +692,20 @@ function getStyle(
|
||||
_exclude: Set<string>,
|
||||
options?: BuildSystemPromptOptions,
|
||||
): string {
|
||||
if (options?.guiClickOnly) {
|
||||
return `<style_rules>
|
||||
## Style
|
||||
|
||||
<tool_call_style>
|
||||
- Keep click prompts concise and visual.
|
||||
- Do not narrate routine clicks before calling the tool.
|
||||
</tool_call_style>
|
||||
|
||||
- Be concise.
|
||||
- Report blockers plainly when GUI clicks and page opening are insufficient.
|
||||
</style_rules>`
|
||||
}
|
||||
|
||||
const hasWorkspace = !!options?.workspaceDir
|
||||
|
||||
let style = `<style_rules>
|
||||
@@ -664,8 +769,9 @@ function getUserContext(
|
||||
'\nYou are running as a **scheduled background task** on a system-managed hidden page.'
|
||||
}
|
||||
|
||||
pageCtx +=
|
||||
'\n\n**CRITICAL RULES:**\n1. **Do NOT call `get_active_page` or `list_pages` to find your starting page.** Use the **page ID from the Browser Context** directly.'
|
||||
pageCtx += options?.guiClickOnly
|
||||
? '\n\n**CRITICAL RULE:** Use the **page ID from the Browser Context** directly when calling `click`.'
|
||||
: '\n\n**CRITICAL RULES:**\n1. **Do NOT call `get_active_page` or `list_pages` to find your starting page.** Use the **page ID from the Browser Context** directly.'
|
||||
|
||||
if (options?.isScheduledTask) {
|
||||
const pageRef = options.scheduledTaskPageId
|
||||
@@ -725,6 +831,7 @@ const promptSections: Record<string, PromptSectionFn> = {
|
||||
) => getToolSelection(_exclude, options),
|
||||
'external-integrations': getExternalIntegrations,
|
||||
'error-recovery': getErrorRecovery,
|
||||
'eval-mode': getEvalMode,
|
||||
'memory-and-identity': getMemoryAndIdentity,
|
||||
workspace: getWorkspace,
|
||||
skills: (_exclude: Set<string>, options?: BuildSystemPromptOptions) =>
|
||||
@@ -751,10 +858,21 @@ export interface BuildSystemPromptOptions {
|
||||
skillsCatalog?: string
|
||||
/** Where the chat session originates from — determines navigation behavior. */
|
||||
origin?: 'sidepanel' | 'newtab'
|
||||
/** Experimental mode: browser control is limited to GUI-backed click only. */
|
||||
guiClickOnly?: boolean
|
||||
/** Eval mode: benchmark tasks are pre-authorized within clone sites. */
|
||||
evalMode?: boolean
|
||||
}
|
||||
|
||||
export function buildSystemPrompt(options?: BuildSystemPromptOptions): string {
|
||||
const exclude = new Set(options?.exclude)
|
||||
if (options?.guiClickOnly) {
|
||||
exclude.add('external-integrations')
|
||||
exclude.add('memory-and-identity')
|
||||
exclude.add('workspace')
|
||||
exclude.add('skills')
|
||||
exclude.add('nudges')
|
||||
}
|
||||
|
||||
const sections = Object.entries(promptSections)
|
||||
.filter(([key]) => !exclude.has(key))
|
||||
|
||||
@@ -44,13 +44,46 @@ function createGoogleFactory(
|
||||
return createGoogleGenerativeAI({ apiKey: config.apiKey })
|
||||
}
|
||||
|
||||
function buildOpenRouterExtraBody(
|
||||
config: ResolvedAgentConfig,
|
||||
): Record<string, unknown> {
|
||||
const body: Record<string, unknown> = {}
|
||||
|
||||
if (config.reasoning?.enabled !== undefined) {
|
||||
body.reasoning = { enabled: config.reasoning.enabled }
|
||||
}
|
||||
if (config.verbosity !== undefined) body.verbosity = config.verbosity
|
||||
if (config.providerRouting !== undefined) {
|
||||
body.provider = {
|
||||
...(config.providerRouting.order !== undefined && {
|
||||
order: config.providerRouting.order,
|
||||
}),
|
||||
...(config.providerRouting.only !== undefined && {
|
||||
only: config.providerRouting.only,
|
||||
}),
|
||||
...(config.providerRouting.ignore !== undefined && {
|
||||
ignore: config.providerRouting.ignore,
|
||||
}),
|
||||
...(config.providerRouting.allowFallbacks !== undefined && {
|
||||
allow_fallbacks: config.providerRouting.allowFallbacks,
|
||||
}),
|
||||
...(config.providerRouting.requireParameters !== undefined && {
|
||||
require_parameters: config.providerRouting.requireParameters,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
function createOpenRouterFactory(
|
||||
config: ResolvedAgentConfig,
|
||||
): (modelId: string) => unknown {
|
||||
if (!config.apiKey) throw new Error('OpenRouter provider requires apiKey')
|
||||
const extraBody = buildOpenRouterExtraBody(config)
|
||||
return createOpenRouter({
|
||||
apiKey: config.apiKey,
|
||||
extraBody: { reasoning: {} },
|
||||
...(Object.keys(extraBody).length > 0 ? { extraBody } : {}),
|
||||
fetch: createOpenRouterCompatibleFetch(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -79,6 +79,7 @@ export function buildBrowserToolSet(
|
||||
content: result.content,
|
||||
isError: result.isError ?? false,
|
||||
metadata: result.metadata,
|
||||
structuredContent: result.structuredContent,
|
||||
}
|
||||
} catch (error) {
|
||||
const errorText =
|
||||
|
||||
@@ -4,7 +4,10 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
import type { ToolApprovalConfig } from '@browseros/shared/constants/tool-approval'
|
||||
import type { LLMProvider } from '@browseros/shared/schemas/llm'
|
||||
import type {
|
||||
LLMProvider,
|
||||
OpenRouterProviderRouting,
|
||||
} from '@browseros/shared/schemas/llm'
|
||||
|
||||
export interface ProviderConfig {
|
||||
provider: LLMProvider
|
||||
@@ -34,6 +37,9 @@ export interface ResolvedAgentConfig {
|
||||
accountId?: string
|
||||
reasoningEffort?: string
|
||||
reasoningSummary?: string
|
||||
reasoning?: { enabled?: boolean; maxTokens?: number; effort?: string }
|
||||
verbosity?: 'low' | 'medium' | 'high' | 'xhigh' | 'max'
|
||||
providerRouting?: OpenRouterProviderRouting
|
||||
contextWindowSize?: number
|
||||
userSystemPrompt?: string
|
||||
workingDir?: string
|
||||
|
||||
@@ -39,10 +39,13 @@ import {
|
||||
MessageQueueFullError,
|
||||
type OpenClawProvisioner,
|
||||
OpenClawProvisionerUnavailableError,
|
||||
type ProducedFileEntry,
|
||||
type ProducedFilesRailGroup,
|
||||
type QueuedMessage,
|
||||
TurnAlreadyActiveError,
|
||||
UnknownAgentError,
|
||||
} from '../services/agents/agent-harness-service'
|
||||
import type { FilePreview } from '../services/openclaw/file-preview'
|
||||
import type { OpenClawGatewayChatClient } from '../services/openclaw/openclaw-gateway-chat-client'
|
||||
import type { Env } from '../types'
|
||||
import { resolveBrowserContextPageIds } from '../utils/resolve-browser-context-page-ids'
|
||||
@@ -95,6 +98,23 @@ type AgentRouteService = {
|
||||
messageId: string
|
||||
}): Promise<boolean>
|
||||
listQueuedMessages(agentId: string): Promise<QueuedMessage[]>
|
||||
|
||||
// Files API — Phase 3 of TKT-762.
|
||||
listAgentFiles(
|
||||
agentId: string,
|
||||
options?: { limit?: number },
|
||||
): Promise<ProducedFilesRailGroup[]>
|
||||
listAgentFilesForTurn(
|
||||
agentId: string,
|
||||
turnId: string,
|
||||
): Promise<ProducedFileEntry[]>
|
||||
previewProducedFile(fileId: string): Promise<FilePreview | null>
|
||||
resolveProducedFileForDownload(fileId: string): Promise<{
|
||||
absolutePath: string
|
||||
fileName: string
|
||||
mimeType: string
|
||||
size: number
|
||||
} | null>
|
||||
}
|
||||
|
||||
type AgentRouteDeps = {
|
||||
@@ -146,260 +166,372 @@ export function createAgentRoutes(deps: AgentRouteDeps = {}) {
|
||||
// tests can swap in an alternate via deps if needed.
|
||||
const adapterHealth = deps.adapterHealth ?? new AdapterHealthChecker()
|
||||
|
||||
return new Hono<Env>()
|
||||
.get('/adapters', async (c) => {
|
||||
const adapters = await Promise.all(
|
||||
AGENT_ADAPTER_CATALOG.map(async (descriptor) => ({
|
||||
...descriptor,
|
||||
health: await adapterHealth.getHealth(descriptor.id),
|
||||
})),
|
||||
)
|
||||
return c.json({ adapters })
|
||||
})
|
||||
.get('/', async (c) => {
|
||||
// Single round-trip the agents page consumes: enriched agents
|
||||
// (status + lastUsedAt) plus the gateway lifecycle snapshot the
|
||||
// GatewayStatusBar / GatewayStateCards / ControlPlaneAlert used
|
||||
// to fetch from `/claw/status`. Lets the page poll one endpoint.
|
||||
const [agents, gateway] = await Promise.all([
|
||||
service.listAgentsWithActivity(),
|
||||
service.getGatewayStatus(),
|
||||
])
|
||||
return c.json({ agents, gateway })
|
||||
})
|
||||
.post('/', async (c) => {
|
||||
const parsed = await parseCreateAgentBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
return c.json({ agent: await service.createAgent(parsed) })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/sidepanel/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseSidepanelAgentChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
try {
|
||||
const agent = await service.getAgent(agentId)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
|
||||
let browserContext = parsed.browserContext
|
||||
if (deps.browser) {
|
||||
browserContext = await resolveBrowserContextPageIds(
|
||||
deps.browser,
|
||||
browserContext,
|
||||
)
|
||||
}
|
||||
|
||||
const userContent = formatUserMessage(
|
||||
parsed.message,
|
||||
browserContext,
|
||||
parsed.selectedText,
|
||||
parsed.selectedTextSource,
|
||||
return (
|
||||
new Hono<Env>()
|
||||
.get('/adapters', async (c) => {
|
||||
const adapters = await Promise.all(
|
||||
AGENT_ADAPTER_CATALOG.map(async (descriptor) => ({
|
||||
...descriptor,
|
||||
health: await adapterHealth.getHealth(descriptor.id),
|
||||
})),
|
||||
)
|
||||
const message = parsed.userSystemPrompt?.trim()
|
||||
? `${parsed.userSystemPrompt.trim()}\n\n${userContent}`
|
||||
: userContent
|
||||
return c.json({ adapters })
|
||||
})
|
||||
.get('/', async (c) => {
|
||||
// Single round-trip the agents page consumes: enriched agents
|
||||
// (status + lastUsedAt) plus the gateway lifecycle snapshot the
|
||||
// GatewayStatusBar / GatewayStateCards / ControlPlaneAlert used
|
||||
// to fetch from `/claw/status`. Lets the page poll one endpoint.
|
||||
const [agents, gateway] = await Promise.all([
|
||||
service.listAgentsWithActivity(),
|
||||
service.getGatewayStatus(),
|
||||
])
|
||||
return c.json({ agents, gateway })
|
||||
})
|
||||
.post('/', async (c) => {
|
||||
const parsed = await parseCreateAgentBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
return c.json({ agent: await service.createAgent(parsed) })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/sidepanel/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseSidepanelAgentChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
try {
|
||||
const agent = await service.getAgent(agentId)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
|
||||
let browserContext = parsed.browserContext
|
||||
if (deps.browser) {
|
||||
browserContext = await resolveBrowserContextPageIds(
|
||||
deps.browser,
|
||||
browserContext,
|
||||
)
|
||||
}
|
||||
|
||||
const userContent = formatUserMessage(
|
||||
parsed.message,
|
||||
browserContext,
|
||||
parsed.selectedText,
|
||||
parsed.selectedTextSource,
|
||||
)
|
||||
const message = parsed.userSystemPrompt?.trim()
|
||||
? `${parsed.userSystemPrompt.trim()}\n\n${userContent}`
|
||||
: userContent
|
||||
|
||||
let started: { turnId: string; frames: ReadableStream<TurnFrame> }
|
||||
try {
|
||||
started = await service.startTurn({
|
||||
agentId: agent.id,
|
||||
message,
|
||||
cwd: parsed.userWorkingDir,
|
||||
})
|
||||
} catch (err) {
|
||||
if (err instanceof TurnAlreadyActiveError) {
|
||||
return c.json(
|
||||
{
|
||||
error: 'Turn already active',
|
||||
turnId: err.turnId,
|
||||
attachUrl: `/agents/${agent.id}/chat/stream?turnId=${err.turnId}`,
|
||||
},
|
||||
409,
|
||||
)
|
||||
}
|
||||
throw err
|
||||
}
|
||||
|
||||
let didRequestCancel = false
|
||||
const cancelStartedTurn = () => {
|
||||
if (didRequestCancel) return
|
||||
didRequestCancel = true
|
||||
service.cancelTurn({
|
||||
agentId: agent.id,
|
||||
turnId: started.turnId,
|
||||
reason: 'sidepanel stream cancelled',
|
||||
})
|
||||
}
|
||||
if (c.req.raw.signal.aborted) {
|
||||
cancelStartedTurn()
|
||||
} else {
|
||||
c.req.raw.signal.addEventListener('abort', cancelStartedTurn, {
|
||||
once: true,
|
||||
})
|
||||
}
|
||||
|
||||
const events = turnFramesToAgentEvents(started.frames, {
|
||||
onCancel: cancelStartedTurn,
|
||||
})
|
||||
|
||||
return createAcpUIMessageStreamResponse(events, {
|
||||
headers: {
|
||||
'X-Session-Id': 'main',
|
||||
'X-Turn-Id': started.turnId,
|
||||
},
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId', async (c) => {
|
||||
try {
|
||||
const agent = await service.getAgent(c.req.param('agentId'))
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId', async (c) => {
|
||||
try {
|
||||
return c.json({
|
||||
success: await service.deleteAgent(c.req.param('agentId')),
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.patch('/:agentId', async (c) => {
|
||||
const parsed = await parseAgentPatchBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const agent = await service.updateAgent(
|
||||
c.req.param('agentId'),
|
||||
parsed.patch,
|
||||
)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/sessions/main/history', async (c) => {
|
||||
try {
|
||||
return c.json(await service.getHistory(c.req.param('agentId')))
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
let started: { turnId: string; frames: ReadableStream<TurnFrame> }
|
||||
try {
|
||||
started = await service.startTurn({
|
||||
agentId: agent.id,
|
||||
message,
|
||||
cwd: parsed.userWorkingDir,
|
||||
agentId,
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
cwd: parsed.cwd,
|
||||
})
|
||||
} catch (err) {
|
||||
if (err instanceof TurnAlreadyActiveError) {
|
||||
// Caller can attach via GET /chat/stream?turnId=… instead.
|
||||
return c.json(
|
||||
{
|
||||
error: 'Turn already active',
|
||||
turnId: err.turnId,
|
||||
attachUrl: `/agents/${agent.id}/chat/stream?turnId=${err.turnId}`,
|
||||
attachUrl: `/agents/${agentId}/chat/stream?turnId=${err.turnId}`,
|
||||
},
|
||||
409,
|
||||
)
|
||||
}
|
||||
throw err
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
|
||||
let didRequestCancel = false
|
||||
const cancelStartedTurn = () => {
|
||||
if (didRequestCancel) return
|
||||
didRequestCancel = true
|
||||
service.cancelTurn({
|
||||
agentId: agent.id,
|
||||
turnId: started.turnId,
|
||||
reason: 'sidepanel stream cancelled',
|
||||
})
|
||||
}
|
||||
if (c.req.raw.signal.aborted) {
|
||||
cancelStartedTurn()
|
||||
} else {
|
||||
c.req.raw.signal.addEventListener('abort', cancelStartedTurn, {
|
||||
once: true,
|
||||
})
|
||||
}
|
||||
|
||||
const events = turnFramesToAgentEvents(started.frames, {
|
||||
onCancel: cancelStartedTurn,
|
||||
return streamTurnFrames(c, started.frames, {
|
||||
turnId: started.turnId,
|
||||
})
|
||||
|
||||
return createAcpUIMessageStreamResponse(events, {
|
||||
headers: {
|
||||
'X-Session-Id': 'main',
|
||||
'X-Turn-Id': started.turnId,
|
||||
},
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId', async (c) => {
|
||||
try {
|
||||
const agent = await service.getAgent(c.req.param('agentId'))
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId', async (c) => {
|
||||
try {
|
||||
return c.json({
|
||||
success: await service.deleteAgent(c.req.param('agentId')),
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.patch('/:agentId', async (c) => {
|
||||
const parsed = await parseAgentPatchBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const agent = await service.updateAgent(
|
||||
c.req.param('agentId'),
|
||||
parsed.patch,
|
||||
)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/sessions/main/history', async (c) => {
|
||||
try {
|
||||
return c.json(await service.getHistory(c.req.param('agentId')))
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
let started: { turnId: string; frames: ReadableStream<TurnFrame> }
|
||||
try {
|
||||
started = await service.startTurn({
|
||||
agentId,
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
cwd: parsed.cwd,
|
||||
})
|
||||
} catch (err) {
|
||||
if (err instanceof TurnAlreadyActiveError) {
|
||||
// Caller can attach via GET /chat/stream?turnId=… instead.
|
||||
return c.json(
|
||||
{
|
||||
error: 'Turn already active',
|
||||
turnId: err.turnId,
|
||||
attachUrl: `/agents/${agentId}/chat/stream?turnId=${err.turnId}`,
|
||||
},
|
||||
409,
|
||||
)
|
||||
}
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
|
||||
return streamTurnFrames(c, started.frames, {
|
||||
turnId: started.turnId,
|
||||
})
|
||||
})
|
||||
.get('/:agentId/chat/active', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const info = service.getActiveTurn(agentId, 'main')
|
||||
return c.json({ active: info })
|
||||
})
|
||||
.get('/:agentId/chat/stream', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const url = new URL(c.req.url)
|
||||
const queryTurnId = url.searchParams.get('turnId')?.trim() || undefined
|
||||
const turnId =
|
||||
queryTurnId ?? service.getActiveTurn(agentId, 'main')?.turnId
|
||||
if (!turnId) {
|
||||
return c.json({ error: 'No active turn for this agent' }, 404)
|
||||
}
|
||||
const lastEventId =
|
||||
c.req.header('Last-Event-ID') ??
|
||||
url.searchParams.get('lastSeq') ??
|
||||
undefined
|
||||
const lastSeq = parseLastSeq(lastEventId)
|
||||
const frames = service.attachTurn({ turnId, lastSeq })
|
||||
if (!frames) {
|
||||
return c.json({ error: 'Unknown turn' }, 404)
|
||||
}
|
||||
return streamTurnFrames(c, frames, { turnId })
|
||||
})
|
||||
.post('/:agentId/chat/cancel', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const body = await readJsonBody(c)
|
||||
const turnId =
|
||||
'value' in body && typeof body.value.turnId === 'string'
|
||||
? body.value.turnId.trim() || undefined
|
||||
: undefined
|
||||
const reason =
|
||||
'value' in body && typeof body.value.reason === 'string'
|
||||
? body.value.reason
|
||||
: undefined
|
||||
const cancelled = service.cancelTurn({ agentId, turnId, reason })
|
||||
return c.json({ cancelled })
|
||||
})
|
||||
.get('/:agentId/queue', async (c) => {
|
||||
try {
|
||||
const queue = await service.listQueuedMessages(c.req.param('agentId'))
|
||||
return c.json({ queue })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/queue', async (c) => {
|
||||
const parsed = await parseEnqueueBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const queued = await service.enqueueMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
})
|
||||
return c.json({ queued })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId/queue/:messageId', async (c) => {
|
||||
try {
|
||||
const removed = await service.removeQueuedMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
messageId: c.req.param('messageId'),
|
||||
})
|
||||
if (!removed) return c.json({ error: 'Queued message not found' }, 404)
|
||||
return c.json({ removed })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/chat/active', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const info = service.getActiveTurn(agentId, 'main')
|
||||
return c.json({ active: info })
|
||||
})
|
||||
.get('/:agentId/chat/stream', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const url = new URL(c.req.url)
|
||||
const queryTurnId = url.searchParams.get('turnId')?.trim() || undefined
|
||||
const turnId =
|
||||
queryTurnId ?? service.getActiveTurn(agentId, 'main')?.turnId
|
||||
if (!turnId) {
|
||||
return c.json({ error: 'No active turn for this agent' }, 404)
|
||||
}
|
||||
const lastEventId =
|
||||
c.req.header('Last-Event-ID') ??
|
||||
url.searchParams.get('lastSeq') ??
|
||||
undefined
|
||||
const lastSeq = parseLastSeq(lastEventId)
|
||||
const frames = service.attachTurn({ turnId, lastSeq })
|
||||
if (!frames) {
|
||||
return c.json({ error: 'Unknown turn' }, 404)
|
||||
}
|
||||
return streamTurnFrames(c, frames, { turnId })
|
||||
})
|
||||
.post('/:agentId/chat/cancel', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const body = await readJsonBody(c)
|
||||
const turnId =
|
||||
'value' in body && typeof body.value.turnId === 'string'
|
||||
? body.value.turnId.trim() || undefined
|
||||
: undefined
|
||||
const reason =
|
||||
'value' in body && typeof body.value.reason === 'string'
|
||||
? body.value.reason
|
||||
: undefined
|
||||
const cancelled = service.cancelTurn({ agentId, turnId, reason })
|
||||
return c.json({ cancelled })
|
||||
})
|
||||
.get('/:agentId/queue', async (c) => {
|
||||
try {
|
||||
const queue = await service.listQueuedMessages(c.req.param('agentId'))
|
||||
return c.json({ queue })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/queue', async (c) => {
|
||||
const parsed = await parseEnqueueBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const queued = await service.enqueueMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
})
|
||||
return c.json({ queued })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId/queue/:messageId', async (c) => {
|
||||
try {
|
||||
const removed = await service.removeQueuedMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
messageId: c.req.param('messageId'),
|
||||
})
|
||||
if (!removed)
|
||||
return c.json({ error: 'Queued message not found' }, 404)
|
||||
return c.json({ removed })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
|
||||
// ── Files (TKT-762) ────────────────────────────────────────────
|
||||
//
|
||||
// V1 surfaces files OpenClaw agents produce inside their workspace
|
||||
// dir (`~/.browseros/vm/openclaw/.openclaw/workspace[-<name>]/`)
|
||||
// as outputs, attributed back to the chat turn that produced them
|
||||
// by the per-turn workspace diff in
|
||||
// `agent-harness-service.runDetachedTurn`. Adapter-gated to
|
||||
// openclaw on the service side; for claude / codex these endpoints
|
||||
// simply return empty lists.
|
||||
//
|
||||
// The file-id-scoped endpoints (`/files/:fileId/{preview,download}`)
|
||||
// accept an opaque `fileId` and resolve the on-disk path
|
||||
// server-side, so the client never sees a raw path and traversal
|
||||
// is impossible by construction.
|
||||
|
||||
.get('/:agentId/files', async (c) => {
|
||||
try {
|
||||
const groups = await service.listAgentFiles(
|
||||
c.req.param('agentId'),
|
||||
parseAgentFilesLimit(c.req.query('limit')),
|
||||
)
|
||||
return c.json({ groups })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/files/turn/:turnId', async (c) => {
|
||||
try {
|
||||
const files = await service.listAgentFilesForTurn(
|
||||
c.req.param('agentId'),
|
||||
c.req.param('turnId'),
|
||||
)
|
||||
return c.json({ files })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/files/:fileId/preview', async (c) => {
|
||||
try {
|
||||
const preview = await service.previewProducedFile(
|
||||
c.req.param('fileId'),
|
||||
)
|
||||
if (!preview || preview.kind === 'missing') {
|
||||
return c.json({ error: 'File not found' }, 404)
|
||||
}
|
||||
return c.json(preview)
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/files/:fileId/download', async (c) => {
|
||||
try {
|
||||
const resolved = await service.resolveProducedFileForDownload(
|
||||
c.req.param('fileId'),
|
||||
)
|
||||
if (!resolved) return c.json({ error: 'File not found' }, 404)
|
||||
|
||||
// Stream raw bytes via Bun's lazy file handle. Sets
|
||||
// Content-Disposition so browsers save instead of preview.
|
||||
const file = Bun.file(resolved.absolutePath)
|
||||
return new Response(file.stream(), {
|
||||
headers: {
|
||||
'Content-Type': resolved.mimeType,
|
||||
'Content-Length': String(resolved.size),
|
||||
'Content-Disposition': `attachment; ${encodeRfc6266Filename(resolved.fileName)}`,
|
||||
'Cache-Control': 'no-store',
|
||||
},
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
/** Hard cap on `?limit=` for /agents/:id/files — guards against
|
||||
* a caller-supplied huge value forcing a per-agent table scan. */
|
||||
const MAX_FILES_LIMIT = 500
|
||||
|
||||
/**
|
||||
* Parse + clamp the `limit` query for /agents/:id/files. Returns
|
||||
* `undefined` when the param is absent or unparseable so the
|
||||
* service falls back to its own default.
|
||||
*/
|
||||
function parseAgentFilesLimit(
|
||||
raw: string | undefined,
|
||||
): { limit: number } | undefined {
|
||||
if (!raw) return undefined
|
||||
const parsed = Number.parseInt(raw, 10)
|
||||
if (!Number.isFinite(parsed)) return undefined
|
||||
return { limit: Math.min(Math.max(1, parsed), MAX_FILES_LIMIT) }
|
||||
}
|
||||
|
||||
/**
|
||||
* RFC 6266 / RFC 5987 filename attributes for `Content-Disposition`.
|
||||
* Returns the `filename="..."` attribute (always) plus a
|
||||
* percent-encoded `filename*=UTF-8''…` attribute when the name
|
||||
* contains non-ASCII characters, so browsers download with the
|
||||
* original name even on stricter HTTP clients.
|
||||
*/
|
||||
function encodeRfc6266Filename(filename: string): string {
|
||||
// Strip CRLFs and quotes (header injection guard).
|
||||
const safe = filename.replace(/["\r\n]/g, '_')
|
||||
// Detect non-ASCII; emit the RFC 5987 fallback attribute when
|
||||
// present. `encodeURIComponent` is the standard browser-safe
|
||||
// percent-encoder for this purpose.
|
||||
const hasNonAscii = /[^ -~]/.test(safe)
|
||||
if (!hasNonAscii) return `filename="${safe}"`
|
||||
return `filename="${safe}"; filename*=UTF-8''${encodeURIComponent(safe)}`
|
||||
}
|
||||
|
||||
function turnFramesToAgentEvents(
|
||||
|
||||
@@ -46,6 +46,7 @@ import {
|
||||
connectKlavisInBackground,
|
||||
type KlavisProxyRef,
|
||||
} from './services/klavis/strata-proxy'
|
||||
import { convertOpenClawHistoryToAgentHistory } from './services/openclaw/history-mapper'
|
||||
import { OpenClawGatewayChatClient } from './services/openclaw/openclaw-gateway-chat-client'
|
||||
import { getOpenClawService } from './services/openclaw/openclaw-service'
|
||||
import type { Env, HttpServerConfig } from './types'
|
||||
@@ -159,6 +160,15 @@ export async function createHttpServer(config: HttpServerConfig) {
|
||||
}))
|
||||
},
|
||||
getStatus: () => getOpenClawService().getStatus(),
|
||||
getAgentHistory: async (agentId) => {
|
||||
// Aggregated across the agent's main + every sub-session
|
||||
// (cron / hook / channel) so autonomous turns surface in
|
||||
// the chat panel alongside user-initiated ones.
|
||||
const raw = await getOpenClawService().getSessionHistory(
|
||||
`agent:${agentId}:main`,
|
||||
)
|
||||
return convertOpenClawHistoryToAgentHistory(agentId, raw)
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
@@ -31,14 +31,27 @@ export {
|
||||
type QueuedMessageAttachment,
|
||||
} from '../../../lib/agents/message-queue'
|
||||
|
||||
import { basename } from 'node:path'
|
||||
import type {
|
||||
AgentHistoryPage,
|
||||
AgentRowSnapshot,
|
||||
AgentRuntime,
|
||||
AgentStreamEvent,
|
||||
} from '../../../lib/agents/types'
|
||||
import { getOpenClawDir } from '../../../lib/browseros-dir'
|
||||
import { logger } from '../../../lib/logger'
|
||||
import {
|
||||
buildFilePreview,
|
||||
detectMimeType,
|
||||
type FilePreview,
|
||||
} from '../openclaw/file-preview'
|
||||
import { getHostWorkspaceDir } from '../openclaw/openclaw-env'
|
||||
import type { OpenClawGatewayChatClient } from '../openclaw/openclaw-gateway-chat-client'
|
||||
import {
|
||||
type FileSnapshot,
|
||||
type ProducedFileRow,
|
||||
ProducedFilesStore,
|
||||
} from '../openclaw/produced-files-store'
|
||||
|
||||
export type AgentLiveness = 'working' | 'idle' | 'asleep' | 'error'
|
||||
|
||||
@@ -120,6 +133,15 @@ export interface OpenClawProvisioner {
|
||||
* gateway is not configured at all).
|
||||
*/
|
||||
getStatus?(): Promise<GatewayStatusSnapshot | null>
|
||||
/**
|
||||
* Optional. When wired, the harness uses this for `getHistory` on
|
||||
* openclaw-adapter agents so the chat panel sees autonomous
|
||||
* (cron / hook / channel) turns alongside user-typed turns. Without
|
||||
* this, history reads come from AcpxRuntime's local session record
|
||||
* which only contains user-initiated turns — autonomous activity
|
||||
* fires correctly but stays invisible to the panel.
|
||||
*/
|
||||
getAgentHistory?(agentId: string): Promise<AgentHistoryPage>
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -158,6 +180,14 @@ export class AgentHarnessService {
|
||||
private readonly openclawProvisioner: OpenClawProvisioner | null
|
||||
private readonly turnRegistry: TurnRegistry
|
||||
private readonly messageQueue: FileMessageQueue
|
||||
/**
|
||||
* Lazy-initialised so tests that swap in a fake `agentStore` don't
|
||||
* eagerly hit `getDb()` (which throws when the test harness hasn't
|
||||
* called `initializeDb`). Tests that exercise file attribution can
|
||||
* inject an explicit store via `deps.producedFilesStore`.
|
||||
*/
|
||||
private explicitProducedFilesStore: ProducedFilesStore | null = null
|
||||
private cachedProducedFilesStore: ProducedFilesStore | null = null
|
||||
private inFlightReconcile: Promise<void> | null = null
|
||||
// In-memory liveness tracker. Lost on server restart (acceptable —
|
||||
// `lastUsedAt` survives via the acpx session record's `lastUsedAt`,
|
||||
@@ -178,6 +208,7 @@ export class AgentHarnessService {
|
||||
openclawProvisioner?: OpenClawProvisioner
|
||||
turnRegistry?: TurnRegistry
|
||||
messageQueue?: FileMessageQueue
|
||||
producedFilesStore?: ProducedFilesStore
|
||||
} = {},
|
||||
) {
|
||||
this.agentStore = deps.agentStore ?? new DbAgentStore()
|
||||
@@ -191,6 +222,9 @@ export class AgentHarnessService {
|
||||
this.openclawProvisioner = deps.openclawProvisioner ?? null
|
||||
this.turnRegistry = deps.turnRegistry ?? new TurnRegistry()
|
||||
this.messageQueue = deps.messageQueue ?? new FileMessageQueue()
|
||||
if (deps.producedFilesStore) {
|
||||
this.explicitProducedFilesStore = deps.producedFilesStore
|
||||
}
|
||||
// Drain any agents whose queue file survived a restart. The check
|
||||
// for `getActiveFor` inside `maybeStartNextFromQueue` guards
|
||||
// against double-firing if the in-memory turn registry happens to
|
||||
@@ -599,9 +633,112 @@ export class AgentHarnessService {
|
||||
|
||||
async getHistory(agentId: string): Promise<AgentHistoryPage> {
|
||||
const agent = await this.requireAgent(agentId)
|
||||
// OpenClaw agents persist conversation in the gateway, not in the
|
||||
// AcpxRuntime's local session record. Reading the local record
|
||||
// would miss autonomous (cron / hook / channel) turns. Route
|
||||
// through the provisioner so the panel sees the full history.
|
||||
if (
|
||||
agent.adapter === 'openclaw' &&
|
||||
this.openclawProvisioner?.getAgentHistory
|
||||
) {
|
||||
return this.openclawProvisioner.getAgentHistory(agentId)
|
||||
}
|
||||
return this.runtime.getHistory({ agent, sessionId: 'main' })
|
||||
}
|
||||
|
||||
// ── Produced files (Files rail / inline artifact card) ───────────
|
||||
|
||||
/**
|
||||
* Outputs-rail data for one agent. Returns groups of files keyed
|
||||
* by the assistant turn that produced them, newest first. Empty
|
||||
* array when the agent hasn't produced anything yet, or when the
|
||||
* adapter doesn't track outputs (claude / codex — see Phase 2
|
||||
* commit).
|
||||
*/
|
||||
async listAgentFiles(
|
||||
agentId: string,
|
||||
options: { limit?: number } = {},
|
||||
): Promise<ProducedFilesRailGroup[]> {
|
||||
const agent = await this.requireAgent(agentId)
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return []
|
||||
const rows = await store.listByAgent(agent.id, options)
|
||||
return store
|
||||
.groupByTurn(rows)
|
||||
.map(({ turnId, turnPrompt, createdAt, files }) => ({
|
||||
turnId,
|
||||
turnPrompt,
|
||||
createdAt,
|
||||
files: files.map(toProducedFileEntry),
|
||||
}))
|
||||
}
|
||||
|
||||
/**
|
||||
* Inline-card data for one assistant turn. Used by the SSE
|
||||
* `produced_files` event consumer to refresh metadata after the
|
||||
* turn completes; also handy for direct fetches by clients that
|
||||
* missed the live event.
|
||||
*/
|
||||
async listAgentFilesForTurn(
|
||||
agentId: string,
|
||||
turnId: string,
|
||||
): Promise<ProducedFileEntry[]> {
|
||||
await this.requireAgent(agentId)
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return []
|
||||
const rows = await store.listByTurn(turnId)
|
||||
return rows.map(toProducedFileEntry)
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a preview payload for a single file. Returns null when the
|
||||
* file id is unknown OR the on-disk path no longer exists. The
|
||||
* route layer maps null → 404.
|
||||
*/
|
||||
async previewProducedFile(fileId: string): Promise<FilePreview | null> {
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return null
|
||||
const row = await store.findById(fileId)
|
||||
if (!row) return null
|
||||
const agent = await this.agentStore.get(row.agentDefinitionId)
|
||||
if (!agent || agent.adapter !== 'openclaw') return null
|
||||
const workspaceDir = getHostWorkspaceDir(getOpenClawDir(), agent.name)
|
||||
const resolved = await store.resolveFilePath({ fileId, workspaceDir })
|
||||
if (!resolved) return null
|
||||
return buildFilePreview(resolved.absolutePath)
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a file id to an absolute on-disk path + metadata for the
|
||||
* download route to stream. Null when the file id is unknown or
|
||||
* the path escaped the workspace root (containment check happens
|
||||
* inside `producedFilesStore.resolveFilePath`).
|
||||
*/
|
||||
async resolveProducedFileForDownload(fileId: string): Promise<{
|
||||
absolutePath: string
|
||||
fileName: string
|
||||
mimeType: string
|
||||
size: number
|
||||
} | null> {
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return null
|
||||
const row = await store.findById(fileId)
|
||||
if (!row) return null
|
||||
const agent = await this.agentStore.get(row.agentDefinitionId)
|
||||
if (!agent || agent.adapter !== 'openclaw') return null
|
||||
const workspaceDir = getHostWorkspaceDir(getOpenClawDir(), agent.name)
|
||||
const resolved = await store.resolveFilePath({ fileId, workspaceDir })
|
||||
if (!resolved) return null
|
||||
const mimeType = await detectMimeType(resolved.absolutePath)
|
||||
const fileName = basename(row.path)
|
||||
return {
|
||||
absolutePath: resolved.absolutePath,
|
||||
fileName,
|
||||
mimeType,
|
||||
size: row.size,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Kick off a new agent turn that survives the caller's HTTP lifetime.
|
||||
* Events are pushed into a per-turn buffer; the returned `frames`
|
||||
@@ -728,6 +865,26 @@ export class AgentHarnessService {
|
||||
const turn = this.turnRegistry.get(turnId)
|
||||
if (!turn) return
|
||||
let lastErrorMessage: string | undefined
|
||||
|
||||
// Bracket openclaw turns with a workspace snapshot so any file the
|
||||
// agent produces during the turn is attributable back to it (rail
|
||||
// + inline artifact UX). Adapter-gated for v1 — Claude / Codex
|
||||
// write to the user's host filesystem and don't need this; their
|
||||
// outputs are already visible via the user's own tools.
|
||||
const isOpenclaw = agent.adapter === 'openclaw'
|
||||
const workspaceDir = isOpenclaw ? this.resolveSafeWorkspaceDir(agent) : null
|
||||
const producedFilesStore = workspaceDir
|
||||
? this.tryGetProducedFilesStore()
|
||||
: null
|
||||
const workspaceSnapshot =
|
||||
workspaceDir && producedFilesStore
|
||||
? await this.snapshotWorkspaceForTurn(
|
||||
agent,
|
||||
workspaceDir,
|
||||
producedFilesStore,
|
||||
)
|
||||
: null
|
||||
|
||||
try {
|
||||
const upstream = await this.runtime.send({
|
||||
agent,
|
||||
@@ -782,6 +939,27 @@ export class AgentHarnessService {
|
||||
})
|
||||
}
|
||||
} finally {
|
||||
// Attribute any files the agent produced during this turn. We
|
||||
// run on success, error, AND inside `finally` so an upstream
|
||||
// failure mid-turn that still managed to write files doesn't
|
||||
// lose them. We skip only when the user explicitly cancelled —
|
||||
// in that case the side effects shouldn't be surfaced as
|
||||
// "outputs you asked for."
|
||||
if (
|
||||
workspaceDir &&
|
||||
workspaceSnapshot !== null &&
|
||||
producedFilesStore &&
|
||||
!turn.abortController.signal.aborted
|
||||
) {
|
||||
await this.attributeTurnFiles({
|
||||
producedFilesStore,
|
||||
workspaceDir,
|
||||
before: workspaceSnapshot,
|
||||
agent,
|
||||
turnId,
|
||||
turnPrompt: input.message,
|
||||
})
|
||||
}
|
||||
this.notifyTurnEnded(agent.id, {
|
||||
ok: lastErrorMessage === undefined,
|
||||
error: lastErrorMessage,
|
||||
@@ -789,6 +967,112 @@ export class AgentHarnessService {
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Resolve the host-side workspace directory for an openclaw agent.
 *
 * `getHostWorkspaceDir` throws when the (user-supplied) agent name fails
 * its path-traversal guard; that is translated into `null` here so callers
 * simply skip file attribution. A warning is logged so the
 * attribution-disabled case stays observable in production.
 */
private resolveSafeWorkspaceDir(agent: AgentDefinition): string | null {
  let workspaceDir: string | null = null
  try {
    workspaceDir = getHostWorkspaceDir(getOpenClawDir(), agent.name)
  } catch (cause) {
    logger.warn('Skipping openclaw file attribution: unsafe agent name', {
      agentId: agent.id,
      agentName: agent.name,
      error: cause instanceof Error ? cause.message : String(cause),
    })
  }
  return workspaceDir
}
|
||||
|
||||
/**
|
||||
* Pre-turn workspace snapshot. Returns `null` on any failure so
|
||||
* the rest of the turn flow continues without file attribution.
|
||||
*/
|
||||
private async snapshotWorkspaceForTurn(
|
||||
agent: AgentDefinition,
|
||||
workspaceDir: string,
|
||||
producedFilesStore: ProducedFilesStore,
|
||||
): Promise<FileSnapshot | null> {
|
||||
try {
|
||||
return await producedFilesStore.snapshotWorkspace(workspaceDir)
|
||||
} catch (err) {
|
||||
logger.warn(
|
||||
'Failed to snapshot openclaw workspace; file attribution disabled for this turn',
|
||||
{
|
||||
agentId: agent.id,
|
||||
workspaceDir,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
},
|
||||
)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Lazily resolve the produced-files store. Returns `null` if the
|
||||
* SQLite handle isn't initialised yet — keeps the harness usable in
|
||||
* tests + during early server boot, where chat turns are unlikely
|
||||
* but allowed.
|
||||
*/
|
||||
private tryGetProducedFilesStore(): ProducedFilesStore | null {
|
||||
if (this.explicitProducedFilesStore) return this.explicitProducedFilesStore
|
||||
if (this.cachedProducedFilesStore) return this.cachedProducedFilesStore
|
||||
try {
|
||||
this.cachedProducedFilesStore = new ProducedFilesStore()
|
||||
return this.cachedProducedFilesStore
|
||||
} catch (err) {
|
||||
logger.warn(
|
||||
'Produced-files store unavailable; turn-level file attribution disabled',
|
||||
{ error: err instanceof Error ? err.message : String(err) },
|
||||
)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Diff the workspace, persist new/modified files, and emit a
|
||||
* `produced_files` event so subscribers can render the inline
|
||||
* artifact card. Tolerant of all errors — a failure here must
|
||||
* never block the rest of the turn-end bookkeeping.
|
||||
*/
|
||||
private async attributeTurnFiles(input: {
|
||||
producedFilesStore: ProducedFilesStore
|
||||
workspaceDir: string
|
||||
before: FileSnapshot
|
||||
agent: AgentDefinition
|
||||
turnId: string
|
||||
turnPrompt: string
|
||||
}): Promise<void> {
|
||||
try {
|
||||
const rows = await input.producedFilesStore.finalizeTurn({
|
||||
agentDefinitionId: input.agent.id,
|
||||
sessionKey: input.agent.sessionKey,
|
||||
turnId: input.turnId,
|
||||
turnPrompt: input.turnPrompt,
|
||||
workspaceDir: input.workspaceDir,
|
||||
before: input.before,
|
||||
})
|
||||
if (rows.length === 0) return
|
||||
this.turnRegistry.pushEvent(input.turnId, {
|
||||
type: 'produced_files',
|
||||
files: rows.map((row) => ({
|
||||
id: row.id,
|
||||
path: row.path,
|
||||
size: row.size,
|
||||
mtimeMs: row.mtimeMs,
|
||||
})),
|
||||
})
|
||||
} catch (err) {
|
||||
logger.warn('Failed to attribute produced files for turn', {
|
||||
agentId: input.agent.id,
|
||||
turnId: input.turnId,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
private async requireAgent(agentId: string): Promise<AgentDefinition> {
|
||||
const agent = await this.agentStore.get(agentId)
|
||||
if (!agent) {
|
||||
@@ -860,3 +1144,38 @@ export class TurnAlreadyActiveError extends Error {
|
||||
this.name = 'TurnAlreadyActiveError'
|
||||
}
|
||||
}
|
||||
|
||||
// ── Files API DTO ────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Wire shape for one produced-file entry returned by the rail and
|
||||
* inline-card endpoints. Trimmed from the on-disk row — clients
|
||||
* never see `agentDefinitionId` or `sessionKey`.
|
||||
*/
|
||||
export interface ProducedFileEntry {
|
||||
id: string
|
||||
path: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
createdAt: number
|
||||
detectedBy: 'diff' | 'tool'
|
||||
}
|
||||
|
||||
export interface ProducedFilesRailGroup {
|
||||
turnId: string
|
||||
/** First non-blank line of the user prompt that initiated this turn. */
|
||||
turnPrompt: string
|
||||
createdAt: number
|
||||
files: ProducedFileEntry[]
|
||||
}
|
||||
|
||||
function toProducedFileEntry(row: ProducedFileRow): ProducedFileEntry {
|
||||
return {
|
||||
id: row.id,
|
||||
path: row.path,
|
||||
size: row.size,
|
||||
mtimeMs: row.mtimeMs,
|
||||
createdAt: row.createdAt,
|
||||
detectedBy: row.detectedBy,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,11 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { createAgentUIStreamResponse, type UIMessage } from 'ai'
|
||||
import {
|
||||
createAgentUIStreamResponse,
|
||||
type FinishReason,
|
||||
type UIMessage,
|
||||
} from 'ai'
|
||||
import { AiSdkAgent } from '../../agent/ai-sdk-agent'
|
||||
import { formatUserMessage } from '../../agent/format-message'
|
||||
import {
|
||||
@@ -272,6 +276,9 @@ export class ChatService {
|
||||
|
||||
// Handle tool approval responses: patch the agent's messages and re-run
|
||||
if (request.toolApprovalResponses?.length) {
|
||||
let finishReason: FinishReason | undefined
|
||||
let rawFinishReason: string | undefined
|
||||
let stepNumber: number | undefined
|
||||
this.applyToolApprovalResponses(
|
||||
session.agent.messages,
|
||||
request.toolApprovalResponses,
|
||||
@@ -284,8 +291,21 @@ export class ChatService {
|
||||
agent: session.agent.toolLoopAgent,
|
||||
uiMessages: filterValidMessages(session.agent.messages),
|
||||
abortSignal,
|
||||
onStepFinish: (step) => {
|
||||
finishReason = step.finishReason
|
||||
rawFinishReason = step.rawFinishReason
|
||||
stepNumber = step.stepNumber
|
||||
},
|
||||
onFinish: async ({ messages }: { messages: UIMessage[] }) => {
|
||||
session.agent.messages = filterValidMessages(messages)
|
||||
logger.info('Agent execution complete', {
|
||||
conversationId: request.conversationId,
|
||||
totalMessages: messages.length,
|
||||
finishReason,
|
||||
rawFinishReason,
|
||||
stepNumber,
|
||||
isToolApprovalResponse: true,
|
||||
})
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -333,10 +353,19 @@ export class ChatService {
|
||||
: msg,
|
||||
)
|
||||
|
||||
let finishReason: FinishReason | undefined
|
||||
let rawFinishReason: string | undefined
|
||||
let stepNumber: number | undefined
|
||||
|
||||
return createAgentUIStreamResponse({
|
||||
agent: session.agent.toolLoopAgent,
|
||||
uiMessages: promptUiMessages,
|
||||
abortSignal,
|
||||
onStepFinish: (step) => {
|
||||
finishReason = step.finishReason
|
||||
rawFinishReason = step.rawFinishReason
|
||||
stepNumber = step.stepNumber
|
||||
},
|
||||
onFinish: async ({ messages }: { messages: UIMessage[] }) => {
|
||||
// The agent loop returns `messages` containing the prompt-
|
||||
// wrapped user text. Restore the raw form before persisting
|
||||
@@ -354,6 +383,9 @@ export class ChatService {
|
||||
logger.info('Agent execution complete', {
|
||||
conversationId: request.conversationId,
|
||||
totalMessages: restored.length,
|
||||
finishReason,
|
||||
rawFinishReason,
|
||||
stepNumber,
|
||||
})
|
||||
|
||||
if (session?.hiddenPageId) {
|
||||
|
||||
@@ -0,0 +1,335 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Helpers used by the `/claw/files/:id/preview` and
|
||||
* `/claw/files/:id/download` routes:
|
||||
*
|
||||
* - MIME-type detection (extension first, magic-byte fallback for
|
||||
* ambiguous extensions).
|
||||
* - Bounded text-snippet reader for inline previews.
|
||||
* - Image bytes reader for the rail's thumbnails.
|
||||
*
|
||||
* No streaming code lives here — the download route streams via Hono
|
||||
* directly. This module only handles the small in-memory reads the
|
||||
* preview UX needs.
|
||||
*/
|
||||
|
||||
import { open, stat } from 'node:fs/promises'
|
||||
import { extname } from 'node:path'
|
||||
|
||||
/** Hard cap on the inline text snippet returned by the preview API. */
export const TEXT_PREVIEW_MAX_BYTES = 1 * 1024 * 1024 // 1 MB

/** Hard cap on inline image bytes returned as a base64 data URL. */
export const IMAGE_PREVIEW_MAX_BYTES = 4 * 1024 * 1024 // 4 MB

// Extension → MIME lookup. Keys include the leading dot and are matched
// against `extname(path).toLowerCase()`, so entries must be lowercase.
// NOTE(review): '.ts'/'.tsx' map to 'text/typescript' (not the registered
// 'video/mp2t') — presumably intentional so TypeScript sources take the
// text preview path; confirm before "fixing".
const MIME_BY_EXTENSION: Record<string, string> = {
  '.txt': 'text/plain',
  '.md': 'text/markdown',
  '.markdown': 'text/markdown',
  '.json': 'application/json',
  '.jsonl': 'application/x-ndjson',
  '.csv': 'text/csv',
  '.tsv': 'text/tab-separated-values',
  '.xml': 'application/xml',
  '.yaml': 'application/yaml',
  '.yml': 'application/yaml',
  '.toml': 'application/toml',
  '.ini': 'text/plain',
  '.log': 'text/plain',
  '.html': 'text/html',
  '.htm': 'text/html',
  '.css': 'text/css',
  '.js': 'text/javascript',
  '.mjs': 'text/javascript',
  '.cjs': 'text/javascript',
  '.ts': 'text/typescript',
  '.tsx': 'text/typescript',
  '.jsx': 'text/javascript',
  '.py': 'text/x-python',
  '.rb': 'text/x-ruby',
  '.go': 'text/x-go',
  '.rs': 'text/x-rust',
  '.java': 'text/x-java',
  '.kt': 'text/x-kotlin',
  '.swift': 'text/x-swift',
  '.c': 'text/x-c',
  '.h': 'text/x-c',
  '.cpp': 'text/x-c++',
  '.hpp': 'text/x-c++',
  '.sh': 'application/x-sh',
  '.zsh': 'application/x-sh',
  '.bash': 'application/x-sh',
  '.sql': 'application/sql',
  '.png': 'image/png',
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.gif': 'image/gif',
  '.webp': 'image/webp',
  '.bmp': 'image/bmp',
  '.svg': 'image/svg+xml',
  '.ico': 'image/x-icon',
  '.heic': 'image/heic',
  '.heif': 'image/heif',
  '.pdf': 'application/pdf',
  '.zip': 'application/zip',
  '.tar': 'application/x-tar',
  '.gz': 'application/gzip',
  '.tgz': 'application/gzip',
  '.bz2': 'application/x-bzip2',
  '.7z': 'application/x-7z-compressed',
  '.mp3': 'audio/mpeg',
  '.wav': 'audio/wav',
  '.ogg': 'audio/ogg',
  '.mp4': 'video/mp4',
  '.webm': 'video/webm',
  '.mov': 'video/quicktime',
  '.docx':
    'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
  '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
  '.pptx':
    'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}

/**
 * Magic-byte signatures for cases where the extension is missing or
 * misleading. Only covers the formats whose preview path differs from
 * the default binary path (text vs image vs PDF vs other).
 * Each `matches` receives the first few bytes of the file; indexing past
 * the end of a short head yields `undefined`, which fails every `===`
 * comparison, so short files simply match nothing.
 */
const MAGIC_BYTE_SIGNATURES: Array<{
  mime: string
  matches: (head: Uint8Array) => boolean
}> = [
  {
    // \x89PNG\r\n — first 6 of the 8-byte PNG signature.
    mime: 'image/png',
    matches: (h) =>
      h[0] === 0x89 &&
      h[1] === 0x50 &&
      h[2] === 0x4e &&
      h[3] === 0x47 &&
      h[4] === 0x0d &&
      h[5] === 0x0a,
  },
  {
    // \xFF\xD8\xFF — JPEG SOI marker plus first marker byte.
    mime: 'image/jpeg',
    matches: (h) => h[0] === 0xff && h[1] === 0xd8 && h[2] === 0xff,
  },
  {
    // "GIF8" — covers both GIF87a and GIF89a.
    mime: 'image/gif',
    matches: (h) =>
      h[0] === 0x47 && h[1] === 0x49 && h[2] === 0x46 && h[3] === 0x38,
  },
  {
    // "RIFF" container with "WEBP" form type at bytes 8–11.
    mime: 'image/webp',
    matches: (h) =>
      h[0] === 0x52 &&
      h[1] === 0x49 &&
      h[2] === 0x46 &&
      h[3] === 0x46 &&
      h[8] === 0x57 &&
      h[9] === 0x45 &&
      h[10] === 0x42 &&
      h[11] === 0x50,
  },
  {
    // "%PDF"
    mime: 'application/pdf',
    matches: (h) =>
      h[0] === 0x25 && h[1] === 0x50 && h[2] === 0x44 && h[3] === 0x46,
  },
]

// 12 bytes covers the longest signature above (WEBP checks byte index 11).
const MAGIC_BYTE_PROBE_LEN = 12
|
||||
|
||||
/**
|
||||
* Best-effort MIME detection. Tries the extension map first, then
|
||||
* falls back to magic-byte sniffing for the formats whose preview
|
||||
* path differs from the default binary handling. Returns
|
||||
* `application/octet-stream` when we can't tell.
|
||||
*/
|
||||
export async function detectMimeType(absolutePath: string): Promise<string> {
|
||||
const fromExtension = MIME_BY_EXTENSION[extname(absolutePath).toLowerCase()]
|
||||
if (fromExtension) return fromExtension
|
||||
|
||||
let head: Uint8Array
|
||||
try {
|
||||
const handle = await open(absolutePath, 'r')
|
||||
try {
|
||||
const buffer = new Uint8Array(MAGIC_BYTE_PROBE_LEN)
|
||||
const { bytesRead } = await handle.read(
|
||||
buffer,
|
||||
0,
|
||||
MAGIC_BYTE_PROBE_LEN,
|
||||
0,
|
||||
)
|
||||
head = buffer.subarray(0, bytesRead)
|
||||
} finally {
|
||||
await handle.close()
|
||||
}
|
||||
} catch {
|
||||
return 'application/octet-stream'
|
||||
}
|
||||
|
||||
for (const sig of MAGIC_BYTE_SIGNATURES) {
|
||||
if (sig.matches(head)) return sig.mime
|
||||
}
|
||||
|
||||
if (looksLikeText(head)) return 'text/plain'
|
||||
return 'application/octet-stream'
|
||||
}
|
||||
|
||||
// Discriminated-union tag for every preview payload shape below.
export type PreviewKind = 'text' | 'image' | 'pdf' | 'binary' | 'missing'

// Metadata shared by every on-disk preview variant (not `missing`).
export interface BasePreview {
  kind: PreviewKind
  mimeType: string
  size: number
  mtimeMs: number
}

// Inline text snippet, capped at TEXT_PREVIEW_MAX_BYTES.
export interface TextPreview extends BasePreview {
  kind: 'text'
  snippet: string
  /** True when the on-disk file is larger than `TEXT_PREVIEW_MAX_BYTES`. */
  truncated: boolean
}

// Inline image, capped at IMAGE_PREVIEW_MAX_BYTES.
export interface ImagePreview extends BasePreview {
  kind: 'image'
  /** Base64 data URL (incl. `data:` prefix) suitable for `<img src>`. */
  dataUrl: string
}

// Metadata-only: the client renders its own PDF affordance.
export interface PdfPreview extends BasePreview {
  kind: 'pdf'
}

// Metadata-only fallback; the UI offers a download instead.
export interface BinaryPreview extends BasePreview {
  kind: 'binary'
}

// The file no longer exists on disk (deleted/moved since attribution).
export interface MissingPreview {
  kind: 'missing'
}

export type FilePreview =
  | TextPreview
  | ImagePreview
  | PdfPreview
  | BinaryPreview
  | MissingPreview
||||
|
||||
/**
|
||||
* Build a preview payload for the inline-card / rail preview Sheet.
|
||||
* Reads at most `TEXT_PREVIEW_MAX_BYTES` (text) or
|
||||
* `IMAGE_PREVIEW_MAX_BYTES` (image) into memory; everything else
|
||||
* returns a metadata-only `binary` preview and the UI offers a
|
||||
* download instead.
|
||||
*/
|
||||
export async function buildFilePreview(
|
||||
absolutePath: string,
|
||||
): Promise<FilePreview> {
|
||||
let stats: Awaited<ReturnType<typeof stat>>
|
||||
try {
|
||||
stats = await stat(absolutePath)
|
||||
} catch {
|
||||
return { kind: 'missing' }
|
||||
}
|
||||
|
||||
const mimeType = await detectMimeType(absolutePath)
|
||||
const base = {
|
||||
mimeType,
|
||||
size: stats.size,
|
||||
mtimeMs: stats.mtimeMs,
|
||||
} as const
|
||||
|
||||
if (mimeType === 'application/pdf') {
|
||||
return { kind: 'pdf', ...base }
|
||||
}
|
||||
|
||||
if (isTextMime(mimeType)) {
|
||||
return readTextPreview(absolutePath, base)
|
||||
}
|
||||
|
||||
if (isImageMime(mimeType)) {
|
||||
return readImagePreview(absolutePath, base)
|
||||
}
|
||||
|
||||
return { kind: 'binary', ...base }
|
||||
}
|
||||
|
||||
async function readTextPreview(
|
||||
absolutePath: string,
|
||||
base: { mimeType: string; size: number; mtimeMs: number },
|
||||
): Promise<TextPreview> {
|
||||
const handle = await open(absolutePath, 'r')
|
||||
try {
|
||||
const length = Math.min(base.size, TEXT_PREVIEW_MAX_BYTES)
|
||||
const buffer = new Uint8Array(length)
|
||||
const { bytesRead } = await handle.read(buffer, 0, length, 0)
|
||||
const snippet = new TextDecoder('utf-8', { fatal: false }).decode(
|
||||
buffer.subarray(0, bytesRead),
|
||||
)
|
||||
return {
|
||||
kind: 'text',
|
||||
...base,
|
||||
snippet,
|
||||
truncated: base.size > TEXT_PREVIEW_MAX_BYTES,
|
||||
}
|
||||
} finally {
|
||||
await handle.close()
|
||||
}
|
||||
}
|
||||
|
||||
async function readImagePreview(
|
||||
absolutePath: string,
|
||||
base: { mimeType: string; size: number; mtimeMs: number },
|
||||
): Promise<ImagePreview | BinaryPreview> {
|
||||
if (base.size > IMAGE_PREVIEW_MAX_BYTES) {
|
||||
// Too big to inline — let the user download.
|
||||
return { kind: 'binary', ...base }
|
||||
}
|
||||
const handle = await open(absolutePath, 'r')
|
||||
try {
|
||||
const buffer = new Uint8Array(base.size)
|
||||
await handle.read(buffer, 0, base.size, 0)
|
||||
const dataUrl = `data:${base.mimeType};base64,${Buffer.from(buffer).toString('base64')}`
|
||||
return { kind: 'image', ...base, dataUrl }
|
||||
} finally {
|
||||
await handle.close()
|
||||
}
|
||||
}
|
||||
|
||||
function isTextMime(mime: string): boolean {
|
||||
if (mime.startsWith('text/')) return true
|
||||
return (
|
||||
mime === 'application/json' ||
|
||||
mime === 'application/x-ndjson' ||
|
||||
mime === 'application/xml' ||
|
||||
mime === 'application/yaml' ||
|
||||
mime === 'application/toml' ||
|
||||
mime === 'application/sql' ||
|
||||
mime === 'application/x-sh'
|
||||
)
|
||||
}
|
||||
|
||||
function isImageMime(mime: string): boolean {
|
||||
return mime.startsWith('image/') && mime !== 'image/svg+xml'
|
||||
// SVG is text — let it go through the text path so users can read
|
||||
// markup, not view a base64 blob.
|
||||
}
|
||||
|
||||
/**
|
||||
* Crude text-vs-binary heuristic for files whose extension and magic
|
||||
* bytes both fail to identify them. Counts NUL bytes — text files
|
||||
* essentially never contain them; binaries usually do.
|
||||
*/
|
||||
function looksLikeText(head: Uint8Array): boolean {
|
||||
if (head.length === 0) return true
|
||||
let nulCount = 0
|
||||
for (const byte of head) {
|
||||
if (byte === 0) nulCount += 1
|
||||
}
|
||||
return nulCount === 0
|
||||
}
|
||||
@@ -0,0 +1,298 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Converts an aggregated OpenClaw session history (rich content blocks
|
||||
* across the agent's main + sub-sessions) into the flat AgentHistoryPage
|
||||
* shape the chat panel consumes.
|
||||
*
|
||||
* Input: OpenClawSessionHistory.messages — each message has `content`
|
||||
* that is either a string OR an array of typed blocks
|
||||
* ({type: 'text'|'thinking'|'toolCall'|'toolResult'}). The HTTP endpoint
|
||||
* returns the array form even though the type definition says string.
|
||||
*
|
||||
* Output: AgentHistoryEntry[] — flat text per entry, separate `reasoning`
|
||||
* and `toolCalls` fields the UI renders as collapsible sections.
|
||||
*
|
||||
* Tool result pairing: `toolCall` blocks emit on assistant messages;
|
||||
* the matching `toolResult` arrives in a later message (typically with
|
||||
* role 'tool' or 'toolResult'). We pair them by `toolCallId` so the
|
||||
* resulting AgentHistoryToolCall has both input and output.
|
||||
*/
|
||||
|
||||
import type {
|
||||
AgentHistoryEntry,
|
||||
AgentHistoryToolCall,
|
||||
} from '../../../lib/agents/agent-types'
|
||||
import type { AgentHistoryPage } from '../../../lib/agents/types'
|
||||
import type {
|
||||
OpenClawSessionHistory,
|
||||
OpenClawSessionHistoryMessage,
|
||||
} from './openclaw-http-client'
|
||||
|
||||
const CRON_PROMPT_PREFIX_PATTERN =
|
||||
/^\[cron:[0-9a-f-]+ ([^\]]+)\]\s*([\s\S]*?)\n*Current time:[^\n]*(?:\n[\s\S]*)?$/
|
||||
const CRON_DELIVERY_TRAILER =
|
||||
/\n*Use the message tool if you need to notify the user directly[\s\S]*$/
|
||||
const BROWSEROS_WORKING_DIR_PREFIX = /^\[Working directory:[^\]]*\]\n+/
|
||||
const BROWSEROS_ROLE_BLOCK = /<role>[\s\S]*?<\/role>\n+/
|
||||
const BROWSEROS_USER_REQUEST_BLOCK =
|
||||
/<user_request>\n?([\s\S]*?)\n?<\/user_request>/
|
||||
const BROWSEROS_SYSTEM_REMINDER_BLOCK =
|
||||
/\n*<system-reminder>[\s\S]*?<\/system-reminder>\s*$/
|
||||
const QUEUED_MARKER_LINE =
|
||||
/^\[Queued user message that arrived while the previous turn was still active\]\s*$/m
|
||||
const SUBAGENT_CONTEXT_PREFIX = /^\[Subagent Context\][\s\S]*$/
|
||||
|
||||
/**
|
||||
* Strip OpenClaw + BrowserOS scaffolding from a "user" message before
|
||||
* showing it in the chat panel. The raw prompts contain:
|
||||
*
|
||||
* - OpenClaw cron payload prefix:
|
||||
* `[cron:<uuid> <name>] <payload>\nCurrent time: ...\nUse the
|
||||
* message tool if you need to notify the user directly...`
|
||||
* - BrowserOS ACP prefix:
|
||||
* `[Working directory: ...]\n\n<role>...</role>\n\n<user_request>
|
||||
* <actual user text>\n</user_request>\n\n<system-reminder>...</system-reminder>`
|
||||
* - Queued-marker concatenation: when multiple prompts queue while a
|
||||
* turn is active, BrowserOS joins them with the marker line
|
||||
* `[Queued user message that arrived while the previous turn was
|
||||
* still active]`. We split on those markers and clean each chunk
|
||||
* independently, then re-join the non-empty results.
|
||||
* - Subagent context prefix: when an agent invokes a nested subagent,
|
||||
* OpenClaw seeds the subagent's session with `[Subagent Context]
|
||||
* You are running as a subagent (depth N/M). ...` followed by
|
||||
* internal task framing. The actual task lives in the system prompt;
|
||||
* this user message is pure scaffolding and gets dropped entirely.
|
||||
*
|
||||
* For each, we extract just the user-facing text. Non-matching messages
|
||||
* fall through unchanged so any future pattern we don't recognize stays
|
||||
* visible rather than getting silently dropped.
|
||||
*/
|
||||
export function cleanHistoryUserText(raw: string): string {
|
||||
if (!raw) return raw
|
||||
// Queued-marker case: this is structurally a multi-message blob, so
|
||||
// split first and recurse into each chunk. We keep the join character
|
||||
// narrow (single newline) so e.g. five cron payloads render as five
|
||||
// visually-separate lines rather than one wall of text.
|
||||
if (QUEUED_MARKER_LINE.test(raw)) {
|
||||
const chunks = raw
|
||||
.split(
|
||||
/^\[Queued user message that arrived while the previous turn was still active\]\s*$/m,
|
||||
)
|
||||
.map((chunk) => cleanSingleUserMessage(chunk))
|
||||
.filter((chunk) => chunk.length > 0)
|
||||
return chunks.join('\n')
|
||||
}
|
||||
return cleanSingleUserMessage(raw)
|
||||
}
|
||||
|
||||
function cleanSingleUserMessage(raw: string): string {
|
||||
const trimmed = raw.trim()
|
||||
if (!trimmed) return ''
|
||||
// Subagent context seed: pure scaffolding, drop entirely. The real
|
||||
// task lives in the subagent's system prompt; the user-message body
|
||||
// is just framing the model never produced.
|
||||
if (SUBAGENT_CONTEXT_PREFIX.test(trimmed)) {
|
||||
return ''
|
||||
}
|
||||
const cronMatch = CRON_PROMPT_PREFIX_PATTERN.exec(trimmed)
|
||||
if (cronMatch) {
|
||||
const payload = cronMatch[2] ?? ''
|
||||
return payload.replace(CRON_DELIVERY_TRAILER, '').trim()
|
||||
}
|
||||
let text = trimmed
|
||||
text = text.replace(BROWSEROS_WORKING_DIR_PREFIX, '')
|
||||
text = text.replace(BROWSEROS_ROLE_BLOCK, '')
|
||||
text = text.replace(BROWSEROS_SYSTEM_REMINDER_BLOCK, '')
|
||||
const userReq = BROWSEROS_USER_REQUEST_BLOCK.exec(text)
|
||||
if (userReq) {
|
||||
return (userReq[1] ?? '').trim()
|
||||
}
|
||||
return text.trim()
|
||||
}
|
||||
|
||||
// One element of an OpenClaw message's content array. The final
// `{ type: string }` variant is a deliberate catch-all so unrecognized
// block types flow through the converters without throwing; fields are
// optional because this is loosely-typed upstream wire data.
type RichBlock =
  | { type: 'text'; text?: string }
  | { type: 'thinking'; thinking?: string; text?: string }
  | {
      type: 'toolCall'
      id?: string
      toolCallId?: string
      name?: string
      arguments?: unknown
    }
  | {
      type: 'toolResult'
      toolCallId?: string
      content?: unknown
      isError?: boolean
    }
  | { type: string; [key: string]: unknown }

// We hold the AgentHistoryToolCall reference itself in `pending` so a
// later `toolResult` block mutates the same object that was already
// pushed onto the assistant entry's `toolCalls` array.
type PendingToolCall = AgentHistoryToolCall
|
||||
|
||||
/**
 * Convert an aggregated OpenClaw session history into the flat
 * AgentHistoryPage shape the chat panel consumes.
 *
 * Tool pairing relies on message order: a `toolCall` block registers a
 * shared entry object in `pendingByToolCallId`, and a later 'tool'-role
 * message's `toolResult` mutates that same object in place — so results
 * appear on the assistant entry that issued the call.
 *
 * @param agentId Stable id stamped onto every produced entry (and used
 *   to build fallback entry ids).
 * @param raw The aggregated session history from the OpenClaw HTTP API.
 * @returns A page whose `items` are the renderable user/assistant turns.
 */
export function convertOpenClawHistoryToAgentHistory(
  agentId: string,
  raw: OpenClawSessionHistory,
): AgentHistoryPage {
  const items: AgentHistoryEntry[] = []
  // Resolved tool calls keyed by toolCallId — used to attach `output`
  // back to the assistant entry that issued the call once the tool
  // result arrives in a subsequent message.
  const pendingByToolCallId = new Map<string, PendingToolCall>()

  // Fallback id generator for messages that arrive without a messageId.
  let entryCounter = 0
  const nextId = () => `${agentId}:hist:${entryCounter++}`

  for (const message of raw.messages) {
    const blocks = normalizeBlocks(message)
    const role = normalizeRole(message.role)

    if (!role) {
      // 'system' / 'tool' messages aren't shown as their own chat entries;
      // tool results get folded into the assistant entry they complete.
      if (message.role === 'tool') {
        applyToolResults(blocks, pendingByToolCallId)
      }
      continue
    }

    // User text is scrubbed of OpenClaw/BrowserOS scaffolding; assistant
    // text passes through untouched.
    const rawText = collectText(blocks).trim()
    const text = role === 'user' ? cleanHistoryUserText(rawText) : rawText
    const reasoningText = collectThinking(blocks).trim()
    const toolCallEntries = collectToolCalls(blocks, pendingByToolCallId)

    // Skip empty entries. Two cases:
    // - User: cleaner returned empty after stripping scaffolding (e.g.
    //   dropped Subagent Context message). No bubble to render.
    // - Assistant: model returned only thinking blocks (common with
    //   MiniMax `thinking: minimal` for trivial prompts) and no text
    //   or tools. The empty bubble + dangling reasoning collapsible
    //   reads as broken UI; cleaner to drop the turn entirely.
    if (!text && toolCallEntries.length === 0) continue

    const entry: AgentHistoryEntry = {
      id: message.messageId ?? nextId(),
      agentId,
      sessionId: 'main',
      role,
      text,
      createdAt: message.timestamp ?? 0,
    }
    if (reasoningText) {
      entry.reasoning = { text: reasoningText }
    }
    if (toolCallEntries.length > 0) {
      entry.toolCalls = toolCallEntries
    }

    items.push(entry)
  }

  return {
    agentId,
    sessionId: 'main',
    items,
  }
}
|
||||
|
||||
function normalizeBlocks(message: OpenClawSessionHistoryMessage): RichBlock[] {
|
||||
const content = (message as { content: unknown }).content
|
||||
if (typeof content === 'string') {
|
||||
return content ? [{ type: 'text', text: content }] : []
|
||||
}
|
||||
if (Array.isArray(content)) {
|
||||
return content as RichBlock[]
|
||||
}
|
||||
return []
|
||||
}
|
||||
|
||||
function normalizeRole(
|
||||
role: OpenClawSessionHistoryMessage['role'],
|
||||
): 'user' | 'assistant' | null {
|
||||
if (role === 'user' || role === 'assistant') return role
|
||||
return null
|
||||
}
|
||||
|
||||
function collectText(blocks: RichBlock[]): string {
|
||||
const parts: string[] = []
|
||||
for (const block of blocks) {
|
||||
if (block.type === 'text' && typeof block.text === 'string') {
|
||||
parts.push(block.text)
|
||||
}
|
||||
}
|
||||
return parts.join('\n')
|
||||
}
|
||||
|
||||
function collectThinking(blocks: RichBlock[]): string {
|
||||
const parts: string[] = []
|
||||
for (const block of blocks) {
|
||||
if (block.type === 'thinking') {
|
||||
const value =
|
||||
typeof block.thinking === 'string'
|
||||
? block.thinking
|
||||
: typeof block.text === 'string'
|
||||
? block.text
|
||||
: ''
|
||||
if (value) parts.push(value)
|
||||
}
|
||||
}
|
||||
return parts.join('\n\n')
|
||||
}
|
||||
|
||||
function collectToolCalls(
|
||||
blocks: RichBlock[],
|
||||
pending: Map<string, PendingToolCall>,
|
||||
): AgentHistoryToolCall[] {
|
||||
const out: AgentHistoryToolCall[] = []
|
||||
for (const block of blocks) {
|
||||
if (block.type !== 'toolCall') continue
|
||||
const callId =
|
||||
typeof block.toolCallId === 'string'
|
||||
? block.toolCallId
|
||||
: typeof block.id === 'string'
|
||||
? block.id
|
||||
: undefined
|
||||
if (!callId) continue
|
||||
const toolName = typeof block.name === 'string' ? block.name : 'unknown'
|
||||
const entry: AgentHistoryToolCall = {
|
||||
toolCallId: callId,
|
||||
toolName,
|
||||
status: 'completed',
|
||||
input: block.arguments,
|
||||
}
|
||||
out.push(entry)
|
||||
// Hold the same reference so a later toolResult mutates the entry
|
||||
// already pushed onto the assistant's toolCalls array.
|
||||
pending.set(callId, entry)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
function applyToolResults(
|
||||
blocks: RichBlock[],
|
||||
pending: Map<string, PendingToolCall>,
|
||||
): void {
|
||||
for (const block of blocks) {
|
||||
if (block.type !== 'toolResult') continue
|
||||
const callId =
|
||||
typeof block.toolCallId === 'string' ? block.toolCallId : undefined
|
||||
if (!callId) continue
|
||||
const entry = pending.get(callId)
|
||||
if (!entry) continue
|
||||
if (block.isError) {
|
||||
entry.status = 'failed'
|
||||
entry.error =
|
||||
typeof block.content === 'string'
|
||||
? block.content
|
||||
: JSON.stringify(block.content)
|
||||
} else {
|
||||
entry.output = block.content
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,10 +4,40 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { join } from 'node:path'
|
||||
import { join, relative, resolve, sep } from 'node:path'
|
||||
|
||||
const STATE_DIR_NAME = '.openclaw'
|
||||
|
||||
/**
|
||||
* Path-traversal guard for `agent.name` before it gets joined into
|
||||
* the host workspace directory. The name is user-supplied at
|
||||
* agent-create time, and `path.join` happily resolves `..` /
|
||||
* absolute segments — so a name like `../../tmp` would point the
|
||||
* workspace at the user's home directory, the harness's pre-turn
|
||||
* snapshot would walk it, and `produced_files` rows would point at
|
||||
* arbitrary host paths that subsequent download / preview routes
|
||||
* would then serve as "agent outputs".
|
||||
*
|
||||
* Reject anything that isn't a flat, single-segment name composed
|
||||
* of safe filename characters. The check is intentionally
|
||||
* conservative — agent names are short slugs in practice.
|
||||
*/
|
||||
export function isAgentWorkspaceNameSafe(name: string): boolean {
|
||||
if (typeof name !== 'string') return false
|
||||
const trimmed = name.trim()
|
||||
if (trimmed === '' || trimmed === '.' || trimmed === '..') return false
|
||||
// No path separators, no NULs, no control chars (charCode < 0x20).
|
||||
for (let i = 0; i < trimmed.length; i++) {
|
||||
const code = trimmed.charCodeAt(i)
|
||||
if (code < 0x20) return false
|
||||
}
|
||||
if (/[\\/]/.test(trimmed)) return false
|
||||
// No `..` segments and no leading dot (avoid hidden / dotfile escapes).
|
||||
if (trimmed.startsWith('.')) return false
|
||||
if (trimmed.includes('..')) return false
|
||||
return true
|
||||
}
|
||||
|
||||
export function getOpenClawStateDir(openclawDir: string): string {
|
||||
return join(openclawDir, STATE_DIR_NAME)
|
||||
}
|
||||
@@ -24,10 +54,27 @@ export function getHostWorkspaceDir(
|
||||
openclawDir: string,
|
||||
agentName: string,
|
||||
): string {
|
||||
return join(
|
||||
getOpenClawStateDir(openclawDir),
|
||||
if (agentName !== 'main' && !isAgentWorkspaceNameSafe(agentName)) {
|
||||
throw new Error(
|
||||
`Refusing to compute workspace dir for unsafe agent name: ${agentName}`,
|
||||
)
|
||||
}
|
||||
const stateDir = getOpenClawStateDir(openclawDir)
|
||||
const candidate = resolve(
|
||||
stateDir,
|
||||
agentName === 'main' ? 'workspace' : `workspace-${agentName}`,
|
||||
)
|
||||
// Defensive containment check: even with a safe-looking name the
|
||||
// resolved path must live under the state dir. If it doesn't,
|
||||
// refuse rather than return a path the caller would then trust.
|
||||
const stateDirResolved = resolve(stateDir)
|
||||
const rel = relative(stateDirResolved, candidate)
|
||||
if (rel === '' || rel.startsWith('..') || rel.startsWith(`..${sep}`)) {
|
||||
throw new Error(
|
||||
`Resolved workspace dir escapes openclaw state dir: ${candidate}`,
|
||||
)
|
||||
}
|
||||
return candidate
|
||||
}
|
||||
|
||||
export function mergeEnvContent(
|
||||
|
||||
@@ -44,6 +44,24 @@ export interface OpenClawSessionHistoryMessage {
|
||||
messageId?: string
|
||||
messageSeq?: number
|
||||
timestamp?: number
|
||||
/**
|
||||
* OpenClaw extension envelope. The gateway records the per-session
|
||||
* monotonic sequence on `__openclaw.seq` rather than the top-level
|
||||
* `messageSeq` field, so cursor logic reads from here. `id` is the
|
||||
* gateway's stable message id.
|
||||
*/
|
||||
__openclaw?: { id?: string; seq?: number }
|
||||
/**
|
||||
* Origin of this message when the response merges multiple sessions.
|
||||
* Absent on single-session responses for backward compatibility.
|
||||
*/
|
||||
source?: 'main' | 'cron' | 'hook' | 'channel' | 'other'
|
||||
/**
|
||||
* The session key this message originated from. Differs from the
|
||||
* top-level `sessionKey` when sub-sessions (e.g. cron runs) are merged
|
||||
* into a parent agent's main-session response.
|
||||
*/
|
||||
subSessionKey?: string
|
||||
}
|
||||
|
||||
export interface OpenClawSessionHistory {
|
||||
|
||||
@@ -40,6 +40,7 @@ import {
|
||||
type OpenClawAgentRecord,
|
||||
OpenClawCliClient,
|
||||
type OpenClawConfigBatchEntry,
|
||||
type OpenClawSessionEntry,
|
||||
} from './openclaw-cli-client'
|
||||
import {
|
||||
buildOpenClawCliProviderModelRef,
|
||||
@@ -61,6 +62,7 @@ import {
|
||||
OpenClawHttpClient,
|
||||
type OpenClawSessionHistory,
|
||||
type OpenClawSessionHistoryEvent,
|
||||
type OpenClawSessionHistoryMessage,
|
||||
} from './openclaw-http-client'
|
||||
import { OpenClawObserver } from './openclaw-observer'
|
||||
import {
|
||||
@@ -234,6 +236,104 @@ function getOpenClawBrowserOSSessionPrefix(agentId: string): string {
|
||||
return `agent:${agentId}:openai-user:browseros:${agentId}:`
|
||||
}
|
||||
|
||||
const MAIN_SESSION_KEY_PATTERN = /^agent:([^:]+):main$/
|
||||
|
||||
/**
|
||||
* Extract the agent id from a main-session key (e.g. `agent:research:main`
|
||||
* → `research`). Returns null when the key isn't a top-level main session,
|
||||
* which signals the caller to use the per-session fetch path.
|
||||
*/
|
||||
function extractAgentIdFromMainSessionKey(sessionKey: string): string | null {
|
||||
const match = MAIN_SESSION_KEY_PATTERN.exec(sessionKey)
|
||||
return match?.[1] ?? null
|
||||
}
|
||||
|
||||
/**
|
||||
* Classify a session key by its source. The pattern is `agent:<id>:<kind>:...`;
|
||||
* the third segment identifies how the session was started.
|
||||
*/
|
||||
function parseSessionSource(
|
||||
sessionKey: string,
|
||||
): NonNullable<OpenClawSessionHistoryMessage['source']> {
|
||||
const parts = sessionKey.split(':')
|
||||
if (parts[0] !== 'agent' || parts.length < 3) return 'other'
|
||||
switch (parts[2]) {
|
||||
case 'main':
|
||||
return 'main'
|
||||
case 'cron':
|
||||
return 'cron'
|
||||
case 'hook':
|
||||
return 'hook'
|
||||
case 'channel':
|
||||
return 'channel'
|
||||
default:
|
||||
return 'other'
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-session monotonic sequence. Gateway encodes it inside the
|
||||
* `__openclaw` extension envelope; the legacy top-level `messageSeq`
|
||||
* field exists in the type but is rarely populated.
|
||||
*/
|
||||
function resolveMessageSeq(msg: OpenClawSessionHistoryMessage): number | null {
|
||||
const fromEnvelope = msg.__openclaw?.seq
|
||||
if (typeof fromEnvelope === 'number' && Number.isFinite(fromEnvelope)) {
|
||||
return fromEnvelope
|
||||
}
|
||||
if (typeof msg.messageSeq === 'number' && Number.isFinite(msg.messageSeq)) {
|
||||
return msg.messageSeq
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Stable chronological order across sessions. Falls back to seq
|
||||
* when timestamps tie or are missing, preserving intra-session order.
|
||||
*/
|
||||
function compareMessageOrder(
|
||||
a: OpenClawSessionHistoryMessage,
|
||||
b: OpenClawSessionHistoryMessage,
|
||||
): number {
|
||||
const aTs = a.timestamp ?? 0
|
||||
const bTs = b.timestamp ?? 0
|
||||
if (aTs !== bTs) return aTs - bTs
|
||||
return (resolveMessageSeq(a) ?? 0) - (resolveMessageSeq(b) ?? 0)
|
||||
}
|
||||
|
||||
/**
|
||||
* Compound cursor for the aggregated history endpoint. Maps each
|
||||
* session key to either:
|
||||
* - a `messageSeq` to fetch BEFORE on the next page (more historical),
|
||||
* - or `null` meaning the session is exhausted and should be skipped.
|
||||
*
|
||||
* Encoded as base64url JSON for URL-safe transport in `?cursor=`.
|
||||
*/
|
||||
type CompoundCursor = Record<string, number | null>
|
||||
|
||||
function decodeCompoundCursor(encoded: string | undefined): CompoundCursor {
|
||||
if (!encoded) return {}
|
||||
try {
|
||||
const json = Buffer.from(encoded, 'base64url').toString('utf8')
|
||||
const parsed = JSON.parse(json)
|
||||
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
|
||||
const out: CompoundCursor = {}
|
||||
for (const [k, v] of Object.entries(parsed)) {
|
||||
if (typeof v === 'number' || v === null) out[k] = v
|
||||
}
|
||||
return out
|
||||
}
|
||||
} catch {
|
||||
// Malformed cursors are treated as "first page" — preferable to
|
||||
// erroring out the entire history fetch on a bad client cursor.
|
||||
}
|
||||
return {}
|
||||
}
|
||||
|
||||
function encodeCompoundCursor(cursor: CompoundCursor): string {
|
||||
return Buffer.from(JSON.stringify(cursor), 'utf8').toString('base64url')
|
||||
}
|
||||
|
||||
export interface AgentOverview {
|
||||
agentId: string
|
||||
status: AgentLiveStatus
|
||||
@@ -794,9 +894,155 @@ export class OpenClawService {
|
||||
input: { limit?: number; cursor?: string; signal?: AbortSignal } = {},
|
||||
): Promise<OpenClawSessionHistory> {
|
||||
await this.assertGatewayReady()
|
||||
return this.runControlPlaneCall(() =>
|
||||
this.httpClient.getSessionHistory(sessionKey, input),
|
||||
return this.runControlPlaneCall(async () => {
|
||||
const agentId = extractAgentIdFromMainSessionKey(sessionKey)
|
||||
if (!agentId) {
|
||||
return this.httpClient.getSessionHistory(sessionKey, input)
|
||||
}
|
||||
return this.fetchAggregatedAgentHistory(sessionKey, agentId, input)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Aggregates the agent's main session and every sub-session (cron,
|
||||
* hook, channel) into a single chronological response. The main
|
||||
* session's own messages are included; each sub-session's messages
|
||||
* are tagged with `source` and `subSessionKey` so the UI can
|
||||
* distinguish autonomous turns from user-driven turns.
|
||||
*
|
||||
* Pagination uses a compound cursor that encodes a per-session seq
|
||||
* for each session in scope (`{<sessionKey>: seq | null}`). Each page
|
||||
* fetches each non-exhausted session with its own per-session cursor,
|
||||
* merges messages across sessions by timestamp, slices to `limit`,
|
||||
* and emits a fresh compound cursor reflecting where each session
|
||||
* should resume on the next page. A session with `null` in the
|
||||
* cursor is exhausted and skipped.
|
||||
*
|
||||
* Sub-session fetches that fail are logged and dropped — partial
|
||||
* timelines are preferable to a hard failure that hides the main
|
||||
* session.
|
||||
*/
|
||||
private async fetchAggregatedAgentHistory(
|
||||
mainSessionKey: string,
|
||||
agentId: string,
|
||||
input: { limit?: number; cursor?: string; signal?: AbortSignal },
|
||||
): Promise<OpenClawSessionHistory> {
|
||||
const compoundIn = decodeCompoundCursor(input.cursor)
|
||||
const sessions = await this.cliClient
|
||||
.listSessions(agentId)
|
||||
.catch((err): OpenClawSessionEntry[] => {
|
||||
logger.warn(
|
||||
'Failed to list OpenClaw sub-sessions; falling back to main only',
|
||||
{ agentId, error: err instanceof Error ? err.message : String(err) },
|
||||
)
|
||||
return []
|
||||
})
|
||||
|
||||
// Build the candidate set from the agent's session directory plus
|
||||
// the main key (which may not appear in `sessions.list` if the file
|
||||
// hasn't been written yet for a fresh agent).
|
||||
const targetKeys = new Set<string>([mainSessionKey])
|
||||
for (const entry of sessions) {
|
||||
if (entry.key?.startsWith(`agent:${agentId}:`)) {
|
||||
targetKeys.add(entry.key)
|
||||
}
|
||||
}
|
||||
|
||||
// Only fetch sessions that aren't exhausted by the inbound cursor.
|
||||
// A session with `null` in the cursor is fully read; skip it on
|
||||
// subsequent pages.
|
||||
const activeKeys = Array.from(targetKeys).filter(
|
||||
(k) => compoundIn[k] !== null,
|
||||
)
|
||||
|
||||
const fetchedHistories = await Promise.all(
|
||||
activeKeys.map(async (key) => {
|
||||
const sessionCursor = compoundIn[key]
|
||||
try {
|
||||
const history = await this.httpClient.getSessionHistory(key, {
|
||||
limit: input.limit,
|
||||
cursor:
|
||||
typeof sessionCursor === 'number'
|
||||
? String(sessionCursor)
|
||||
: undefined,
|
||||
signal: input.signal,
|
||||
})
|
||||
return { key, history }
|
||||
} catch (err) {
|
||||
logger.warn('Failed to fetch OpenClaw sub-session history', {
|
||||
sessionKey: key,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
return null
|
||||
}
|
||||
}),
|
||||
)
|
||||
|
||||
type Annotated = OpenClawSessionHistoryMessage & { __sessionKey: string }
|
||||
const merged: Annotated[] = []
|
||||
let truncated = false
|
||||
for (const result of fetchedHistories) {
|
||||
if (!result) continue
|
||||
const source = parseSessionSource(result.key)
|
||||
const isMain = result.key === mainSessionKey
|
||||
for (const msg of result.history.messages) {
|
||||
merged.push({
|
||||
...msg,
|
||||
source,
|
||||
...(isMain ? {} : { subSessionKey: result.key }),
|
||||
__sessionKey: result.key,
|
||||
})
|
||||
}
|
||||
if (result.history.truncated) truncated = true
|
||||
}
|
||||
|
||||
merged.sort(compareMessageOrder)
|
||||
|
||||
// The merged window contains the latest portion fetched. We emit
|
||||
// up to `limit` messages from the END (newest), and compute the
|
||||
// resume position for each session as the seq of the EARLIEST
|
||||
// emitted message that came from that session.
|
||||
const limited =
|
||||
typeof input.limit === 'number' && input.limit > 0
|
||||
? merged.slice(-input.limit)
|
||||
: merged
|
||||
|
||||
const compoundOut: CompoundCursor = {}
|
||||
// Carry forward exhausted sessions so subsequent pages keep skipping them.
|
||||
for (const key of Array.from(targetKeys)) {
|
||||
if (compoundIn[key] === null) {
|
||||
compoundOut[key] = null
|
||||
}
|
||||
}
|
||||
for (const result of fetchedHistories) {
|
||||
if (!result) continue
|
||||
const key = result.key
|
||||
const earliestEmitted = limited.find((m) => m.__sessionKey === key)
|
||||
const sessionFetchHasMore = Boolean(result.history.hasMore)
|
||||
const droppedFromMerge =
|
||||
result.history.messages.length >
|
||||
limited.filter((m) => m.__sessionKey === key).length
|
||||
const sessionHasMore = sessionFetchHasMore || droppedFromMerge
|
||||
if (!sessionHasMore) {
|
||||
compoundOut[key] = null
|
||||
continue
|
||||
}
|
||||
const seq = earliestEmitted ? resolveMessageSeq(earliestEmitted) : null
|
||||
compoundOut[key] = seq
|
||||
}
|
||||
|
||||
const hasMore = Object.values(compoundOut).some(
|
||||
(v) => typeof v === 'number',
|
||||
)
|
||||
const messages = limited.map(({ __sessionKey: _drop, ...rest }) => rest)
|
||||
|
||||
return {
|
||||
sessionKey: mainSessionKey,
|
||||
messages,
|
||||
cursor: hasMore ? encodeCompoundCursor(compoundOut) : null,
|
||||
hasMore,
|
||||
truncated: truncated || limited.length < merged.length,
|
||||
}
|
||||
}
|
||||
|
||||
async streamSessionHistory(
|
||||
|
||||
@@ -0,0 +1,359 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* SQLite-backed store for files an OpenClaw agent produced inside its
|
||||
* workspace during a chat turn. The detection model is a per-turn
|
||||
* snapshot diff: take a `(path → size, mtime)` map of the workspace
|
||||
* before the turn starts, re-scan after the SSE `done` event, and
|
||||
* write a row for any new or modified file.
|
||||
*
|
||||
* Adapter-agnostic by design — the watcher is injected with the
|
||||
* agent's workspace dir, so V2 can plug Claude / Codex turn lifecycle
|
||||
* into the same store with a different `workspaceDir`.
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'node:crypto'
|
||||
import { realpath, stat } from 'node:fs/promises'
|
||||
import { relative, resolve, sep } from 'node:path'
|
||||
import { and, desc, eq } from 'drizzle-orm'
|
||||
import { type BrowserOsDatabase, getDb } from '../../../lib/db'
|
||||
import {
|
||||
agentDefinitions,
|
||||
type NewProducedFileRow,
|
||||
type ProducedFileRow,
|
||||
producedFiles,
|
||||
} from '../../../lib/db/schema'
|
||||
import { walkWorkspace } from './produced-files-walker'
|
||||
|
||||
const TURN_PROMPT_MAX_CHARS = 280
|
||||
|
||||
export interface FileSnapshotEntry {
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
/** A `(workspace-relative path → fs metadata)` snapshot of a workspace. */
|
||||
export type FileSnapshot = Map<string, FileSnapshotEntry>
|
||||
|
||||
export interface FinalizeTurnInput {
|
||||
agentDefinitionId: string
|
||||
sessionKey: string
|
||||
turnId: string
|
||||
/** Raw user prompt; truncated to `TURN_PROMPT_MAX_CHARS` before persist. */
|
||||
turnPrompt: string
|
||||
/** Absolute host path to the agent's workspace directory. */
|
||||
workspaceDir: string
|
||||
/** Snapshot taken before the turn began. */
|
||||
before: FileSnapshot
|
||||
}
|
||||
|
||||
export interface ResolvedFile {
|
||||
row: ProducedFileRow
|
||||
/** Absolute host path; guaranteed to live inside the original workspace. */
|
||||
absolutePath: string
|
||||
}
|
||||
|
||||
export class ProducedFilesStore {
|
||||
private readonly db: BrowserOsDatabase
|
||||
|
||||
constructor(options: { db?: BrowserOsDatabase } = {}) {
|
||||
this.db = options.db ?? getDb()
|
||||
}
|
||||
|
||||
/**
|
||||
* Walk the workspace and capture every file's size + mtime. Used to
|
||||
* bracket a chat turn so the post-turn diff knows what changed.
|
||||
*/
|
||||
async snapshotWorkspace(workspaceDir: string): Promise<FileSnapshot> {
|
||||
const snapshot: FileSnapshot = new Map()
|
||||
await walkWorkspace(workspaceDir, (relPath, metadata) => {
|
||||
snapshot.set(relPath, metadata)
|
||||
})
|
||||
return snapshot
|
||||
}
|
||||
|
||||
/**
|
||||
* Diff the live workspace against `before`, persist rows for any
|
||||
* new or modified file, return the rows so the chat-turn finalizer
|
||||
* can broadcast them on the SSE feed. Re-modifications update the
|
||||
* existing row in place (the `(agentDefinitionId, path)` unique
|
||||
* index makes the upsert deterministic).
|
||||
*/
|
||||
async finalizeTurn(input: FinalizeTurnInput): Promise<ProducedFileRow[]> {
|
||||
const after: FileSnapshot = await this.snapshotWorkspace(input.workspaceDir)
|
||||
const changed: Array<{ relPath: string; entry: FileSnapshotEntry }> = []
|
||||
for (const [relPath, entry] of after) {
|
||||
const previous = input.before.get(relPath)
|
||||
if (
|
||||
!previous ||
|
||||
previous.size !== entry.size ||
|
||||
previous.mtimeMs !== entry.mtimeMs
|
||||
) {
|
||||
changed.push({ relPath, entry })
|
||||
}
|
||||
}
|
||||
if (changed.length === 0) return []
|
||||
|
||||
const now = Date.now()
|
||||
const turnPrompt = truncatePrompt(input.turnPrompt)
|
||||
const rows: ProducedFileRow[] = []
|
||||
for (const { relPath, entry } of changed) {
|
||||
const row: NewProducedFileRow = {
|
||||
id: randomUUID(),
|
||||
agentDefinitionId: input.agentDefinitionId,
|
||||
sessionKey: input.sessionKey,
|
||||
turnId: input.turnId,
|
||||
turnPrompt,
|
||||
path: relPath,
|
||||
size: entry.size,
|
||||
mtimeMs: entry.mtimeMs,
|
||||
createdAt: now,
|
||||
detectedBy: 'diff',
|
||||
}
|
||||
// Upsert on (agent, path) — re-modifications win, no duplicates.
|
||||
const upserted = this.db
|
||||
.insert(producedFiles)
|
||||
.values(row)
|
||||
.onConflictDoUpdate({
|
||||
target: [producedFiles.agentDefinitionId, producedFiles.path],
|
||||
set: {
|
||||
sessionKey: row.sessionKey,
|
||||
turnId: row.turnId,
|
||||
turnPrompt: row.turnPrompt,
|
||||
size: row.size,
|
||||
mtimeMs: row.mtimeMs,
|
||||
createdAt: row.createdAt,
|
||||
detectedBy: row.detectedBy,
|
||||
},
|
||||
})
|
||||
.returning()
|
||||
.all()
|
||||
const persisted = upserted[0] ?? row
|
||||
rows.push(persisted as ProducedFileRow)
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
/** Inline-card query — files for a single assistant turn. */
|
||||
async listByTurn(turnId: string): Promise<ProducedFileRow[]> {
|
||||
return this.db
|
||||
.select()
|
||||
.from(producedFiles)
|
||||
.where(eq(producedFiles.turnId, turnId))
|
||||
.orderBy(desc(producedFiles.createdAt))
|
||||
.all()
|
||||
}
|
||||
|
||||
/**
|
||||
* Outputs-rail query — every file an agent has produced across all
|
||||
* sessions, newest first.
|
||||
*/
|
||||
async listByAgent(
|
||||
agentDefinitionId: string,
|
||||
options: { limit?: number } = {},
|
||||
): Promise<ProducedFileRow[]> {
|
||||
const limit = options.limit ?? 200
|
||||
return this.db
|
||||
.select()
|
||||
.from(producedFiles)
|
||||
.where(eq(producedFiles.agentDefinitionId, agentDefinitionId))
|
||||
.orderBy(desc(producedFiles.createdAt))
|
||||
.limit(limit)
|
||||
.all()
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a gateway-side OpenClaw agent name (e.g. `main`,
|
||||
* `chief-01`) to the corresponding `agentDefinitions.id` so file
|
||||
* rows can be FK'd back to the harness record.
|
||||
*
|
||||
* Two shapes exist on disk depending on how the agent was added:
|
||||
*
|
||||
* 1. Reconciled rows from `agentHarnessService.reconcileWithGateway`
|
||||
* use `id == openclawAgentId` directly
|
||||
* (see `agent-harness-service.ts:522`).
|
||||
* 2. BrowserOS-created rows use `id = oc-<uuid>` and store the
|
||||
* openclaw name in the `name` column (`db-agent-store.ts:55-65`).
|
||||
*
|
||||
* Lookup tries shape 1 first (direct id hit), then shape 2 by
|
||||
* `(adapter='openclaw', name)`.
|
||||
*/
|
||||
async resolveAgentDefinitionId(
|
||||
openclawAgentId: string,
|
||||
): Promise<string | null> {
|
||||
const directHit = this.db
|
||||
.select({ id: agentDefinitions.id })
|
||||
.from(agentDefinitions)
|
||||
.where(eq(agentDefinitions.id, openclawAgentId))
|
||||
.limit(1)
|
||||
.all()
|
||||
if (directHit[0]) return directHit[0].id
|
||||
|
||||
const byName = this.db
|
||||
.select({ id: agentDefinitions.id })
|
||||
.from(agentDefinitions)
|
||||
.where(
|
||||
and(
|
||||
eq(agentDefinitions.adapter, 'openclaw'),
|
||||
eq(agentDefinitions.name, openclawAgentId),
|
||||
),
|
||||
)
|
||||
.limit(1)
|
||||
.all()
|
||||
return byName[0]?.id ?? null
|
||||
}
|
||||
|
||||
/** Single-row lookup; null if the id is unknown. */
|
||||
async findById(id: string): Promise<ProducedFileRow | null> {
|
||||
const rows = this.db
|
||||
.select()
|
||||
.from(producedFiles)
|
||||
.where(eq(producedFiles.id, id))
|
||||
.limit(1)
|
||||
.all()
|
||||
return rows[0] ?? null
|
||||
}
|
||||
|
||||
/** Used by `removeRegisteredModel` and similar admin paths later on. */
|
||||
async deleteByAgent(agentDefinitionId: string): Promise<void> {
|
||||
this.db
|
||||
.delete(producedFiles)
|
||||
.where(eq(producedFiles.agentDefinitionId, agentDefinitionId))
|
||||
.run()
|
||||
}
|
||||
|
||||
/** Useful for hard-resetting a session's files (e.g. workspace clear). */
|
||||
async deleteBySession(sessionKey: string): Promise<void> {
|
||||
this.db
|
||||
.delete(producedFiles)
|
||||
.where(eq(producedFiles.sessionKey, sessionKey))
|
||||
.run()
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a stored file id to an absolute host path, after validating
|
||||
* that the on-disk path still lives inside `workspaceDir`. The HTTP
|
||||
* download / preview routes are the only callers; the workspace dir
|
||||
* is supplied by the openclaw service so this module stays
|
||||
* adapter-agnostic.
|
||||
*/
|
||||
async resolveFilePath(input: {
|
||||
fileId: string
|
||||
workspaceDir: string
|
||||
}): Promise<ResolvedFile | null> {
|
||||
const row = await this.findById(input.fileId)
|
||||
if (!row) return null
|
||||
|
||||
const absolutePath = await resolveSafeWorkspacePath(
|
||||
input.workspaceDir,
|
||||
row.path,
|
||||
)
|
||||
if (!absolutePath) return null
|
||||
return { row, absolutePath }
|
||||
}
|
||||
|
||||
/**
|
||||
* Group a flat list of rows by `turnId`, preserving the latest-first
|
||||
* order on the row level and keeping the most-recent group first.
|
||||
* The Outputs rail uses this shape directly.
|
||||
*/
|
||||
groupByTurn(rows: ProducedFileRow[]): Array<{
|
||||
turnId: string
|
||||
turnPrompt: string
|
||||
createdAt: number
|
||||
files: ProducedFileRow[]
|
||||
}> {
|
||||
const grouped = new Map<
|
||||
string,
|
||||
{
|
||||
turnId: string
|
||||
turnPrompt: string
|
||||
createdAt: number
|
||||
files: ProducedFileRow[]
|
||||
}
|
||||
>()
|
||||
for (const row of rows) {
|
||||
const existing = grouped.get(row.turnId)
|
||||
if (!existing) {
|
||||
grouped.set(row.turnId, {
|
||||
turnId: row.turnId,
|
||||
turnPrompt: row.turnPrompt,
|
||||
// Group's createdAt = its newest file (rows are
|
||||
// already desc-by-createdAt, so the first one wins).
|
||||
createdAt: row.createdAt,
|
||||
files: [row],
|
||||
})
|
||||
continue
|
||||
}
|
||||
existing.files.push(row)
|
||||
if (row.createdAt > existing.createdAt) {
|
||||
existing.createdAt = row.createdAt
|
||||
}
|
||||
}
|
||||
return Array.from(grouped.values()).sort(
|
||||
(a, b) => b.createdAt - a.createdAt,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
function truncatePrompt(value: string): string {
|
||||
const trimmed = value.trim()
|
||||
if (trimmed.length <= TURN_PROMPT_MAX_CHARS) return trimmed
|
||||
return `${trimmed.slice(0, TURN_PROMPT_MAX_CHARS - 1)}…`
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve `workspaceDir + relPath` to an absolute host path, but only
|
||||
* if the resolved real path lives inside the workspace root. Returns
|
||||
* null on:
|
||||
* - lexical traversal (`..` segments escaping the root),
|
||||
* - symlink escape (a file in the workspace pointing outside it),
|
||||
* - missing files,
|
||||
* - any unreadable path component.
|
||||
*
|
||||
* Exported so the unit test can hit it without a sqlite handle.
|
||||
*/
|
||||
export async function resolveSafeWorkspacePath(
|
||||
workspaceDir: string,
|
||||
relPath: string,
|
||||
): Promise<string | null> {
|
||||
// Lexical containment first — fail fast without touching the FS.
|
||||
const workspaceRoot = resolve(workspaceDir)
|
||||
const lexical = resolve(workspaceRoot, relPath)
|
||||
const lexicalRel = relative(workspaceRoot, lexical)
|
||||
if (
|
||||
lexicalRel === '' ||
|
||||
lexicalRel.startsWith('..') ||
|
||||
lexicalRel.startsWith(`..${sep}`)
|
||||
) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Realpath check — collapses symlinks so a workspace symlink that
|
||||
// points outside the root cannot be downloaded. Falls through to
|
||||
// null if anything errors (file gone, permissions, broken link).
|
||||
try {
|
||||
const [realRoot, realFile] = await Promise.all([
|
||||
realpath(workspaceRoot),
|
||||
realpath(lexical),
|
||||
])
|
||||
const realRel = relative(realRoot, realFile)
|
||||
if (
|
||||
realRel === '' ||
|
||||
realRel.startsWith('..') ||
|
||||
realRel.startsWith(`..${sep}`)
|
||||
) {
|
||||
return null
|
||||
}
|
||||
await stat(realFile)
|
||||
return realFile
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
// Re-export the row type so callers pulling the store don't have to
|
||||
// also import the schema module.
|
||||
export type { ProducedFileRow }
|
||||
@@ -0,0 +1,127 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Workspace walker used by the produced-files diff watcher. Recurses
|
||||
* an OpenClaw agent's workspace directory and yields one
|
||||
* `(workspace-relative path, size, mtime)` triple per file.
|
||||
*
|
||||
* Design choices:
|
||||
*
|
||||
* - **Pure async iteration.** No third-party deps; relies on
|
||||
* `fs.promises.readdir` + `Dirent` so directory traversal is one
|
||||
* syscall per directory.
|
||||
* - **Symlink-aware.** Symlinks themselves aren't followed (they
|
||||
* appear in `Dirent.isSymbolicLink()`); the walker skips them so
|
||||
* an agent can't smuggle host-fs paths into the diff via a
|
||||
* symlink in its workspace.
|
||||
* - **Excludes well-known cruft directories** that no useful agent
|
||||
* output ever lives inside (`node_modules`, `.git`, `.cache`).
|
||||
* These directories are also expensive to traverse, so skipping
|
||||
* them keeps the per-turn snapshot fast.
|
||||
* - **Bounded.** Hard caps on entry count and recursion depth keep
|
||||
* pathological workspaces from stalling the chat-turn finalizer.
|
||||
*/
|
||||
|
||||
import type { Dirent } from 'node:fs'
|
||||
import { readdir, stat } from 'node:fs/promises'
|
||||
import { join, relative, sep } from 'node:path'
|
||||
|
||||
const EXCLUDED_DIRECTORIES = new Set(['node_modules', '.git', '.cache'])
|
||||
|
||||
const MAX_ENTRIES = 50_000
|
||||
const MAX_DEPTH = 16
|
||||
|
||||
export interface WorkspaceFileMetadata {
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
export type WorkspaceFileVisitor = (
|
||||
/** Workspace-relative path (POSIX-style separators). */
|
||||
relativePath: string,
|
||||
metadata: WorkspaceFileMetadata,
|
||||
) => void
|
||||
|
||||
/**
|
||||
* Walk `workspaceDir` recursively, calling `visit` for every regular
|
||||
* file. Returns silently if the directory doesn't exist (a fresh
|
||||
* agent that hasn't produced anything yet shouldn't error here).
|
||||
*/
|
||||
export async function walkWorkspace(
|
||||
workspaceDir: string,
|
||||
visit: WorkspaceFileVisitor,
|
||||
): Promise<void> {
|
||||
let entriesSeen = 0
|
||||
await walk(workspaceDir, workspaceDir, 0, (file) => {
|
||||
entriesSeen += 1
|
||||
if (entriesSeen > MAX_ENTRIES) return false
|
||||
visit(file.relativePath, file.metadata)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
interface VisitedFile {
|
||||
relativePath: string
|
||||
metadata: WorkspaceFileMetadata
|
||||
}
|
||||
|
||||
async function walk(
|
||||
root: string,
|
||||
current: string,
|
||||
depth: number,
|
||||
yieldFile: (file: VisitedFile) => boolean,
|
||||
): Promise<boolean> {
|
||||
if (depth > MAX_DEPTH) return true
|
||||
|
||||
let entries: Dirent[]
|
||||
try {
|
||||
entries = await readdir(current, { withFileTypes: true })
|
||||
} catch {
|
||||
// Workspace dir missing or unreadable — fresh agent that hasn't
|
||||
// written anything yet, or transient permissions issue. Treat as
|
||||
// "no files" rather than throwing.
|
||||
return true
|
||||
}
|
||||
|
||||
for (const entry of entries) {
|
||||
if (EXCLUDED_DIRECTORIES.has(entry.name)) continue
|
||||
const absolute = join(current, entry.name)
|
||||
|
||||
if (entry.isSymbolicLink()) {
|
||||
// Skip symlinks — never follow, never record. Prevents an
|
||||
// agent from smuggling host-fs paths into the diff via a
|
||||
// symlink in its workspace.
|
||||
continue
|
||||
}
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
const keepGoing = await walk(root, absolute, depth + 1, yieldFile)
|
||||
if (!keepGoing) return false
|
||||
continue
|
||||
}
|
||||
|
||||
if (!entry.isFile()) continue
|
||||
|
||||
let stats: Awaited<ReturnType<typeof stat>>
|
||||
try {
|
||||
stats = await stat(absolute)
|
||||
} catch {
|
||||
// Concurrent delete between readdir and stat — skip silently.
|
||||
continue
|
||||
}
|
||||
const relativePath = toPosix(relative(root, absolute))
|
||||
const keepGoing = yieldFile({
|
||||
relativePath,
|
||||
metadata: { size: stats.size, mtimeMs: stats.mtimeMs },
|
||||
})
|
||||
if (!keepGoing) return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
function toPosix(value: string): string {
|
||||
if (sep === '/') return value
|
||||
return value.split(sep).join('/')
|
||||
}
|
||||
@@ -364,6 +364,31 @@ export class Browser {
|
||||
}
|
||||
}
|
||||
|
||||
async resolveFocusedElement(pageId: number): Promise<number | null> {
|
||||
const session = await this.resolveSession(pageId)
|
||||
try {
|
||||
const result = await session.Runtime.evaluate({
|
||||
expression: `(() => {
|
||||
let element = document.activeElement;
|
||||
while (element?.shadowRoot?.activeElement) {
|
||||
element = element.shadowRoot.activeElement;
|
||||
}
|
||||
return element instanceof Element ? element : null;
|
||||
})()`,
|
||||
})
|
||||
const objectId = result.result?.objectId
|
||||
if (!objectId) return null
|
||||
|
||||
const desc = await session.DOM.describeNode({ objectId })
|
||||
const backendNodeId = desc.node?.backendNodeId
|
||||
if (!backendNodeId) return null
|
||||
|
||||
return await this.resolveActionableElement(pageId, backendNodeId)
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
async resolveElementProperties(
|
||||
pageId: number,
|
||||
backendNodeId: number,
|
||||
@@ -911,6 +936,16 @@ export class Browser {
|
||||
}
|
||||
}
|
||||
|
||||
async viewportSize(page: number): Promise<{ width: number; height: number }> {
|
||||
const session = await this.resolveSession(page)
|
||||
const metrics = await session.Page.getLayoutMetrics()
|
||||
const viewport = metrics.cssVisualViewport ?? metrics.cssLayoutViewport
|
||||
return {
|
||||
width: viewport.clientWidth,
|
||||
height: viewport.clientHeight,
|
||||
}
|
||||
}
|
||||
|
||||
async evaluate(
|
||||
page: number,
|
||||
expression: string,
|
||||
@@ -1099,6 +1134,11 @@ export class Browser {
|
||||
await keyboard.typeText(session, text)
|
||||
}
|
||||
|
||||
async typeText(page: number, text: string): Promise<void> {
|
||||
const session = await this.resolveSession(page)
|
||||
await keyboard.typeText(session, text)
|
||||
}
|
||||
|
||||
async dragAt(
|
||||
page: number,
|
||||
from: { x: number; y: number },
|
||||
@@ -1212,68 +1252,16 @@ export class Browser {
|
||||
|
||||
if (deltaX === 0 && deltaY === 0) return
|
||||
|
||||
let x: number
|
||||
let y: number
|
||||
if (element !== undefined) {
|
||||
const center = await elements.getElementCenter(session, element)
|
||||
x = center.x
|
||||
y = center.y
|
||||
} else {
|
||||
const metrics = await session.Page.getLayoutMetrics()
|
||||
x = metrics.layoutViewport.clientWidth / 2
|
||||
y = metrics.layoutViewport.clientHeight / 2
|
||||
await mouse.dispatchScroll(session, center.x, center.y, deltaX, deltaY)
|
||||
return
|
||||
}
|
||||
|
||||
const beforeWindowPosition =
|
||||
element === undefined
|
||||
? await this.getWindowScrollPosition(session)
|
||||
: undefined
|
||||
|
||||
await mouse.dispatchScroll(session, x, y, deltaX, deltaY)
|
||||
|
||||
if (beforeWindowPosition === undefined) return
|
||||
|
||||
const afterWindowPosition = await this.getWindowScrollPosition(session)
|
||||
const moved = this.didScrollInExpectedDirection(
|
||||
beforeWindowPosition,
|
||||
afterWindowPosition,
|
||||
deltaX,
|
||||
deltaY,
|
||||
)
|
||||
if (moved) return
|
||||
|
||||
await this.fallbackWindowScroll(session, deltaX, deltaY)
|
||||
await this.scrollWindow(session, deltaX, deltaY)
|
||||
}
|
||||
|
||||
private async getWindowScrollPosition(
|
||||
session: ProtocolApi,
|
||||
): Promise<{ x: number; y: number }> {
|
||||
const result = await session.Runtime.evaluate({
|
||||
expression:
|
||||
'({ x: window.scrollX ?? window.pageXOffset ?? 0, y: window.scrollY ?? window.pageYOffset ?? 0 })',
|
||||
returnByValue: true,
|
||||
})
|
||||
const value = (result.result?.value ?? {}) as { x?: number; y?: number }
|
||||
return {
|
||||
x: typeof value.x === 'number' ? value.x : 0,
|
||||
y: typeof value.y === 'number' ? value.y : 0,
|
||||
}
|
||||
}
|
||||
|
||||
private didScrollInExpectedDirection(
|
||||
before: { x: number; y: number },
|
||||
after: { x: number; y: number },
|
||||
deltaX: number,
|
||||
deltaY: number,
|
||||
): boolean {
|
||||
if (deltaX > 0 && after.x > before.x) return true
|
||||
if (deltaX < 0 && after.x < before.x) return true
|
||||
if (deltaY > 0 && after.y > before.y) return true
|
||||
if (deltaY < 0 && after.y < before.y) return true
|
||||
return false
|
||||
}
|
||||
|
||||
private async fallbackWindowScroll(
|
||||
private async scrollWindow(
|
||||
session: ProtocolApi,
|
||||
deltaX: number,
|
||||
deltaY: number,
|
||||
|
||||
@@ -98,12 +98,6 @@ interface PreparedRuntimeContext {
|
||||
openclawSessionKey: string | null
|
||||
}
|
||||
|
||||
const BROWSEROS_ACP_AGENT_INSTRUCTIONS = `<role>
|
||||
You are BrowserOS - a browser agent with full control of a Chromium browser through the BrowserOS MCP server.
|
||||
|
||||
Use the BrowserOS MCP server for all browser tasks, including browsing the web, interacting with pages, inspecting browser state, and managing tabs, windows, bookmarks, and history.
|
||||
</role>`
|
||||
|
||||
export class AcpxRuntime implements AgentRuntime {
|
||||
private readonly defaultCwd: string | null
|
||||
private readonly browserosDir: string
|
||||
@@ -509,14 +503,16 @@ export function unwrapBrowserosAcpUserMessage(raw: string): string {
|
||||
}
|
||||
|
||||
function stripOuterRoleEnvelope(value: string): string {
|
||||
const prefix = `${BROWSEROS_ACP_AGENT_INSTRUCTIONS}
|
||||
|
||||
<user_request>
|
||||
`
|
||||
const suffix = `
|
||||
</user_request>`
|
||||
if (!value.startsWith(prefix) || !value.endsWith(suffix)) return value
|
||||
return value.slice(prefix.length, -suffix.length)
|
||||
// Any `<role>…</role>\n\n<user_request>\n…\n</user_request>` envelope.
|
||||
// Adapter-agnostic so both the BrowserOS multi-line role block and the
|
||||
// openclaw single-line role block get unwrapped. TKT-774's exact-prefix
|
||||
// match only covered the BrowserOS form, so the openclaw envelope
|
||||
// (added when openclaw moved to its own prepare step) was landing
|
||||
// unwrapped in history payloads.
|
||||
const match = value.match(
|
||||
/^<role\b[^>]*>[\s\S]*?<\/role>\n\n<user_request>\n([\s\S]*?)\n<\/user_request>$/,
|
||||
)
|
||||
return match ? match[1] : value
|
||||
}
|
||||
|
||||
function stripOuterRuntimeEnvelope(value: string): string {
|
||||
|
||||
@@ -27,6 +27,20 @@ export interface AgentHistoryPage {
|
||||
items: AgentHistoryEntry[]
|
||||
}
|
||||
|
||||
/**
|
||||
* One file the harness attributed to the assistant turn that just
|
||||
* finished. Emitted as part of a `produced_files` event before the
|
||||
* terminal `done` so the inline artifact card renders alongside the
|
||||
* streamed text the user just watched complete.
|
||||
*/
|
||||
export interface ProducedFileEventEntry {
|
||||
id: string
|
||||
/** Workspace-relative POSIX path. */
|
||||
path: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
export type AgentStreamEvent =
|
||||
| {
|
||||
type: 'text_delta'
|
||||
@@ -47,6 +61,10 @@ export type AgentStreamEvent =
|
||||
text: string
|
||||
rawType?: string
|
||||
}
|
||||
| {
|
||||
type: 'produced_files'
|
||||
files: ProducedFileEventEntry[]
|
||||
}
|
||||
| {
|
||||
type: 'done'
|
||||
text?: string
|
||||
|
||||
@@ -44,11 +44,44 @@ function createGoogleModel(config: ResolvedLLMConfig): LanguageModel {
|
||||
return createGoogleGenerativeAI({ apiKey: config.apiKey })(config.model)
|
||||
}
|
||||
|
||||
function buildOpenRouterExtraBody(
|
||||
config: ResolvedLLMConfig,
|
||||
): Record<string, unknown> {
|
||||
const body: Record<string, unknown> = {}
|
||||
|
||||
if (config.reasoning?.enabled !== undefined) {
|
||||
body.reasoning = { enabled: config.reasoning.enabled }
|
||||
}
|
||||
if (config.verbosity !== undefined) body.verbosity = config.verbosity
|
||||
if (config.providerRouting !== undefined) {
|
||||
body.provider = {
|
||||
...(config.providerRouting.order !== undefined && {
|
||||
order: config.providerRouting.order,
|
||||
}),
|
||||
...(config.providerRouting.only !== undefined && {
|
||||
only: config.providerRouting.only,
|
||||
}),
|
||||
...(config.providerRouting.ignore !== undefined && {
|
||||
ignore: config.providerRouting.ignore,
|
||||
}),
|
||||
...(config.providerRouting.allowFallbacks !== undefined && {
|
||||
allow_fallbacks: config.providerRouting.allowFallbacks,
|
||||
}),
|
||||
...(config.providerRouting.requireParameters !== undefined && {
|
||||
require_parameters: config.providerRouting.requireParameters,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
function createOpenRouterModel(config: ResolvedLLMConfig): LanguageModel {
|
||||
if (!config.apiKey) throw new Error('OpenRouter provider requires apiKey')
|
||||
const extraBody = buildOpenRouterExtraBody(config)
|
||||
return createOpenRouter({
|
||||
apiKey: config.apiKey,
|
||||
extraBody: { reasoning: {} },
|
||||
...(Object.keys(extraBody).length > 0 ? { extraBody } : {}),
|
||||
fetch: createOpenRouterCompatibleFetch(),
|
||||
})(config.model)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
CREATE TABLE `produced_files` (
|
||||
`id` text PRIMARY KEY NOT NULL,
|
||||
`agent_definition_id` text NOT NULL,
|
||||
`session_key` text NOT NULL,
|
||||
`turn_id` text NOT NULL,
|
||||
`turn_prompt` text NOT NULL,
|
||||
`path` text NOT NULL,
|
||||
`size` integer NOT NULL,
|
||||
`mtime_ms` integer NOT NULL,
|
||||
`created_at` integer NOT NULL,
|
||||
`detected_by` text DEFAULT 'diff' NOT NULL,
|
||||
FOREIGN KEY (`agent_definition_id`) REFERENCES `agent_definitions`(`id`) ON UPDATE no action ON DELETE cascade
|
||||
);
|
||||
--> statement-breakpoint
|
||||
CREATE UNIQUE INDEX `produced_files_agent_path_unique` ON `produced_files` (`agent_definition_id`,`path`);--> statement-breakpoint
|
||||
CREATE INDEX `produced_files_agent_created_idx` ON `produced_files` (`agent_definition_id`,`created_at`);--> statement-breakpoint
|
||||
CREATE INDEX `produced_files_turn_idx` ON `produced_files` (`turn_id`);--> statement-breakpoint
|
||||
CREATE INDEX `produced_files_session_idx` ON `produced_files` (`session_key`);
|
||||
@@ -0,0 +1,338 @@
|
||||
{
|
||||
"version": "6",
|
||||
"dialect": "sqlite",
|
||||
"id": "a8560df5-6cbe-46c2-b7df-ef0d09d232bf",
|
||||
"prevId": "6be24444-91aa-492e-96e5-d84c0f020468",
|
||||
"tables": {
|
||||
"agent_definitions": {
|
||||
"name": "agent_definitions",
|
||||
"columns": {
|
||||
"id": {
|
||||
"name": "id",
|
||||
"type": "text",
|
||||
"primaryKey": true,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"name": {
|
||||
"name": "name",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"adapter": {
|
||||
"name": "adapter",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"model_id": {
|
||||
"name": "model_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"reasoning_effort": {
|
||||
"name": "reasoning_effort",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"permission_mode": {
|
||||
"name": "permission_mode",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false,
|
||||
"default": "'approve-all'"
|
||||
},
|
||||
"session_key": {
|
||||
"name": "session_key",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"pinned": {
|
||||
"name": "pinned",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false,
|
||||
"default": false
|
||||
},
|
||||
"adapter_config_json": {
|
||||
"name": "adapter_config_json",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"created_at": {
|
||||
"name": "created_at",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"updated_at": {
|
||||
"name": "updated_at",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
}
|
||||
},
|
||||
"indexes": {
|
||||
"agent_definitions_session_key_unique": {
|
||||
"name": "agent_definitions_session_key_unique",
|
||||
"columns": [
|
||||
"session_key"
|
||||
],
|
||||
"isUnique": true
|
||||
},
|
||||
"agent_definitions_updated_at_idx": {
|
||||
"name": "agent_definitions_updated_at_idx",
|
||||
"columns": [
|
||||
"updated_at"
|
||||
],
|
||||
"isUnique": false
|
||||
},
|
||||
"agent_definitions_adapter_updated_at_idx": {
|
||||
"name": "agent_definitions_adapter_updated_at_idx",
|
||||
"columns": [
|
||||
"adapter",
|
||||
"updated_at"
|
||||
],
|
||||
"isUnique": false
|
||||
}
|
||||
},
|
||||
"foreignKeys": {},
|
||||
"compositePrimaryKeys": {},
|
||||
"uniqueConstraints": {},
|
||||
"checkConstraints": {}
|
||||
},
|
||||
"oauth_tokens": {
|
||||
"name": "oauth_tokens",
|
||||
"columns": {
|
||||
"browseros_id": {
|
||||
"name": "browseros_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"provider": {
|
||||
"name": "provider",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"access_token": {
|
||||
"name": "access_token",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"refresh_token": {
|
||||
"name": "refresh_token",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"expires_at": {
|
||||
"name": "expires_at",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"email": {
|
||||
"name": "email",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"account_id": {
|
||||
"name": "account_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"updated_at": {
|
||||
"name": "updated_at",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
}
|
||||
},
|
||||
"indexes": {
|
||||
"oauth_tokens_browseros_id_idx": {
|
||||
"name": "oauth_tokens_browseros_id_idx",
|
||||
"columns": [
|
||||
"browseros_id"
|
||||
],
|
||||
"isUnique": false
|
||||
}
|
||||
},
|
||||
"foreignKeys": {},
|
||||
"compositePrimaryKeys": {
|
||||
"oauth_tokens_browseros_id_provider_pk": {
|
||||
"columns": [
|
||||
"browseros_id",
|
||||
"provider"
|
||||
],
|
||||
"name": "oauth_tokens_browseros_id_provider_pk"
|
||||
}
|
||||
},
|
||||
"uniqueConstraints": {},
|
||||
"checkConstraints": {}
|
||||
},
|
||||
"produced_files": {
|
||||
"name": "produced_files",
|
||||
"columns": {
|
||||
"id": {
|
||||
"name": "id",
|
||||
"type": "text",
|
||||
"primaryKey": true,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"agent_definition_id": {
|
||||
"name": "agent_definition_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"session_key": {
|
||||
"name": "session_key",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"turn_id": {
|
||||
"name": "turn_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"turn_prompt": {
|
||||
"name": "turn_prompt",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"path": {
|
||||
"name": "path",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"size": {
|
||||
"name": "size",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"mtime_ms": {
|
||||
"name": "mtime_ms",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"created_at": {
|
||||
"name": "created_at",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"detected_by": {
|
||||
"name": "detected_by",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false,
|
||||
"default": "'diff'"
|
||||
}
|
||||
},
|
||||
"indexes": {
|
||||
"produced_files_agent_path_unique": {
|
||||
"name": "produced_files_agent_path_unique",
|
||||
"columns": [
|
||||
"agent_definition_id",
|
||||
"path"
|
||||
],
|
||||
"isUnique": true
|
||||
},
|
||||
"produced_files_agent_created_idx": {
|
||||
"name": "produced_files_agent_created_idx",
|
||||
"columns": [
|
||||
"agent_definition_id",
|
||||
"created_at"
|
||||
],
|
||||
"isUnique": false
|
||||
},
|
||||
"produced_files_turn_idx": {
|
||||
"name": "produced_files_turn_idx",
|
||||
"columns": [
|
||||
"turn_id"
|
||||
],
|
||||
"isUnique": false
|
||||
},
|
||||
"produced_files_session_idx": {
|
||||
"name": "produced_files_session_idx",
|
||||
"columns": [
|
||||
"session_key"
|
||||
],
|
||||
"isUnique": false
|
||||
}
|
||||
},
|
||||
"foreignKeys": {
|
||||
"produced_files_agent_definition_id_agent_definitions_id_fk": {
|
||||
"name": "produced_files_agent_definition_id_agent_definitions_id_fk",
|
||||
"tableFrom": "produced_files",
|
||||
"tableTo": "agent_definitions",
|
||||
"columnsFrom": [
|
||||
"agent_definition_id"
|
||||
],
|
||||
"columnsTo": [
|
||||
"id"
|
||||
],
|
||||
"onDelete": "cascade",
|
||||
"onUpdate": "no action"
|
||||
}
|
||||
},
|
||||
"compositePrimaryKeys": {},
|
||||
"uniqueConstraints": {},
|
||||
"checkConstraints": {}
|
||||
}
|
||||
},
|
||||
"views": {},
|
||||
"enums": {},
|
||||
"_meta": {
|
||||
"schemas": {},
|
||||
"tables": {},
|
||||
"columns": {}
|
||||
},
|
||||
"internal": {
|
||||
"indexes": {}
|
||||
}
|
||||
}
|
||||
@@ -15,6 +15,13 @@
|
||||
"when": 1777752799806,
|
||||
"tag": "0001_lazy_orphan",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 2,
|
||||
"version": "6",
|
||||
"when": 1777902205667,
|
||||
"tag": "0002_chemical_whirlwind",
|
||||
"breakpoints": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -6,3 +6,4 @@
|
||||
|
||||
export * from './agents'
|
||||
export * from './oauth'
|
||||
export * from './produced-files'
|
||||
|
||||
@@ -0,0 +1,93 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import type { InferInsertModel, InferSelectModel } from 'drizzle-orm'
|
||||
import {
|
||||
index,
|
||||
integer,
|
||||
sqliteTable,
|
||||
text,
|
||||
uniqueIndex,
|
||||
} from 'drizzle-orm/sqlite-core'
|
||||
import { agentDefinitions } from './agents'
|
||||
|
||||
/**
|
||||
* Files an OpenClaw agent produced as part of a chat turn. Populated by
|
||||
* a per-turn workspace diff: snapshot the agent's CWD before
|
||||
* `chatStream(...)` runs, re-scan after the SSE `done` event fires,
|
||||
* write rows for any new or modified path. The rail UI groups by
|
||||
* `turn_id` and the inline artifact card renders one row per file.
|
||||
*
|
||||
* Schema is intentionally adapter-agnostic — V1 only enables the
|
||||
* watcher for the openclaw adapter, but V2 can plug Claude / Codex
|
||||
* into the same table without migrating.
|
||||
*/
|
||||
export const producedFiles = sqliteTable(
|
||||
'produced_files',
|
||||
{
|
||||
/** Stable id; opaque file handle in download/preview URLs. */
|
||||
id: text('id').primaryKey(),
|
||||
|
||||
/** FK → agent_definitions.id; CASCADE so agent deletion sweeps. */
|
||||
agentDefinitionId: text('agent_definition_id')
|
||||
.notNull()
|
||||
.references(() => agentDefinitions.id, { onDelete: 'cascade' }),
|
||||
|
||||
/** OpenClaw session that owns this turn (e.g. `session-abc`). */
|
||||
sessionKey: text('session_key').notNull(),
|
||||
|
||||
/** Identifier for the assistant turn that produced the file. */
|
||||
turnId: text('turn_id').notNull(),
|
||||
|
||||
/**
|
||||
* The user prompt that initiated this turn — denormalised so the
|
||||
* rail's "group by prompt" header doesn't have to join the JSONL
|
||||
* log. Capped at 280 chars in code; the column is unbounded.
|
||||
*/
|
||||
turnPrompt: text('turn_prompt').notNull(),
|
||||
|
||||
/** Workspace-relative path (e.g. `reports/q1.pdf`). */
|
||||
path: text('path').notNull(),
|
||||
|
||||
size: integer('size').notNull(),
|
||||
|
||||
/** mtime in ms — used to detect re-modifications. */
|
||||
mtimeMs: integer('mtime_ms').notNull(),
|
||||
|
||||
/** Server clock when our watcher first saw it. */
|
||||
createdAt: integer('created_at').notNull(),
|
||||
|
||||
/**
|
||||
* `'diff'` for the V1 per-turn workspace diff watcher;
|
||||
* `'tool'` reserved for the future tool-event parsing layer.
|
||||
*/
|
||||
detectedBy: text('detected_by', { enum: ['diff', 'tool'] })
|
||||
.notNull()
|
||||
.default('diff'),
|
||||
},
|
||||
(table) => [
|
||||
// One row per (agent, path) pair — re-modifications update in place,
|
||||
// so a tool that overwrites `report.pdf` doesn't accumulate
|
||||
// duplicate rows. The most recent turn that touched the file owns
|
||||
// the row.
|
||||
uniqueIndex('produced_files_agent_path_unique').on(
|
||||
table.agentDefinitionId,
|
||||
table.path,
|
||||
),
|
||||
// Outputs-rail query: latest files per agent.
|
||||
index('produced_files_agent_created_idx').on(
|
||||
table.agentDefinitionId,
|
||||
table.createdAt,
|
||||
),
|
||||
// Inline-card query: by turn.
|
||||
index('produced_files_turn_idx').on(table.turnId),
|
||||
// Cleanup hook: by session (when a session is deleted later).
|
||||
index('produced_files_session_idx').on(table.sessionKey),
|
||||
],
|
||||
)
|
||||
|
||||
export type ProducedFileRow = InferSelectModel<typeof producedFiles>
|
||||
export type NewProducedFileRow = InferInsertModel<typeof producedFiles>
|
||||
@@ -1,5 +1,80 @@
|
||||
import { APICallError } from '@ai-sdk/provider'
|
||||
|
||||
function isRecord(value: unknown): value is Record<string, unknown> {
|
||||
return typeof value === 'object' && value !== null && !Array.isArray(value)
|
||||
}
|
||||
|
||||
function getOpenRouterErrorMessage(parsed: unknown, fallback: string): string {
|
||||
if (!isRecord(parsed) || !isRecord(parsed.error)) return fallback
|
||||
|
||||
let message =
|
||||
typeof parsed.error.message === 'string' ? parsed.error.message : fallback
|
||||
|
||||
if (parsed.error.code !== undefined) {
|
||||
message = `[${String(parsed.error.code)}] ${message}`
|
||||
}
|
||||
|
||||
const metadata = isRecord(parsed.error.metadata)
|
||||
? parsed.error.metadata
|
||||
: undefined
|
||||
if (metadata?.raw !== undefined) {
|
||||
message += ` (${JSON.stringify(metadata.raw)})`
|
||||
}
|
||||
|
||||
return message
|
||||
}
|
||||
|
||||
function getOpenRouterErrorStatus(parsed: unknown, fallback: number): number {
|
||||
if (!isRecord(parsed) || !isRecord(parsed.error)) return fallback
|
||||
return typeof parsed.error.code === 'number' ? parsed.error.code : fallback
|
||||
}
|
||||
|
||||
function sanitizeReasoningReplay(
|
||||
options?: RequestInit,
|
||||
): RequestInit | undefined {
|
||||
if (typeof options?.body !== 'string') return options
|
||||
|
||||
let body: unknown
|
||||
try {
|
||||
body = JSON.parse(options.body)
|
||||
} catch {
|
||||
return options
|
||||
}
|
||||
|
||||
if (!isRecord(body) || !Array.isArray(body.messages)) return options
|
||||
|
||||
let changed = false
|
||||
const messages = body.messages.map((message) => {
|
||||
if (
|
||||
!isRecord(message) ||
|
||||
message.role !== 'assistant' ||
|
||||
!Array.isArray(message.reasoning_details) ||
|
||||
message.reasoning_details.length === 0 ||
|
||||
(!('reasoning' in message) && !('reasoning_content' in message))
|
||||
) {
|
||||
return message
|
||||
}
|
||||
|
||||
const {
|
||||
reasoning: _reasoning,
|
||||
reasoning_content: _reasoningContent,
|
||||
...rest
|
||||
} = message
|
||||
changed = true
|
||||
return rest
|
||||
})
|
||||
|
||||
if (!changed) return options
|
||||
|
||||
return {
|
||||
...options,
|
||||
body: JSON.stringify({
|
||||
...body,
|
||||
messages,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a fetch function that extracts detailed error messages from OpenRouter-style APIs.
|
||||
*
|
||||
@@ -13,25 +88,24 @@ import { APICallError } from '@ai-sdk/provider'
|
||||
*/
|
||||
export function createOpenRouterCompatibleFetch(): typeof fetch {
|
||||
return (async (url: RequestInfo | URL, options?: RequestInit) => {
|
||||
const response = await globalThis.fetch(url, options)
|
||||
const response = await globalThis.fetch(
|
||||
url,
|
||||
sanitizeReasoningReplay(options),
|
||||
)
|
||||
let responseBody: string | undefined
|
||||
let parsedResponseBody: unknown
|
||||
|
||||
if (!response.ok) {
|
||||
const statusCode = response.status
|
||||
let errorMessage = `HTTP ${statusCode}: ${response.statusText}`
|
||||
let responseBody: string | undefined
|
||||
|
||||
try {
|
||||
responseBody = await response.clone().text()
|
||||
const parsed = JSON.parse(responseBody)
|
||||
if (parsed.error?.message) {
|
||||
errorMessage = parsed.error.message
|
||||
if (parsed.error.code) {
|
||||
errorMessage = `[${parsed.error.code}] ${errorMessage}`
|
||||
}
|
||||
if (parsed.error.metadata?.raw) {
|
||||
errorMessage += ` (${JSON.stringify(parsed.error.metadata.raw)})`
|
||||
}
|
||||
}
|
||||
parsedResponseBody = JSON.parse(responseBody)
|
||||
errorMessage = getOpenRouterErrorMessage(
|
||||
parsedResponseBody,
|
||||
errorMessage,
|
||||
)
|
||||
} catch {
|
||||
// Keep default error message if parsing fails
|
||||
}
|
||||
@@ -45,6 +119,28 @@ export function createOpenRouterCompatibleFetch(): typeof fetch {
|
||||
})
|
||||
}
|
||||
|
||||
if (response.headers.get('content-type')?.includes('application/json')) {
|
||||
try {
|
||||
responseBody = await response.clone().text()
|
||||
parsedResponseBody = JSON.parse(responseBody)
|
||||
} catch {
|
||||
parsedResponseBody = undefined
|
||||
}
|
||||
|
||||
if (isRecord(parsedResponseBody) && isRecord(parsedResponseBody.error)) {
|
||||
throw new APICallError({
|
||||
message: getOpenRouterErrorMessage(
|
||||
parsedResponseBody,
|
||||
'Provider returned error',
|
||||
),
|
||||
url: typeof url === 'string' ? url : url.toString(),
|
||||
requestBodyValues: {},
|
||||
statusCode: getOpenRouterErrorStatus(parsedResponseBody, 400),
|
||||
responseBody,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}) as typeof fetch
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ const GUARDED_TOOLS = new Set([
|
||||
'uncheck',
|
||||
'select_option',
|
||||
'press_key',
|
||||
'type_text',
|
||||
'upload_file',
|
||||
])
|
||||
|
||||
@@ -39,6 +40,9 @@ async function resolveTargetElementId(
|
||||
if (toolName === 'drag' && typeof args.sourceElement === 'number') {
|
||||
return args.sourceElement
|
||||
}
|
||||
if (toolName === 'type_text') {
|
||||
return (await browser.resolveFocusedElement(pageId)) ?? undefined
|
||||
}
|
||||
|
||||
if (typeof args.x === 'number' && typeof args.y === 'number') {
|
||||
return (
|
||||
|
||||
@@ -29,6 +29,7 @@ export interface ToolDirectories {
|
||||
export interface ToolSessionContext {
|
||||
origin?: 'sidepanel' | 'newtab'
|
||||
originPageId?: number
|
||||
suppressSnapshotOutputs?: boolean
|
||||
}
|
||||
|
||||
export type ToolContext = {
|
||||
@@ -135,7 +136,9 @@ export async function executeTool(
|
||||
response.error(`Internal error in ${tool.name}: ${message}`)
|
||||
}
|
||||
|
||||
const result = await response.build(ctx.browser)
|
||||
const result = await response.build(ctx.browser, {
|
||||
suppressSnapshots: ctx.session?.suppressSnapshotOutputs,
|
||||
})
|
||||
|
||||
const pageId = (args as Record<string, unknown>).page
|
||||
if (typeof pageId === 'number') {
|
||||
|
||||
@@ -0,0 +1,108 @@
|
||||
import type { ToolContext } from './framework'
|
||||
import {
|
||||
getPngDimensionsFromBase64,
|
||||
requestMolmoPoint,
|
||||
} from './molmo-point-client'
|
||||
|
||||
const LOG_TEXT_MAX_CHARS = 500
|
||||
|
||||
function truncateForLog(value: string | undefined): string | undefined {
|
||||
if (!value) return value
|
||||
if (value.length <= LOG_TEXT_MAX_CHARS) return value
|
||||
return `${value.slice(0, LOG_TEXT_MAX_CHARS)}... (+${value.length - LOG_TEXT_MAX_CHARS} chars)`
|
||||
}
|
||||
|
||||
export interface GuiPointResult {
|
||||
x: number
|
||||
y: number
|
||||
hitElement: GuiHitElement | null
|
||||
log: Record<string, unknown>
|
||||
}
|
||||
|
||||
export interface GuiHitElement {
|
||||
tagName: string
|
||||
role?: string
|
||||
ariaLabel?: string
|
||||
labelText?: string
|
||||
textContent?: string
|
||||
}
|
||||
|
||||
function summarizeHitElement(
|
||||
hitElement: Awaited<
|
||||
ReturnType<ToolContext['browser']['resolveElementProperties']>
|
||||
>,
|
||||
): GuiHitElement | null {
|
||||
if (!hitElement) return null
|
||||
|
||||
return {
|
||||
tagName: hitElement.tagName,
|
||||
role: hitElement.role,
|
||||
ariaLabel: truncateForLog(hitElement.ariaLabel),
|
||||
labelText: truncateForLog(hitElement.labelText),
|
||||
textContent: truncateForLog(hitElement.textContent),
|
||||
}
|
||||
}
|
||||
|
||||
export async function resolveGuiPoint(
|
||||
ctx: ToolContext,
|
||||
page: number,
|
||||
prompt: string,
|
||||
): Promise<GuiPointResult> {
|
||||
const screenshot = await ctx.browser.screenshot(page, {
|
||||
format: 'png',
|
||||
fullPage: false,
|
||||
})
|
||||
const point = await requestMolmoPoint({
|
||||
instruction: prompt,
|
||||
imageB64: screenshot.data,
|
||||
})
|
||||
|
||||
const dimensions = getPngDimensionsFromBase64(screenshot.data)
|
||||
const viewport = await ctx.browser.viewportSize(page).catch(() => null)
|
||||
const scaleX =
|
||||
dimensions && viewport?.width
|
||||
? dimensions.width / viewport.width
|
||||
: screenshot.devicePixelRatio
|
||||
const scaleY =
|
||||
dimensions && viewport?.height
|
||||
? dimensions.height / viewport.height
|
||||
: screenshot.devicePixelRatio
|
||||
const x = point.x / (scaleX || 1)
|
||||
const y = point.y / (scaleY || 1)
|
||||
const pageInfo = await ctx.browser.refreshPageInfo(page).catch(() => null)
|
||||
const hitElementId = await ctx.browser
|
||||
.resolveElementAtPoint(page, x, y)
|
||||
.catch(() => null)
|
||||
const hitElement =
|
||||
hitElementId !== null
|
||||
? await ctx.browser
|
||||
.resolveElementProperties(page, hitElementId)
|
||||
.catch(() => null)
|
||||
: null
|
||||
const hitElementSummary = summarizeHitElement(hitElement)
|
||||
|
||||
return {
|
||||
x,
|
||||
y,
|
||||
hitElement: hitElementSummary,
|
||||
log: {
|
||||
page,
|
||||
pageUrl: truncateForLog(pageInfo?.url),
|
||||
pageTitle: truncateForLog(pageInfo?.title),
|
||||
prompt: truncateForLog(prompt),
|
||||
promptLength: prompt.length,
|
||||
promptTruncated: prompt.length > LOG_TEXT_MAX_CHARS,
|
||||
modelPoint: point,
|
||||
resolvedPoint: { x, y },
|
||||
scale: { x: scaleX, y: scaleY },
|
||||
screenshot: {
|
||||
width: dimensions?.width,
|
||||
height: dimensions?.height,
|
||||
devicePixelRatio: screenshot.devicePixelRatio,
|
||||
},
|
||||
viewport,
|
||||
hitElementId,
|
||||
hitElement: hitElementSummary,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,18 +1,88 @@
|
||||
import { z } from 'zod'
|
||||
import { defineToolWithCategory } from './framework'
|
||||
import { logger } from '../lib/logger'
|
||||
import { defineToolWithCategory, type ToolContext } from './framework'
|
||||
import { type GuiHitElement, resolveGuiPoint } from './gui-click-resolver'
|
||||
import type { ToolResponse } from './response'
|
||||
|
||||
const pageParam = z.number().describe('Page ID (from list_pages)')
|
||||
const defineInputTool = defineToolWithCategory('input')
|
||||
const elementParam = z
|
||||
.number()
|
||||
.describe('Element ID from snapshot (the number in [N])')
|
||||
const guiHitElementOutput = z
|
||||
.object({
|
||||
tagName: z.string(),
|
||||
role: z.string().optional(),
|
||||
ariaLabel: z.string().optional(),
|
||||
labelText: z.string().optional(),
|
||||
textContent: z.string().optional(),
|
||||
})
|
||||
.nullable()
|
||||
|
||||
function quoteForAgent(value: string): string {
|
||||
return JSON.stringify(value)
|
||||
}
|
||||
|
||||
function formatHitElementForAgent(hitElement: GuiHitElement | null): string {
|
||||
if (!hitElement) {
|
||||
return 'The click was successful, but no hit element could be resolved at the click point.'
|
||||
}
|
||||
|
||||
const details = [`tagName=${quoteForAgent(hitElement.tagName)}`]
|
||||
if (hitElement.role) details.push(`role=${quoteForAgent(hitElement.role)}`)
|
||||
if (hitElement.ariaLabel) {
|
||||
details.push(`ariaLabel=${quoteForAgent(hitElement.ariaLabel)}`)
|
||||
}
|
||||
if (hitElement.labelText) {
|
||||
details.push(`labelText=${quoteForAgent(hitElement.labelText)}`)
|
||||
}
|
||||
if (hitElement.textContent) {
|
||||
details.push(`textContent=${quoteForAgent(hitElement.textContent)}`)
|
||||
}
|
||||
|
||||
return `The click was successful and hit the element: ${details.join(', ')}.`
|
||||
}
|
||||
|
||||
async function enforceAcl(
|
||||
toolName: string,
|
||||
args: Record<string, unknown>,
|
||||
ctx: ToolContext,
|
||||
response: ToolResponse,
|
||||
): Promise<boolean> {
|
||||
if (!ctx.aclRules?.length) return false
|
||||
|
||||
const { checkAcl } = await import('./acl/acl-guard')
|
||||
const check = await checkAcl(toolName, args, ctx.browser, ctx.aclRules)
|
||||
if (!check.blocked) return false
|
||||
|
||||
const desc =
|
||||
check.rule?.description ??
|
||||
check.rule?.textMatch ??
|
||||
check.rule?.sitePattern ??
|
||||
'ACL rule'
|
||||
if (check.pageId !== undefined && check.elementId !== undefined) {
|
||||
await ctx.browser.highlightBlockedElement(
|
||||
check.pageId,
|
||||
check.elementId,
|
||||
desc,
|
||||
)
|
||||
}
|
||||
response.error(
|
||||
`Action blocked by ACL rule: "${desc}". The element on this page is restricted. Choose a different action or skip this step.`,
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
export const click = defineInputTool({
|
||||
name: 'click',
|
||||
description: 'Click an element by its ID from the last snapshot',
|
||||
description:
|
||||
'Click a visible page target using the GUI click model. Provide a concise visual prompt for what to click.',
|
||||
input: z.object({
|
||||
page: pageParam,
|
||||
element: elementParam,
|
||||
prompt: z
|
||||
.string()
|
||||
.min(1)
|
||||
.describe('Visual click instruction, e.g. "click the search box"'),
|
||||
button: z
|
||||
.enum(['left', 'right', 'middle'])
|
||||
.default('left')
|
||||
@@ -25,27 +95,49 @@ export const click = defineInputTool({
|
||||
output: z.object({
|
||||
action: z.literal('click'),
|
||||
page: z.number(),
|
||||
element: z.number(),
|
||||
prompt: z.string(),
|
||||
button: z.enum(['left', 'right', 'middle']),
|
||||
clickCount: z.number(),
|
||||
x: z.number(),
|
||||
y: z.number(),
|
||||
hitElement: guiHitElementOutput,
|
||||
guiPointDebug: z.record(z.unknown()).optional(),
|
||||
}),
|
||||
handler: async (args, ctx, response) => {
|
||||
const coords = await ctx.browser.click(args.page, args.element, {
|
||||
const { x, y, hitElement, log } = await resolveGuiPoint(
|
||||
ctx,
|
||||
args.page,
|
||||
args.prompt,
|
||||
)
|
||||
const clickLog = {
|
||||
...log,
|
||||
clickPoint: { x, y },
|
||||
button: args.button,
|
||||
clickCount: args.clickCount,
|
||||
}
|
||||
|
||||
const blocked = await enforceAcl('click', { ...args, x, y }, ctx, response)
|
||||
if (blocked) {
|
||||
logger.info('GUI click blocked by ACL', clickLog)
|
||||
return
|
||||
}
|
||||
|
||||
await ctx.browser.clickAt(args.page, x, y, {
|
||||
button: args.button,
|
||||
clickCount: args.clickCount,
|
||||
})
|
||||
const coordText = coords
|
||||
? ` at (${Math.round(coords.x)}, ${Math.round(coords.y)})`
|
||||
: ''
|
||||
response.text(`Clicked [${args.element}]${coordText}`)
|
||||
response.text(formatHitElementForAgent(hitElement))
|
||||
response.data({
|
||||
action: 'click',
|
||||
page: args.page,
|
||||
element: args.element,
|
||||
prompt: args.prompt,
|
||||
button: args.button,
|
||||
clickCount: args.clickCount,
|
||||
x,
|
||||
y,
|
||||
hitElement,
|
||||
guiPointDebug: clickLog,
|
||||
})
|
||||
response.includeSnapshot(args.page)
|
||||
},
|
||||
})
|
||||
|
||||
@@ -146,22 +238,43 @@ export const drag_at = defineInputTool({
|
||||
|
||||
export const hover = defineInputTool({
|
||||
name: 'hover',
|
||||
description: 'Hover over an element by its ID',
|
||||
description:
|
||||
'Hover over a visible page target using the GUI click model. Provide a concise visual prompt for what to hover.',
|
||||
input: z.object({
|
||||
page: pageParam,
|
||||
element: elementParam,
|
||||
prompt: z
|
||||
.string()
|
||||
.min(1)
|
||||
.describe('Visual hover instruction, e.g. "hover the account menu"'),
|
||||
}),
|
||||
output: z.object({
|
||||
action: z.literal('hover'),
|
||||
page: z.number(),
|
||||
element: z.number(),
|
||||
prompt: z.string(),
|
||||
x: z.number(),
|
||||
y: z.number(),
|
||||
guiPointDebug: z.record(z.unknown()).optional(),
|
||||
}),
|
||||
handler: async (args, ctx, response) => {
|
||||
const coords = await ctx.browser.hover(args.page, args.element)
|
||||
response.text(
|
||||
`Hovered over [${args.element}] at (${Math.round(coords.x)}, ${Math.round(coords.y)})`,
|
||||
)
|
||||
response.data({ action: 'hover', page: args.page, element: args.element })
|
||||
const { x, y, log } = await resolveGuiPoint(ctx, args.page, args.prompt)
|
||||
const hoverLog = { ...log, hoverPoint: { x, y } }
|
||||
|
||||
const blocked = await enforceAcl('hover', { ...args, x, y }, ctx, response)
|
||||
if (blocked) {
|
||||
logger.info('GUI hover blocked by ACL', hoverLog)
|
||||
return
|
||||
}
|
||||
|
||||
await ctx.browser.hoverAt(args.page, x, y)
|
||||
response.text('tool call executed successfully')
|
||||
response.data({
|
||||
action: 'hover',
|
||||
page: args.page,
|
||||
prompt: args.prompt,
|
||||
x,
|
||||
y,
|
||||
guiPointDebug: hoverLog,
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
@@ -251,6 +364,32 @@ export const press_key = defineInputTool({
|
||||
},
|
||||
})
|
||||
|
||||
export const type_text = defineInputTool({
|
||||
name: 'type_text',
|
||||
description:
|
||||
'Type text into the currently focused element. Use after GUI click focuses a text field.',
|
||||
input: z.object({
|
||||
page: pageParam,
|
||||
text: z
|
||||
.string()
|
||||
.describe('Text to type into the currently focused element'),
|
||||
}),
|
||||
output: z.object({
|
||||
action: z.literal('type_text'),
|
||||
page: z.number(),
|
||||
textLength: z.number(),
|
||||
}),
|
||||
handler: async (args, ctx, response) => {
|
||||
await ctx.browser.typeText(args.page, args.text)
|
||||
response.text('tool call executed successfully')
|
||||
response.data({
|
||||
action: 'type_text',
|
||||
page: args.page,
|
||||
textLength: args.text.length,
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
export const drag = defineInputTool({
|
||||
name: 'drag',
|
||||
description:
|
||||
@@ -303,7 +442,7 @@ export const drag = defineInputTool({
|
||||
|
||||
export const scroll = defineInputTool({
|
||||
name: 'scroll',
|
||||
description: 'Scroll the page or a specific element',
|
||||
description: 'Scroll the page viewport',
|
||||
input: z.object({
|
||||
page: pageParam,
|
||||
direction: z
|
||||
@@ -311,32 +450,21 @@ export const scroll = defineInputTool({
|
||||
.default('down')
|
||||
.describe('Scroll direction'),
|
||||
amount: z.number().default(3).describe('Number of scroll ticks'),
|
||||
element: z
|
||||
.number()
|
||||
.optional()
|
||||
.describe('Element ID to scroll at (scrolls page center if omitted)'),
|
||||
}),
|
||||
output: z.object({
|
||||
action: z.literal('scroll'),
|
||||
page: z.number(),
|
||||
direction: z.enum(['up', 'down', 'left', 'right']),
|
||||
amount: z.number(),
|
||||
element: z.number().optional(),
|
||||
}),
|
||||
handler: async (args, ctx, response) => {
|
||||
await ctx.browser.scroll(
|
||||
args.page,
|
||||
args.direction,
|
||||
args.amount,
|
||||
args.element,
|
||||
)
|
||||
await ctx.browser.scroll(args.page, args.direction, args.amount)
|
||||
response.text(`Scrolled ${args.direction} by ${args.amount}`)
|
||||
response.data({
|
||||
action: 'scroll',
|
||||
page: args.page,
|
||||
direction: args.direction,
|
||||
amount: args.amount,
|
||||
element: args.element,
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
@@ -0,0 +1,146 @@
|
||||
import { Buffer } from 'node:buffer'
|
||||
import { logger } from '../lib/logger'
|
||||
import {
|
||||
MOLMO_POINT_ENDPOINT,
|
||||
MOLMO_POINT_MAX_NEW_TOKENS,
|
||||
MOLMO_POINT_TIMEOUT_MS,
|
||||
} from './molmo-point-config'
|
||||
|
||||
interface MolmoPoint {
|
||||
object_id?: unknown
|
||||
image_num?: unknown
|
||||
x?: unknown
|
||||
y?: unknown
|
||||
}
|
||||
|
||||
interface MolmoPointResponse {
|
||||
text?: unknown
|
||||
points?: unknown
|
||||
}
|
||||
|
||||
export interface ClickPoint {
|
||||
x: number
|
||||
y: number
|
||||
}
|
||||
|
||||
export interface PngDimensions {
|
||||
width: number
|
||||
height: number
|
||||
}
|
||||
|
||||
const MOLMO_POINT_RESPONSE_LOG_MAX_CHARS = 2_000
|
||||
const MOLMO_POINT_ERROR_BODY_MAX_CHARS = 500
|
||||
const MOLMO_POINT_INSTRUCTION_LOG_MAX_CHARS = 1_000
|
||||
|
||||
function pointUrl(): string {
|
||||
return new URL('/point', MOLMO_POINT_ENDPOINT).toString()
|
||||
}
|
||||
|
||||
function truncateText(text: string, maxChars: number): string {
|
||||
if (text.length <= maxChars) return text
|
||||
return `${text.slice(0, maxChars)}... (+${text.length - maxChars} chars)`
|
||||
}
|
||||
|
||||
function firstValidPoint(points: unknown): ClickPoint | null {
|
||||
if (!Array.isArray(points)) return null
|
||||
|
||||
for (const rawPoint of points) {
|
||||
const point = rawPoint as MolmoPoint
|
||||
if (typeof point.x !== 'number' || typeof point.y !== 'number') continue
|
||||
if (!Number.isFinite(point.x) || !Number.isFinite(point.y)) continue
|
||||
return { x: point.x, y: point.y }
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
export async function requestMolmoPoint(args: {
|
||||
instruction: string
|
||||
imageB64: string
|
||||
}): Promise<ClickPoint> {
|
||||
const endpoint = pointUrl()
|
||||
const instruction = truncateText(
|
||||
args.instruction,
|
||||
MOLMO_POINT_INSTRUCTION_LOG_MAX_CHARS,
|
||||
)
|
||||
const instructionLength = args.instruction.length
|
||||
const instructionTruncated =
|
||||
instructionLength > MOLMO_POINT_INSTRUCTION_LOG_MAX_CHARS
|
||||
|
||||
const response = await fetch(endpoint, {
|
||||
method: 'POST',
|
||||
headers: { 'content-type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
instruction: args.instruction,
|
||||
image_b64: args.imageB64,
|
||||
max_new_tokens: MOLMO_POINT_MAX_NEW_TOKENS,
|
||||
}),
|
||||
signal: AbortSignal.timeout(MOLMO_POINT_TIMEOUT_MS),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const body = await response.text().catch(() => '')
|
||||
logger.warn('Molmo point request failed', {
|
||||
endpoint,
|
||||
instruction,
|
||||
instructionLength,
|
||||
instructionTruncated,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
rawResponseText: truncateText(body, MOLMO_POINT_RESPONSE_LOG_MAX_CHARS),
|
||||
rawResponseTextLength: body.length,
|
||||
rawResponseTextTruncated:
|
||||
body.length > MOLMO_POINT_RESPONSE_LOG_MAX_CHARS,
|
||||
})
|
||||
const suffix = body
|
||||
? `: ${truncateText(body, MOLMO_POINT_ERROR_BODY_MAX_CHARS)}`
|
||||
: ''
|
||||
throw new Error(`Molmo point request failed (${response.status})${suffix}`)
|
||||
}
|
||||
|
||||
const rawResponseText = await response.text()
|
||||
let payload: MolmoPointResponse
|
||||
try {
|
||||
payload = JSON.parse(rawResponseText) as MolmoPointResponse
|
||||
} catch (error) {
|
||||
logger.warn('Molmo point response parse failed', {
|
||||
endpoint,
|
||||
instruction,
|
||||
instructionLength,
|
||||
instructionTruncated,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
rawResponseText: truncateText(
|
||||
rawResponseText,
|
||||
MOLMO_POINT_RESPONSE_LOG_MAX_CHARS,
|
||||
),
|
||||
rawResponseTextLength: rawResponseText.length,
|
||||
rawResponseTextTruncated:
|
||||
rawResponseText.length > MOLMO_POINT_RESPONSE_LOG_MAX_CHARS,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
throw error
|
||||
}
|
||||
|
||||
const point = firstValidPoint(payload.points)
|
||||
if (!point) {
|
||||
throw new Error('Molmo point response did not include a valid point')
|
||||
}
|
||||
|
||||
return point
|
||||
}
|
||||
|
||||
export function getPngDimensionsFromBase64(
|
||||
imageB64: string,
|
||||
): PngDimensions | null {
|
||||
const buffer = Buffer.from(imageB64, 'base64')
|
||||
if (buffer.length < 24) return null
|
||||
|
||||
const pngSignature = '89504e470d0a1a0a'
|
||||
if (buffer.subarray(0, 8).toString('hex') !== pngSignature) return null
|
||||
|
||||
return {
|
||||
width: buffer.readUInt32BE(16),
|
||||
height: buffer.readUInt32BE(20),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,5 @@
|
||||
export const MOLMO_POINT_ENDPOINT =
|
||||
'https://browseros--molmopoint-gui-molmopointserver-web.modal.run/'
|
||||
|
||||
export const MOLMO_POINT_MAX_NEW_TOKENS = 64
|
||||
export const MOLMO_POINT_TIMEOUT_MS = 60_000
|
||||
@@ -31,6 +31,7 @@ import {
|
||||
scroll,
|
||||
select_option,
|
||||
type_at,
|
||||
type_text,
|
||||
uncheck,
|
||||
upload_file,
|
||||
} from './input'
|
||||
@@ -95,12 +96,13 @@ export const registry = createRegistry([
|
||||
evaluate_script,
|
||||
get_console_logs,
|
||||
|
||||
// Input (17)
|
||||
// Input (18)
|
||||
click,
|
||||
click_at,
|
||||
hover,
|
||||
hover_at,
|
||||
type_at,
|
||||
type_text,
|
||||
drag_at,
|
||||
focus,
|
||||
clear,
|
||||
|
||||
@@ -25,6 +25,10 @@ interface ToolResponseOptions {
|
||||
postActionTimeoutMs?: number
|
||||
}
|
||||
|
||||
interface ToolResponseBuildOptions {
|
||||
suppressSnapshots?: boolean
|
||||
}
|
||||
|
||||
export class ToolResponse {
|
||||
private content: ContentItem[] = []
|
||||
private hasError = false
|
||||
@@ -123,12 +127,19 @@ export class ToolResponse {
|
||||
}
|
||||
}
|
||||
|
||||
async build(browser: Browser): Promise<ToolResult> {
|
||||
if (this.postActions.length > 0) {
|
||||
async build(
|
||||
browser: Browser,
|
||||
options: ToolResponseBuildOptions = {},
|
||||
): Promise<ToolResult> {
|
||||
const postActions = options.suppressSnapshots
|
||||
? this.postActions.filter((action) => action.type !== 'snapshot')
|
||||
: this.postActions
|
||||
|
||||
if (postActions.length > 0) {
|
||||
this.text('\n--- Additional context (auto-included) ---')
|
||||
}
|
||||
|
||||
for (const action of this.postActions) {
|
||||
for (const action of postActions) {
|
||||
try {
|
||||
await this.withTimeout(this.runPostAction(action, browser))
|
||||
} catch {
|
||||
|
||||
@@ -36,6 +36,7 @@ const VERB_OVERRIDES: Record<string, string> = {
|
||||
hover: 'Hovered',
|
||||
hover_at: 'Hovered at coordinates',
|
||||
type_at: 'Typed at coordinates',
|
||||
type_text: 'Typed text',
|
||||
drag_at: 'Dragged',
|
||||
focus: 'Focused element',
|
||||
fill: 'Filled field',
|
||||
@@ -186,8 +187,8 @@ const SUBJECT_EXTRACTORS: Record<string, SubjectExtractor> = {
|
||||
find_files: (i) => quote(stringField(i, 'pattern', 'query')),
|
||||
|
||||
// Element interactions
|
||||
click: (i) => stringField(i, 'element'),
|
||||
hover: (i) => stringField(i, 'element'),
|
||||
click: (i) => stringField(i, 'prompt'),
|
||||
hover: (i) => stringField(i, 'prompt', 'element'),
|
||||
focus: (i) => stringField(i, 'element'),
|
||||
clear: (i) => stringField(i, 'element'),
|
||||
check: (i) => stringField(i, 'element'),
|
||||
@@ -199,6 +200,7 @@ const SUBJECT_EXTRACTORS: Record<string, SubjectExtractor> = {
|
||||
return target ?? truncate(text, 40)
|
||||
},
|
||||
press_key: (i) => stringField(i, 'key'),
|
||||
type_text: (i) => truncate(stringField(i, 'text'), 40),
|
||||
|
||||
// Coordinate-based input
|
||||
click_at: (i) => coords(i.x, i.y),
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import { isGuiClickOnlyBrowserToolAllowed } from '../../src/agent/gui-click-only'
|
||||
|
||||
describe('GUI click-only browser tool gating', () => {
|
||||
it('keeps GUI click and basic page-opening tools available', () => {
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('click')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('hover')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('scroll')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('type_text')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('take_screenshot')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('new_page')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('navigate_page')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('list_pages')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('get_active_page')).toBe(true)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('close_page')).toBe(true)
|
||||
})
|
||||
|
||||
it('blocks page observation and legacy interaction tools', () => {
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('take_snapshot')).toBe(false)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('take_enhanced_snapshot')).toBe(
|
||||
false,
|
||||
)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('get_dom')).toBe(false)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('get_page_content')).toBe(false)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('click_at')).toBe(false)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('fill')).toBe(false)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('press_key')).toBe(false)
|
||||
expect(isGuiClickOnlyBrowserToolAllowed('type_at')).toBe(false)
|
||||
})
|
||||
})
|
||||
@@ -92,6 +92,17 @@ function buildScheduled(overrides?: Partial<BuildSystemPromptOptions>): string {
|
||||
})
|
||||
}
|
||||
|
||||
/** Build a prompt for experimental GUI click-only mode */
|
||||
function buildGuiClickOnly(
|
||||
overrides?: Partial<BuildSystemPromptOptions>,
|
||||
): string {
|
||||
return buildSystemPrompt({
|
||||
guiClickOnly: true,
|
||||
origin: 'sidepanel',
|
||||
...overrides,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 1. SECTION PRESENCE
|
||||
//
|
||||
@@ -284,6 +295,23 @@ describe('mode-aware framing', () => {
|
||||
expect(prompt).toContain('cannot interact with them')
|
||||
})
|
||||
|
||||
it('GUI click-only mode exposes only GUI click and page-opening guidance', () => {
|
||||
const prompt = buildGuiClickOnly()
|
||||
expect(prompt).toContain('experimental GUI click model mode')
|
||||
expect(prompt).toContain('Use `click` for visible page targets')
|
||||
expect(prompt).toContain('Use `hover` for hover menus')
|
||||
expect(prompt).toContain('Use `type_text` only after')
|
||||
expect(prompt).toContain('Use `scroll` to move the page viewport')
|
||||
expect(prompt).toContain('Use `take_screenshot` sparingly')
|
||||
expect(prompt).toContain('`new_page`')
|
||||
expect(prompt).toContain('`navigate_page`')
|
||||
expect(prompt).toContain('`close_page`')
|
||||
expect(prompt).not.toContain('take_snapshot')
|
||||
expect(prompt).not.toContain('get_dom')
|
||||
expect(prompt).not.toContain('`press_key`')
|
||||
expect(prompt).not.toContain('<external_integrations>')
|
||||
})
|
||||
|
||||
it('chat mode excludes memory-and-identity section', () => {
|
||||
// Why: chat mode is read-only — no memory writes, no soul updates.
|
||||
// The agent shouldn't even see memory tool instructions.
|
||||
|
||||
@@ -0,0 +1,278 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
*/
|
||||
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import {
|
||||
cleanHistoryUserText,
|
||||
convertOpenClawHistoryToAgentHistory,
|
||||
} from '../../../../src/api/services/openclaw/history-mapper'
|
||||
import type { OpenClawSessionHistory } from '../../../../src/api/services/openclaw/openclaw-http-client'
|
||||
|
||||
describe('cleanHistoryUserText', () => {
|
||||
it('extracts the cron payload and drops the trailer', () => {
|
||||
const raw =
|
||||
'[cron:681df8ba-85e0-404e-a6ea-891d0f5068af hello-8] Print hello\n' +
|
||||
'Current time: Tuesday, May 5th, 2026 - 2:26 AM (Asia/Calcutta) / 2026-05-04 20:56 UTC\n\n' +
|
||||
'Use the message tool if you need to notify the user directly with an explicit target. ' +
|
||||
'If you do not send directly, your final plain-text reply will be delivered automatically.'
|
||||
expect(cleanHistoryUserText(raw)).toBe('Print hello')
|
||||
})
|
||||
|
||||
it('extracts a multiline cron payload and drops the trailer', () => {
|
||||
const raw =
|
||||
'[cron:abcd1234-0000-0000-0000-000000000000 weather] Tell me the weather in Tokyo\n' +
|
||||
'and report back briefly.\n' +
|
||||
'Current time: Tuesday, May 5th, 2026 - 2:26 AM (Asia/Calcutta) / 2026-05-04 20:56 UTC\n\n' +
|
||||
'Use the message tool if you need to notify the user directly with an explicit target.'
|
||||
expect(cleanHistoryUserText(raw)).toBe(
|
||||
'Tell me the weather in Tokyo\nand report back briefly.',
|
||||
)
|
||||
})
|
||||
|
||||
it('unwraps the BrowserOS ACP user_request envelope', () => {
|
||||
const raw =
|
||||
'[Working directory: /tmp/workspace]\n\n' +
|
||||
'<role>\nYou are BrowserOS - a browser agent...\n</role>\n\n' +
|
||||
'<user_request>\nhey\n</user_request>'
|
||||
expect(cleanHistoryUserText(raw)).toBe('hey')
|
||||
})
|
||||
|
||||
it('strips a trailing system-reminder block', () => {
|
||||
const raw =
|
||||
'[Working directory: /tmp/workspace]\n\n' +
|
||||
'<role>\nYou are BrowserOS\n</role>\n\n' +
|
||||
'<user_request>\nopen google.com\n</user_request>\n\n' +
|
||||
'<system-reminder>\nA reminder the user never typed.\n</system-reminder>'
|
||||
expect(cleanHistoryUserText(raw)).toBe('open google.com')
|
||||
})
|
||||
|
||||
it('splits queued-marker concatenations and cleans each chunk', () => {
|
||||
// When multiple prompts queue up while a turn is active, BrowserOS
|
||||
// joins them with the queued-marker line. Each chunk between markers
|
||||
// is its own message that should be cleaned independently.
|
||||
const raw =
|
||||
'[Queued user message that arrived while the previous turn was still active]\n' +
|
||||
"[cron:aaaa hello-job-1] print('hello')\n" +
|
||||
'Current time: 2026-05-05 16:00 UTC\n\n' +
|
||||
'Use the message tool if you need to notify the user directly with an explicit target.\n' +
|
||||
'[Queued user message that arrived while the previous turn was still active]\n' +
|
||||
"[cron:bbbb hello-job-2] print('world')\n" +
|
||||
'Current time: 2026-05-05 16:01 UTC\n\n' +
|
||||
'Use the message tool if you need to notify the user directly with an explicit target.'
|
||||
expect(cleanHistoryUserText(raw)).toBe("print('hello')\nprint('world')")
|
||||
})
|
||||
|
||||
it('drops a Subagent Context message entirely', () => {
|
||||
// OpenClaw seeds a nested subagent's session with a "Subagent
|
||||
// Context" prefix that's pure scaffolding. The actual task lives in
|
||||
// the system prompt, so the user message body is meaningless to
|
||||
// surface. cleanHistoryUserText returns empty; the converter then
|
||||
// skips the entry so it doesn't render an empty bubble.
|
||||
const raw =
|
||||
'[Subagent Context] You are running as a subagent (depth 1/1). ' +
|
||||
'Results auto-announce to your requester; do not busy-poll for status.\n\n' +
|
||||
'Begin. Your assigned task is in the system prompt under **Your Role**.'
|
||||
expect(cleanHistoryUserText(raw)).toBe('')
|
||||
})
|
||||
|
||||
it('drops empty chunks left by leading queued marker', () => {
|
||||
// The blob often opens with a marker (no content before it). Empty
|
||||
// chunks should be dropped so we don't emit a leading newline.
|
||||
const raw =
|
||||
'[Queued user message that arrived while the previous turn was still active]\n' +
|
||||
'[cron:aaaa job] payload-only\n' +
|
||||
'Current time: now'
|
||||
expect(cleanHistoryUserText(raw)).toBe('payload-only')
|
||||
})
|
||||
|
||||
it('preserves messages that match no known scaffolding', () => {
|
||||
expect(cleanHistoryUserText('hello there')).toBe('hello there')
|
||||
expect(cleanHistoryUserText('multi\nline\nuser text')).toBe(
|
||||
'multi\nline\nuser text',
|
||||
)
|
||||
})
|
||||
|
||||
it('returns empty string unchanged', () => {
|
||||
expect(cleanHistoryUserText('')).toBe('')
|
||||
})
|
||||
})
|
||||
|
||||
describe('convertOpenClawHistoryToAgentHistory', () => {
|
||||
it('strips cron scaffolding from user messages while preserving assistant text', () => {
|
||||
const raw: OpenClawSessionHistory = {
|
||||
sessionKey: 'agent:demo:main',
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: '' as never,
|
||||
// The HTTP endpoint actually returns content as an array of typed
|
||||
// blocks at runtime; the type is `string` for backward-compat.
|
||||
// Cast via `unknown` to reflect runtime.
|
||||
...({
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text:
|
||||
'[cron:abc-123 hello-1] Print hello\n' +
|
||||
'Current time: 2026-05-05 16:00 UTC\n\n' +
|
||||
'Use the message tool if you need to notify the user directly with an explicit target.',
|
||||
},
|
||||
],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1000,
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [{ type: 'text', text: 'hello' }],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1001,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
const out = convertOpenClawHistoryToAgentHistory('demo', raw)
|
||||
expect(out.items.map((i) => ({ role: i.role, text: i.text }))).toEqual([
|
||||
{ role: 'user', text: 'Print hello' },
|
||||
{ role: 'assistant', text: 'hello' },
|
||||
])
|
||||
})
|
||||
|
||||
it('drops assistant turns that have only reasoning (no text, no tools)', () => {
|
||||
// MiniMax with thinking:minimal often returns only `thinking` blocks
|
||||
// for trivial prompts ("Print hello"). The empty text bubble with a
|
||||
// dangling reasoning collapsible reads as broken UI; cleaner to skip.
|
||||
const raw: OpenClawSessionHistory = {
|
||||
sessionKey: 'agent:demo:main',
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [{ type: 'text', text: 'hi' }],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1000,
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [
|
||||
{
|
||||
type: 'thinking',
|
||||
thinking: 'I should respond with a greeting.',
|
||||
},
|
||||
],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1001,
|
||||
},
|
||||
],
|
||||
}
|
||||
const out = convertOpenClawHistoryToAgentHistory('demo', raw)
|
||||
expect(out.items.map((i) => ({ role: i.role, text: i.text }))).toEqual([
|
||||
{ role: 'user', text: 'hi' },
|
||||
])
|
||||
})
|
||||
|
||||
it('drops Subagent Context user messages entirely (no empty bubble)', () => {
|
||||
const raw: OpenClawSessionHistory = {
|
||||
sessionKey: 'agent:demo:main',
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text:
|
||||
'[Subagent Context] You are running as a subagent (depth 1/1).\n\n' +
|
||||
'Begin. Your assigned task is in the system prompt.',
|
||||
},
|
||||
],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1000,
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [{ type: 'text', text: 'real reply' }],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1001,
|
||||
},
|
||||
],
|
||||
}
|
||||
const out = convertOpenClawHistoryToAgentHistory('demo', raw)
|
||||
expect(out.items.map((i) => ({ role: i.role, text: i.text }))).toEqual([
|
||||
{ role: 'assistant', text: 'real reply' },
|
||||
])
|
||||
})
|
||||
|
||||
it('attaches assistant reasoning and pairs tool call output across messages', () => {
|
||||
const raw: OpenClawSessionHistory = {
|
||||
sessionKey: 'agent:demo:main',
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [{ type: 'text', text: 'navigate to example.com' }],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1000,
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [
|
||||
{
|
||||
type: 'thinking',
|
||||
thinking: 'I should call the navigate tool.',
|
||||
},
|
||||
{
|
||||
type: 'toolCall',
|
||||
id: 'call-1',
|
||||
name: 'navigate',
|
||||
arguments: { url: 'https://example.com' },
|
||||
},
|
||||
],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1001,
|
||||
},
|
||||
{
|
||||
role: 'tool',
|
||||
content: '' as never,
|
||||
...({
|
||||
content: [
|
||||
{
|
||||
type: 'toolResult',
|
||||
toolCallId: 'call-1',
|
||||
content: 'navigated',
|
||||
},
|
||||
],
|
||||
} as unknown as { content: never }),
|
||||
timestamp: 1002,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
const out = convertOpenClawHistoryToAgentHistory('demo', raw)
|
||||
// 'tool' role messages are folded into the prior assistant entry, not surfaced
|
||||
expect(out.items.map((i) => i.role)).toEqual(['user', 'assistant'])
|
||||
const assistant = out.items[1]
|
||||
expect(assistant.reasoning?.text).toBe('I should call the navigate tool.')
|
||||
expect(assistant.toolCalls).toEqual([
|
||||
{
|
||||
toolCallId: 'call-1',
|
||||
toolName: 'navigate',
|
||||
status: 'completed',
|
||||
input: { url: 'https://example.com' },
|
||||
output: 'navigated',
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
@@ -4,7 +4,73 @@
|
||||
*/
|
||||
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import { mergeEnvContent } from '../../../../src/api/services/openclaw/openclaw-env'
|
||||
import {
|
||||
getHostWorkspaceDir,
|
||||
isAgentWorkspaceNameSafe,
|
||||
mergeEnvContent,
|
||||
} from '../../../../src/api/services/openclaw/openclaw-env'
|
||||
|
||||
describe('isAgentWorkspaceNameSafe', () => {
|
||||
it('accepts plain slugs', () => {
|
||||
expect(isAgentWorkspaceNameSafe('agent-01')).toBe(true)
|
||||
expect(isAgentWorkspaceNameSafe('research_bot')).toBe(true)
|
||||
expect(isAgentWorkspaceNameSafe('My Agent')).toBe(true)
|
||||
})
|
||||
|
||||
it('rejects empty or whitespace-only', () => {
|
||||
expect(isAgentWorkspaceNameSafe('')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe(' ')).toBe(false)
|
||||
})
|
||||
|
||||
it('rejects path-traversal segments', () => {
|
||||
expect(isAgentWorkspaceNameSafe('..')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe('../tmp')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe('foo/../bar')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe('foo..bar')).toBe(false)
|
||||
})
|
||||
|
||||
it('rejects path separators and NULs', () => {
|
||||
expect(isAgentWorkspaceNameSafe('foo/bar')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe('foo\\bar')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe('foo\0bar')).toBe(false)
|
||||
})
|
||||
|
||||
it('rejects names that start with a dot (hidden / dotfile)', () => {
|
||||
expect(isAgentWorkspaceNameSafe('.hidden')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe('.')).toBe(false)
|
||||
})
|
||||
|
||||
it('rejects control characters', () => {
|
||||
expect(isAgentWorkspaceNameSafe('foo\nbar')).toBe(false)
|
||||
expect(isAgentWorkspaceNameSafe('foo\x07bar')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('getHostWorkspaceDir', () => {
|
||||
it("returns the canonical 'main' workspace path", () => {
|
||||
expect(getHostWorkspaceDir('/tmp/openclaw', 'main')).toBe(
|
||||
'/tmp/openclaw/.openclaw/workspace',
|
||||
)
|
||||
})
|
||||
|
||||
it('returns a per-agent workspace for safe names', () => {
|
||||
expect(getHostWorkspaceDir('/tmp/openclaw', 'agent-01')).toBe(
|
||||
'/tmp/openclaw/.openclaw/workspace-agent-01',
|
||||
)
|
||||
})
|
||||
|
||||
it('throws for path-traversal names instead of escaping the state dir', () => {
|
||||
expect(() => getHostWorkspaceDir('/tmp/openclaw', '../../etc')).toThrow(
|
||||
/unsafe agent name/i,
|
||||
)
|
||||
})
|
||||
|
||||
it('throws for names containing path separators', () => {
|
||||
expect(() => getHostWorkspaceDir('/tmp/openclaw', 'foo/bar')).toThrow(
|
||||
/unsafe agent name/i,
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('mergeEnvContent', () => {
|
||||
it('appends new env keys and normalizes trailing newline', () => {
|
||||
|
||||
@@ -0,0 +1,99 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
*/
|
||||
|
||||
import { afterEach, describe, expect, it } from 'bun:test'
|
||||
import { mkdtempSync } from 'node:fs'
|
||||
import { mkdir, rm, symlink, writeFile } from 'node:fs/promises'
|
||||
import { tmpdir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
import { resolveSafeWorkspacePath } from '../../../../src/api/services/openclaw/produced-files-store'
|
||||
|
||||
describe('resolveSafeWorkspacePath', () => {
|
||||
const tempDirs: string[] = []
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
tempDirs.map((dir) => rm(dir, { recursive: true, force: true })),
|
||||
)
|
||||
tempDirs.length = 0
|
||||
})
|
||||
|
||||
it('resolves a regular file inside the workspace', async () => {
|
||||
const root = mkTempDir()
|
||||
const target = join(root, 'output.txt')
|
||||
await writeFile(target, 'hello')
|
||||
|
||||
const resolved = await resolveSafeWorkspacePath(root, 'output.txt')
|
||||
|
||||
expect(resolved).not.toBeNull()
|
||||
expect(resolved).toContain('output.txt')
|
||||
})
|
||||
|
||||
it('resolves a nested file using its workspace-relative path', async () => {
|
||||
const root = mkTempDir()
|
||||
const subdir = join(root, 'reports')
|
||||
await mkdir(subdir, { recursive: true })
|
||||
await writeFile(join(subdir, 'q1.csv'), 'a,b\n1,2')
|
||||
|
||||
const resolved = await resolveSafeWorkspacePath(root, 'reports/q1.csv')
|
||||
|
||||
expect(resolved).not.toBeNull()
|
||||
expect(resolved).toMatch(/reports\/q1\.csv$/)
|
||||
})
|
||||
|
||||
it('rejects lexical traversal with `..` segments', async () => {
|
||||
const root = mkTempDir()
|
||||
// Sibling file lives next to the workspace root so the lexical
|
||||
// join lands on a real, readable file — proving the rejection
|
||||
// is from the containment check, not a missing-file fallback.
|
||||
const siblingDir = mkTempDir()
|
||||
await writeFile(join(siblingDir, 'secret.txt'), 'do not leak')
|
||||
|
||||
const escapingRel = join('..', '..', 'secret.txt')
|
||||
|
||||
const resolved = await resolveSafeWorkspacePath(root, escapingRel)
|
||||
|
||||
expect(resolved).toBeNull()
|
||||
})
|
||||
|
||||
it('rejects a symlink whose target lives outside the workspace', async () => {
|
||||
const root = mkTempDir()
|
||||
const outside = mkTempDir()
|
||||
const secret = join(outside, 'passwd')
|
||||
await writeFile(secret, 'shadow:contents')
|
||||
|
||||
// Symlink inside the workspace pointing to the outside file.
|
||||
// The lexical path stays inside the root, but the realpath
|
||||
// resolution should still reject it.
|
||||
await symlink(secret, join(root, 'looks-local'))
|
||||
|
||||
const resolved = await resolveSafeWorkspacePath(root, 'looks-local')
|
||||
|
||||
expect(resolved).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null for a path that does not exist on disk', async () => {
|
||||
const root = mkTempDir()
|
||||
|
||||
const resolved = await resolveSafeWorkspacePath(root, 'never-created.bin')
|
||||
|
||||
expect(resolved).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null when the workspace root itself is the resolved path', async () => {
|
||||
const root = mkTempDir()
|
||||
|
||||
// Empty rel-path collapses to the root — must not be downloadable.
|
||||
const resolved = await resolveSafeWorkspacePath(root, '')
|
||||
|
||||
expect(resolved).toBeNull()
|
||||
})
|
||||
|
||||
function mkTempDir(): string {
|
||||
const dir = mkdtempSync(join(tmpdir(), 'browseros-files-test-'))
|
||||
tempDirs.push(dir)
|
||||
return dir
|
||||
}
|
||||
})
|
||||
@@ -590,6 +590,25 @@ just outer
|
||||
expect(unwrapBrowserosAcpUserMessage(outerOnly)).toBe('just outer')
|
||||
})
|
||||
|
||||
it('strips the openclaw single-line role envelope (regression: TKT-774 only matched the BrowserOS multi-line form)', () => {
|
||||
// PR #924 (ACPX agent runtime adapters) introduced a second
|
||||
// `<role>…</role>` prefix for openclaw — a single-line block
|
||||
// distinct from the BrowserOS multi-line role. The original
|
||||
// exact-prefix strip only matched the BrowserOS form, so user
|
||||
// messages from openclaw agents were landing in
|
||||
// /agents/:id/sessions/main/history with the envelope still
|
||||
// attached. The strip must be adapter-agnostic: any
|
||||
// `<role>…</role>` followed by a `<user_request>` block.
|
||||
const wrapped = `<role>You are running inside BrowserOS through the OpenClaw ACP adapter. Use your OpenClaw identity, memory, and browser tools.</role>
|
||||
|
||||
<user_request>
|
||||
Need another report this time as pdf, a comparison between both yahoo and google reports you created...
|
||||
</user_request>`
|
||||
expect(unwrapBrowserosAcpUserMessage(wrapped)).toBe(
|
||||
'Need another report this time as pdf, a comparison between both yahoo and google reports you created...',
|
||||
)
|
||||
})
|
||||
|
||||
it('strips the ACPX runtime envelope when it wraps persisted history', () => {
|
||||
const wrapped = `<browseros_acpx_runtime version="2026-05-02.v1">
|
||||
You are BrowserOS, an ACPX browser agent.
|
||||
|
||||
@@ -13,8 +13,13 @@ import {
|
||||
scroll,
|
||||
select_option,
|
||||
type_at,
|
||||
type_text,
|
||||
uncheck,
|
||||
} from '../../src/tools/input'
|
||||
import {
|
||||
type ClickPoint,
|
||||
getPngDimensionsFromBase64,
|
||||
} from '../../src/tools/molmo-point-client'
|
||||
import { close_page, navigate_page, new_page } from '../../src/tools/navigation'
|
||||
import { evaluate_script, take_snapshot } from '../../src/tools/snapshot'
|
||||
import { cleanupWithBrowser, withBrowser } from '../__helpers__/with-browser'
|
||||
@@ -121,6 +126,39 @@ async function pointInsideElement(
|
||||
return { x: point.x, y: point.y }
|
||||
}
|
||||
|
||||
async function withMockedGuiPoint(
|
||||
browser: Browser,
|
||||
pageId: number,
|
||||
viewportPoint: ClickPoint,
|
||||
fn: () => Promise<void>,
|
||||
): Promise<void> {
|
||||
const screenshot = await browser.screenshot(pageId, {
|
||||
format: 'png',
|
||||
fullPage: false,
|
||||
})
|
||||
const dimensions = getPngDimensionsFromBase64(screenshot.data)
|
||||
const viewport = await browser.viewportSize(pageId)
|
||||
const scaleX = dimensions
|
||||
? dimensions.width / viewport.width
|
||||
: screenshot.devicePixelRatio
|
||||
const scaleY = dimensions
|
||||
? dimensions.height / viewport.height
|
||||
: screenshot.devicePixelRatio
|
||||
const originalFetch = globalThis.fetch
|
||||
globalThis.fetch = (async () =>
|
||||
new Response(
|
||||
JSON.stringify({
|
||||
points: [{ x: viewportPoint.x * scaleX, y: viewportPoint.y * scaleY }],
|
||||
}),
|
||||
{ status: 200, headers: { 'content-type': 'application/json' } },
|
||||
)) as typeof fetch
|
||||
try {
|
||||
await fn()
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch
|
||||
}
|
||||
}
|
||||
|
||||
const FORM_PAGE = `data:text/html,${encodeURIComponent(`<!DOCTYPE html>
|
||||
<html><body>
|
||||
<h1>Test Form</h1>
|
||||
@@ -196,7 +234,7 @@ describe('input tools', () => {
|
||||
}, 60_000)
|
||||
|
||||
it('click triggers a button', async () => {
|
||||
await withBrowser(async ({ execute }) => {
|
||||
await withBrowser(async ({ browser, execute }) => {
|
||||
const newResult = await execute(new_page, { url: FORM_PAGE })
|
||||
const pageId = pageIdOf(newResult)
|
||||
|
||||
@@ -206,18 +244,31 @@ describe('input tools', () => {
|
||||
const inputId = findElementId(snapText, 'Enter name')
|
||||
await execute(fill, { page: pageId, element: inputId, text: 'Alice' })
|
||||
|
||||
// Click submit
|
||||
const btnId = findElementId(snapText, 'Submit')
|
||||
const clickResult = await execute(click, {
|
||||
page: pageId,
|
||||
element: btnId,
|
||||
})
|
||||
assert.ok(!clickResult.isError, textOf(clickResult))
|
||||
const clickData = structuredOf<{ action: string; element: number }>(
|
||||
clickResult,
|
||||
// Click submit via the GUI point model response.
|
||||
const buttonPoint = await pointInsideElement(
|
||||
{ browser, directories: { workingDir: process.cwd() } },
|
||||
pageId,
|
||||
'submit-btn',
|
||||
)
|
||||
assert.strictEqual(clickData.action, 'click')
|
||||
assert.strictEqual(clickData.element, btnId)
|
||||
await withMockedGuiPoint(browser, pageId, buttonPoint, async () => {
|
||||
const clickResult = await execute(click, {
|
||||
page: pageId,
|
||||
prompt: 'click the Submit button',
|
||||
})
|
||||
assert.ok(!clickResult.isError, textOf(clickResult))
|
||||
assert.match(
|
||||
textOf(clickResult),
|
||||
/The click was successful and hit the element: .*tagName="button".*textContent="Submit"/,
|
||||
)
|
||||
const clickData = structuredOf<{
|
||||
action: string
|
||||
prompt: string
|
||||
hitElement: { tagName: string; textContent?: string } | null
|
||||
}>(clickResult)
|
||||
assert.strictEqual(clickData.action, 'click')
|
||||
assert.strictEqual(clickData.prompt, 'click the Submit button')
|
||||
assert.strictEqual(clickData.hitElement?.tagName, 'button')
|
||||
})
|
||||
|
||||
const output = await execute(evaluate_script, {
|
||||
page: pageId,
|
||||
@@ -229,6 +280,61 @@ describe('input tools', () => {
|
||||
})
|
||||
}, 60_000)
|
||||
|
||||
it('click is blocked by ACL after the GUI point resolves', async () => {
|
||||
await withBrowser(async ({ browser }) => {
|
||||
const ctx: ToolContext = {
|
||||
browser,
|
||||
directories: { workingDir: process.cwd() },
|
||||
aclRules: [
|
||||
{
|
||||
id: 'submit-rule',
|
||||
sitePattern: '*',
|
||||
textMatch: 'Submit',
|
||||
enabled: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
const newResult = await executeTool(
|
||||
new_page,
|
||||
{ url: FORM_PAGE },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
const pageId = pageIdOf(newResult)
|
||||
const buttonPoint = await pointInsideElement(ctx, pageId, 'submit-btn')
|
||||
|
||||
await withMockedGuiPoint(browser, pageId, buttonPoint, async () => {
|
||||
const clickResult = await executeTool(
|
||||
click,
|
||||
{ page: pageId, prompt: 'click the Submit button' },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
assert.ok(clickResult.isError, 'Expected ACL to block GUI click')
|
||||
assert.ok(textOf(clickResult).includes('Action blocked by ACL rule'))
|
||||
})
|
||||
|
||||
const output = await executeTool(
|
||||
evaluate_script,
|
||||
{
|
||||
page: pageId,
|
||||
expression: 'document.getElementById("output").textContent',
|
||||
},
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
assert.strictEqual(textOf(output), '')
|
||||
|
||||
await executeTool(
|
||||
close_page,
|
||||
{ page: pageId },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
})
|
||||
}, 60_000)
|
||||
|
||||
it('check and uncheck toggle a checkbox', async () => {
|
||||
await withBrowser(async ({ execute }) => {
|
||||
const newResult = await execute(new_page, { url: FORM_PAGE })
|
||||
@@ -397,6 +503,104 @@ describe('input tools', () => {
|
||||
})
|
||||
}, 60_000)
|
||||
|
||||
it('type_text types into the focused element', async () => {
|
||||
await withBrowser(async ({ execute }) => {
|
||||
const newResult = await execute(new_page, { url: FORM_PAGE })
|
||||
const pageId = pageIdOf(newResult)
|
||||
|
||||
const snap = await execute(take_snapshot, { page: pageId })
|
||||
const inputId = findElementId(textOf(snap), 'Enter name')
|
||||
await execute(fill, { page: pageId, element: inputId, text: 'hello' })
|
||||
|
||||
const typeResult = await execute(type_text, {
|
||||
page: pageId,
|
||||
text: ' world',
|
||||
})
|
||||
assert.ok(!typeResult.isError, textOf(typeResult))
|
||||
assert.strictEqual(textOf(typeResult), 'tool call executed successfully')
|
||||
assert.deepStrictEqual(structuredOf(typeResult), {
|
||||
action: 'type_text',
|
||||
page: pageId,
|
||||
textLength: ' world'.length,
|
||||
})
|
||||
|
||||
const val = await execute(evaluate_script, {
|
||||
page: pageId,
|
||||
expression: 'document.getElementById("name").value',
|
||||
})
|
||||
assert.strictEqual(textOf(val), 'hello world')
|
||||
|
||||
await execute(close_page, { page: pageId })
|
||||
})
|
||||
}, 60_000)
|
||||
|
||||
it('type_text is blocked by ACL on the focused element', async () => {
|
||||
await withBrowser(async ({ browser }) => {
|
||||
const ctx: ToolContext = {
|
||||
browser,
|
||||
directories: { workingDir: process.cwd() },
|
||||
}
|
||||
|
||||
const newResult = await executeTool(
|
||||
new_page,
|
||||
{ url: FORM_PAGE },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
const pageId = pageIdOf(newResult)
|
||||
|
||||
const snap = await executeTool(
|
||||
take_snapshot,
|
||||
{ page: pageId },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
const inputId = findElementId(textOf(snap), 'Enter name')
|
||||
await executeTool(
|
||||
fill,
|
||||
{ page: pageId, element: inputId, text: 'hello' },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
|
||||
ctx.aclRules = [
|
||||
{
|
||||
id: 'name-rule',
|
||||
sitePattern: '*',
|
||||
textMatch: 'Enter name',
|
||||
enabled: true,
|
||||
},
|
||||
]
|
||||
|
||||
const typeResult = await executeTool(
|
||||
type_text,
|
||||
{ page: pageId, text: ' blocked' },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
assert.ok(typeResult.isError, 'Expected ACL to block focused typing')
|
||||
assert.ok(textOf(typeResult).includes('Action blocked by ACL rule'))
|
||||
|
||||
const val = await executeTool(
|
||||
evaluate_script,
|
||||
{
|
||||
page: pageId,
|
||||
expression: 'document.getElementById("name").value',
|
||||
},
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
assert.strictEqual(textOf(val), 'hello')
|
||||
|
||||
await executeTool(
|
||||
close_page,
|
||||
{ page: pageId },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
})
|
||||
}, 60_000)
|
||||
|
||||
it('scroll dispatches without error', async () => {
|
||||
const calls: Array<{
|
||||
page: number
|
||||
@@ -437,24 +641,62 @@ describe('input tools', () => {
|
||||
page: 7,
|
||||
direction: 'down',
|
||||
amount: 5,
|
||||
element: undefined,
|
||||
})
|
||||
})
|
||||
|
||||
it('hover moves cursor over element', async () => {
|
||||
it('scroll moves the page viewport', async () => {
|
||||
await withBrowser(async ({ execute }) => {
|
||||
const newResult = await execute(new_page, { url: FORM_PAGE })
|
||||
const pageId = pageIdOf(newResult)
|
||||
|
||||
const snap = await execute(take_snapshot, { page: pageId })
|
||||
const btnId = findElementId(textOf(snap), 'Submit')
|
||||
|
||||
const hoverResult = await execute(hover, {
|
||||
const result = await execute(scroll, {
|
||||
page: pageId,
|
||||
element: btnId,
|
||||
direction: 'down',
|
||||
amount: 5,
|
||||
})
|
||||
|
||||
assert.ok(!result.isError, textOf(result))
|
||||
assert.ok(textOf(result).includes('Scrolled down'))
|
||||
|
||||
const position = await execute(evaluate_script, {
|
||||
page: pageId,
|
||||
expression: 'window.scrollY',
|
||||
})
|
||||
const data = structuredOf<{ value?: unknown }>(position)
|
||||
const value = data.value
|
||||
assert.strictEqual(typeof value, 'number')
|
||||
assert.ok(value > 0)
|
||||
|
||||
await execute(close_page, { page: pageId })
|
||||
})
|
||||
}, 60_000)
|
||||
|
||||
it('hover moves cursor via the GUI point model response', async () => {
|
||||
await withBrowser(async ({ browser, execute }) => {
|
||||
const newResult = await execute(new_page, { url: FORM_PAGE })
|
||||
const pageId = pageIdOf(newResult)
|
||||
|
||||
const buttonPoint = await pointInsideElement(
|
||||
{ browser, directories: { workingDir: process.cwd() } },
|
||||
pageId,
|
||||
'submit-btn',
|
||||
)
|
||||
await withMockedGuiPoint(browser, pageId, buttonPoint, async () => {
|
||||
const hoverResult = await execute(hover, {
|
||||
page: pageId,
|
||||
prompt: 'hover the Submit button',
|
||||
})
|
||||
assert.ok(!hoverResult.isError, textOf(hoverResult))
|
||||
assert.strictEqual(
|
||||
textOf(hoverResult),
|
||||
'tool call executed successfully',
|
||||
)
|
||||
const hoverData = structuredOf<{ action: string; prompt: string }>(
|
||||
hoverResult,
|
||||
)
|
||||
assert.strictEqual(hoverData.action, 'hover')
|
||||
assert.strictEqual(hoverData.prompt, 'hover the Submit button')
|
||||
})
|
||||
assert.ok(!hoverResult.isError, textOf(hoverResult))
|
||||
assert.ok(textOf(hoverResult).includes('Hovered'))
|
||||
|
||||
await execute(close_page, { page: pageId })
|
||||
})
|
||||
@@ -467,7 +709,7 @@ describe('input tools', () => {
|
||||
directories: { workingDir: process.cwd() },
|
||||
}
|
||||
const run =
|
||||
(tool: typeof new_page | typeof take_snapshot | typeof click) =>
|
||||
(tool: typeof new_page | typeof take_snapshot | typeof fill) =>
|
||||
(args: unknown) =>
|
||||
executeTool(tool, args, ctx, AbortSignal.timeout(30_000))
|
||||
|
||||
@@ -475,21 +717,29 @@ describe('input tools', () => {
|
||||
const pageId = pageIdOf(newResult)
|
||||
|
||||
const snap = await run(take_snapshot)({ page: pageId })
|
||||
const btnId = findElementId(textOf(snap), 'Submit')
|
||||
const inputId = findElementId(textOf(snap), 'Enter name')
|
||||
|
||||
const beforeBlock = await run(click)({ page: pageId, element: btnId })
|
||||
const beforeBlock = await run(fill)({
|
||||
page: pageId,
|
||||
element: inputId,
|
||||
text: 'Allowed',
|
||||
})
|
||||
assert.ok(!beforeBlock.isError, textOf(beforeBlock))
|
||||
|
||||
ctx.aclRules = [
|
||||
{
|
||||
id: 'submit-rule',
|
||||
id: 'name-rule',
|
||||
sitePattern: '*',
|
||||
textMatch: 'Submit',
|
||||
textMatch: 'Enter name',
|
||||
enabled: true,
|
||||
},
|
||||
]
|
||||
|
||||
const afterBlock = await run(click)({ page: pageId, element: btnId })
|
||||
const afterBlock = await run(fill)({
|
||||
page: pageId,
|
||||
element: inputId,
|
||||
text: 'Blocked',
|
||||
})
|
||||
assert.ok(afterBlock.isError, 'Expected ACL block after updating rules')
|
||||
assert.ok(textOf(afterBlock).includes('Action blocked by ACL rule'))
|
||||
|
||||
@@ -598,19 +848,9 @@ describe('input tools', () => {
|
||||
)
|
||||
assert.ok(!navResult.isError, textOf(navResult))
|
||||
|
||||
const snap = await executeTool(
|
||||
take_snapshot,
|
||||
{ page: pageId },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
const linkMatch = textOf(snap).match(/\[(\d+)\]\s*link/)
|
||||
assert.ok(linkMatch, `Expected a link in snapshot:\n${textOf(snap)}`)
|
||||
const linkId = Number(linkMatch?.[1])
|
||||
|
||||
const blockedClick = await executeTool(
|
||||
click,
|
||||
{ page: pageId, element: linkId },
|
||||
click_at,
|
||||
{ page: pageId, x: 10, y: 10 },
|
||||
ctx,
|
||||
AbortSignal.timeout(30_000),
|
||||
)
|
||||
|
||||
@@ -73,4 +73,26 @@ describe('ToolResponse', () => {
|
||||
assert.ok(text.includes('[Page 1 snapshot]'))
|
||||
assert.ok(text.includes('[42] button "Submit"'))
|
||||
})
|
||||
|
||||
it('suppresses snapshot post-actions when requested', async () => {
|
||||
const response = new ToolResponse({ postActionTimeoutMs: 200 })
|
||||
response.text('ok')
|
||||
response.includeSnapshot(1)
|
||||
|
||||
let called = false
|
||||
const browser = {
|
||||
snapshot: async () => {
|
||||
called = true
|
||||
return '[42] button "Submit"'
|
||||
},
|
||||
} as unknown as Browser
|
||||
|
||||
const result = await response.build(browser, { suppressSnapshots: true })
|
||||
const text = textOf(result)
|
||||
|
||||
assert.equal(called, false)
|
||||
assert.ok(text.includes('ok'))
|
||||
assert.ok(!text.includes('Additional context'))
|
||||
assert.ok(!text.includes('[Page 1 snapshot]'))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -68,6 +68,18 @@ export const LLMProviderSchema: z.ZodEnum<
|
||||
|
||||
export type LLMProvider = z.infer<typeof LLMProviderSchema>
|
||||
|
||||
const OpenRouterProviderRoutingSchema = z.object({
|
||||
order: z.array(z.string()).optional(),
|
||||
only: z.array(z.string()).optional(),
|
||||
ignore: z.array(z.string()).optional(),
|
||||
allowFallbacks: z.boolean().optional(),
|
||||
requireParameters: z.boolean().optional(),
|
||||
})
|
||||
|
||||
export type OpenRouterProviderRouting = z.infer<
|
||||
typeof OpenRouterProviderRoutingSchema
|
||||
>
|
||||
|
||||
/**
|
||||
* LLM configuration schema
|
||||
* Used by SDK endpoints and agent configuration
|
||||
@@ -84,6 +96,17 @@ export const LLMConfigSchema: z.ZodObject<{
|
||||
sessionToken: z.ZodOptional<z.ZodString>
|
||||
reasoningEffort: z.ZodOptional<z.ZodEnum<['none', 'low', 'medium', 'high']>>
|
||||
reasoningSummary: z.ZodOptional<z.ZodEnum<['auto', 'concise', 'detailed']>>
|
||||
reasoning: z.ZodOptional<
|
||||
z.ZodObject<{
|
||||
enabled: z.ZodOptional<z.ZodBoolean>
|
||||
maxTokens: z.ZodOptional<z.ZodNumber>
|
||||
effort: z.ZodOptional<
|
||||
z.ZodEnum<['minimal', 'low', 'medium', 'high', 'xhigh']>
|
||||
>
|
||||
}>
|
||||
>
|
||||
verbosity: z.ZodOptional<z.ZodEnum<['low', 'medium', 'high', 'xhigh', 'max']>>
|
||||
providerRouting: z.ZodOptional<typeof OpenRouterProviderRoutingSchema>
|
||||
}> = z.object({
|
||||
provider: LLMProviderSchema,
|
||||
model: z.string().optional(),
|
||||
@@ -99,6 +122,16 @@ export const LLMConfigSchema: z.ZodObject<{
|
||||
// ChatGPT Pro (Codex)
|
||||
reasoningEffort: z.enum(['none', 'low', 'medium', 'high']).optional(),
|
||||
reasoningSummary: z.enum(['auto', 'concise', 'detailed']).optional(),
|
||||
// Provider-specific reasoning controls.
|
||||
reasoning: z
|
||||
.object({
|
||||
enabled: z.boolean().optional(),
|
||||
maxTokens: z.number().optional(),
|
||||
effort: z.enum(['minimal', 'low', 'medium', 'high', 'xhigh']).optional(),
|
||||
})
|
||||
.optional(),
|
||||
verbosity: z.enum(['low', 'medium', 'high', 'xhigh', 'max']).optional(),
|
||||
providerRouting: OpenRouterProviderRoutingSchema.optional(),
|
||||
})
|
||||
|
||||
export type LLMConfig = z.infer<typeof LLMConfigSchema>
|
||||
|
||||
@@ -12,8 +12,8 @@ func init() {
|
||||
command := &cobra.Command{
|
||||
Use: "add <name> <path>",
|
||||
Aliases: []string{"register"},
|
||||
Annotations: map[string]string{"group": "Workspace:"},
|
||||
Short: "Register a Chromium checkout as a workspace",
|
||||
Annotations: map[string]string{"group": "Chromium Checkouts:"},
|
||||
Short: "Register a named Chromium checkout",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := ensureRepoConfigured(patchesRepo); err != nil {
|
||||
@@ -30,7 +30,7 @@ func init() {
|
||||
"workspace": entry,
|
||||
"patches_repo": appState.Config.PatchesRepo,
|
||||
}, func() {
|
||||
fmt.Println(ui.Success("Registered workspace"))
|
||||
fmt.Println(ui.Success("Registered Chromium checkout"))
|
||||
fmt.Printf("%s %s\n", ui.Muted("name:"), entry.Name)
|
||||
fmt.Printf("%s %s\n", ui.Muted("path:"), entry.Path)
|
||||
fmt.Printf("%s %s\n", ui.Muted("repo:"), appState.Config.PatchesRepo)
|
||||
|
||||
@@ -14,16 +14,19 @@ func init() {
|
||||
var changed string
|
||||
var rangeEnd string
|
||||
command := &cobra.Command{
|
||||
Use: "apply [workspace] [-- files...]",
|
||||
Use: "apply [checkout] [-- files...]",
|
||||
Annotations: map[string]string{"group": "Core:"},
|
||||
Short: "Apply repo patches to a workspace",
|
||||
Args: cobra.ArbitraryArgs,
|
||||
Short: "Apply repo patches to a checkout",
|
||||
Example: ` browseros-patch apply ch1
|
||||
browseros-patch apply ch1 -- chrome/browser/browser.cc
|
||||
browseros-patch apply --src /path/to/chromium/src`,
|
||||
Args: cobra.ArbitraryArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
positional, filters := splitWorkspaceAndFilters(cmd, args)
|
||||
if len(positional) > 1 {
|
||||
return fmt.Errorf("expected at most one workspace name")
|
||||
return fmt.Errorf("expected at most one checkout name")
|
||||
}
|
||||
ws, err := resolveWorkspace(positional, src)
|
||||
ws, err := resolveWorkspace(cmd, positional, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -58,7 +61,7 @@ func init() {
|
||||
})
|
||||
},
|
||||
}
|
||||
command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
|
||||
command.Flags().StringVar(&src, "src", "", srcFlagUsage)
|
||||
command.Flags().BoolVar(&reset, "reset", false, "Reset patched files to BASE_COMMIT before applying")
|
||||
command.Flags().StringVar(&changed, "changed", "", "Apply only patches changed in the given repo commit")
|
||||
command.Flags().StringVar(&rangeEnd, "range-end", "", "End revision when using --changed as a range start")
|
||||
|
||||
@@ -10,16 +10,22 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const srcFlagUsage = "Chromium checkout path to operate on directly without registry lookup"
|
||||
|
||||
func repoInfo() (*repo.Info, error) {
|
||||
return appState.RepoInfo()
|
||||
}
|
||||
|
||||
func resolveWorkspace(positional []string, src string) (workspace.Entry, error) {
|
||||
func resolveWorkspace(cmd *cobra.Command, positional []string, src string) (workspace.Entry, error) {
|
||||
name := ""
|
||||
if len(positional) > 0 {
|
||||
name = positional[0]
|
||||
}
|
||||
return appState.ResolveWorkspace(name, src)
|
||||
commandPath := ""
|
||||
if cmd != nil {
|
||||
commandPath = cmd.CommandPath()
|
||||
}
|
||||
return workspace.ResolveForCommand(appState.Registry, name, appState.CWD, src, commandPath)
|
||||
}
|
||||
|
||||
func splitWorkspaceAndFilters(cmd *cobra.Command, args []string) ([]string, []string) {
|
||||
@@ -30,6 +36,24 @@ func splitWorkspaceAndFilters(cmd *cobra.Command, args []string) ([]string, []st
|
||||
return args[:atDash], args[atDash:]
|
||||
}
|
||||
|
||||
// llmTxtGuide returns a stable plain-text operating guide for coding agents.
|
||||
func llmTxtGuide() string {
|
||||
return `browseros-patch quick reference for coding agents
|
||||
|
||||
Terms:
|
||||
- patch repo: BrowserOS packages/browseros repo containing chromium_patches/.
|
||||
- Chromium checkout: local Chromium src tree registered with a checkout name like ch1.
|
||||
- checkout name: registry alias used by commands, for example ch1.
|
||||
- --src: operate on a Chromium checkout path directly without registry lookup.
|
||||
|
||||
Rules:
|
||||
- Checkout commands work from anywhere when passed a checkout name: browseros-patch diff ch1.
|
||||
- browseros-patch list reads only registered Chromium checkouts; it does not inspect sync state.
|
||||
- Use browseros-patch status ch1 or browseros-patch diff ch1 before mutating.
|
||||
- Mutating commands: browseros-patch sync ch1, browseros-patch apply ch1, browseros-patch extract ch1.
|
||||
`
|
||||
}
|
||||
|
||||
func ensureRepoConfigured(override string) error {
|
||||
if override == "" && appState.Config.PatchesRepo != "" {
|
||||
return nil
|
||||
|
||||
@@ -2,9 +2,14 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/patch/internal/app"
|
||||
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/patch/internal/workspace"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -41,3 +46,334 @@ func TestCommandProgressDisabledForJSON(t *testing.T) {
|
||||
t.Fatalf("expected nil progress reporter in JSON mode")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWorkspaceErrorUsesCurrentCommandExample(t *testing.T) {
|
||||
oldAppState := appState
|
||||
t.Cleanup(func() {
|
||||
appState = oldAppState
|
||||
})
|
||||
|
||||
root := t.TempDir()
|
||||
registered := filepath.Join(root, "chromium-src")
|
||||
outside := filepath.Join(root, "outside")
|
||||
appState = &app.App{
|
||||
CWD: outside,
|
||||
Registry: &workspace.Registry{Version: 1, Workspaces: []workspace.Entry{
|
||||
{Name: "ch1", Path: registered},
|
||||
}},
|
||||
}
|
||||
|
||||
rootCmd := &cobra.Command{Use: "browseros-patch"}
|
||||
diffCmd := &cobra.Command{Use: "diff"}
|
||||
rootCmd.AddCommand(diffCmd)
|
||||
|
||||
_, err := resolveWorkspace(diffCmd, nil, "")
|
||||
if err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
if !strings.Contains(err.Error(), `browseros-patch diff ch1`) {
|
||||
t.Fatalf("expected command-specific example, got:\n%s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWorkspaceNamedCheckoutIgnoresCWD(t *testing.T) {
|
||||
oldAppState := appState
|
||||
t.Cleanup(func() {
|
||||
appState = oldAppState
|
||||
})
|
||||
|
||||
root := t.TempDir()
|
||||
registered := filepath.Join(root, "chromium-src")
|
||||
outside := filepath.Join(root, "outside")
|
||||
appState = &app.App{
|
||||
CWD: outside,
|
||||
Registry: &workspace.Registry{Version: 1, Workspaces: []workspace.Entry{
|
||||
{Name: "ch1", Path: registered},
|
||||
}},
|
||||
}
|
||||
|
||||
rootCmd := &cobra.Command{Use: "browseros-patch"}
|
||||
diffCmd := &cobra.Command{Use: "diff"}
|
||||
rootCmd.AddCommand(diffCmd)
|
||||
|
||||
ws, err := resolveWorkspace(diffCmd, []string{"ch1"}, "")
|
||||
if err != nil {
|
||||
t.Fatalf("resolve named checkout: %v", err)
|
||||
}
|
||||
if ws.Path != registered {
|
||||
t.Fatalf("resolved path = %q, want %q", ws.Path, registered)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListReadsOnlyRegistry(t *testing.T) {
|
||||
oldAppState := appState
|
||||
oldJSONOut := jsonOut
|
||||
t.Cleanup(func() {
|
||||
appState = oldAppState
|
||||
jsonOut = oldJSONOut
|
||||
})
|
||||
|
||||
missingCheckout := filepath.Join(t.TempDir(), "missing-src")
|
||||
appState = &app.App{
|
||||
Registry: &workspace.Registry{Version: 1, Workspaces: []workspace.Entry{
|
||||
{Name: "ch1", Path: missingCheckout},
|
||||
}},
|
||||
}
|
||||
jsonOut = false
|
||||
|
||||
listCmd, _, err := rootCmd.Find([]string{"list"})
|
||||
if err != nil {
|
||||
t.Fatalf("find list: %v", err)
|
||||
}
|
||||
|
||||
var runErr error
|
||||
output := captureStdout(t, func() {
|
||||
runErr = listCmd.RunE(listCmd, nil)
|
||||
})
|
||||
if runErr != nil {
|
||||
t.Fatalf("list should not inspect checkout path: %v", runErr)
|
||||
}
|
||||
for _, want := range []string{"ch1", missingCheckout} {
|
||||
if !strings.Contains(output, want) {
|
||||
t.Fatalf("expected list output to contain %q, got:\n%s", want, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPublicHelpUsesCheckoutTerminology(t *testing.T) {
|
||||
help := rootCmd.Short + groupedHelp(rootCmd)
|
||||
for _, want := range []string{
|
||||
"Chromium checkouts",
|
||||
"Chromium Checkouts:",
|
||||
} {
|
||||
if !strings.Contains(help, want) {
|
||||
t.Fatalf("expected help to contain %q, got:\n%s", want, help)
|
||||
}
|
||||
}
|
||||
for _, forbidden := range []string{
|
||||
"Workspace-centric",
|
||||
"Workspace:",
|
||||
" workspace",
|
||||
" workspaces",
|
||||
} {
|
||||
if strings.Contains(help, forbidden) {
|
||||
t.Fatalf("expected help not to contain %q, got:\n%s", forbidden, help)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckoutCommandUsageTerminology(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
use string
|
||||
}{
|
||||
{name: "diff", use: "diff [checkout]"},
|
||||
{name: "status", use: "status [checkout]"},
|
||||
{name: "apply", use: "apply [checkout] [-- files...]"},
|
||||
{name: "sync", use: "sync [checkout]"},
|
||||
{name: "extract", use: "extract [checkout] [--range <start> <end>] [-- files...]"},
|
||||
} {
|
||||
cmd, _, err := rootCmd.Find([]string{tc.name})
|
||||
if err != nil {
|
||||
t.Fatalf("find %s: %v", tc.name, err)
|
||||
}
|
||||
if cmd.Use != tc.use {
|
||||
t.Fatalf("%s use = %q, want %q", tc.name, cmd.Use, tc.use)
|
||||
}
|
||||
if strings.Contains(strings.ToLower(cmd.Short), "workspace") {
|
||||
t.Fatalf("%s short should use checkout terminology: %q", tc.name, cmd.Short)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRootHelpExplainsPatchRepoAndCheckoutModel(t *testing.T) {
|
||||
for _, want := range []string{
|
||||
"patch repo",
|
||||
"chromium_patches/",
|
||||
"Chromium checkout",
|
||||
"ch1",
|
||||
} {
|
||||
if !strings.Contains(rootCmd.Long, want) {
|
||||
t.Fatalf("expected root long help to contain %q, got:\n%s", want, rootCmd.Long)
|
||||
}
|
||||
}
|
||||
|
||||
for _, want := range []string{
|
||||
"browseros-patch add ch1 /path/to/chromium/src",
|
||||
"browseros-patch list",
|
||||
"browseros-patch diff ch1",
|
||||
"browseros-patch sync ch1",
|
||||
"browseros-patch extract ch1",
|
||||
} {
|
||||
if !strings.Contains(rootCmd.Example, want) {
|
||||
t.Fatalf("expected root examples to contain %q, got:\n%s", want, rootCmd.Example)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckoutCommandExamplesUseNamedCheckout(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
example string
|
||||
}{
|
||||
{name: "diff", example: "browseros-patch diff ch1"},
|
||||
{name: "status", example: "browseros-patch status ch1"},
|
||||
{name: "apply", example: "browseros-patch apply ch1"},
|
||||
{name: "sync", example: "browseros-patch sync ch1"},
|
||||
{name: "extract", example: "browseros-patch extract ch1"},
|
||||
} {
|
||||
cmd, _, err := rootCmd.Find([]string{tc.name})
|
||||
if err != nil {
|
||||
t.Fatalf("find %s: %v", tc.name, err)
|
||||
}
|
||||
if !strings.Contains(cmd.Example, tc.example) {
|
||||
t.Fatalf("expected %s examples to contain %q, got:\n%s", tc.name, tc.example, cmd.Example)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSrcFlagExplainsDirectCheckoutPath(t *testing.T) {
|
||||
for _, name := range []string{"diff", "status", "apply", "sync", "extract"} {
|
||||
cmd, _, err := rootCmd.Find([]string{name})
|
||||
if err != nil {
|
||||
t.Fatalf("find %s: %v", name, err)
|
||||
}
|
||||
flag := cmd.Flags().Lookup("src")
|
||||
if flag == nil {
|
||||
t.Fatalf("%s missing --src flag", name)
|
||||
}
|
||||
if !strings.Contains(flag.Usage, "without registry lookup") {
|
||||
t.Fatalf("%s --src usage should explain registry bypass, got %q", name, flag.Usage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLLMTxtGuideContent(t *testing.T) {
|
||||
text := llmTxtGuide()
|
||||
for _, want := range []string{
|
||||
"patch repo",
|
||||
"chromium_patches/",
|
||||
"Chromium checkout",
|
||||
"checkout name",
|
||||
"--src",
|
||||
"browseros-patch diff ch1",
|
||||
"browseros-patch list",
|
||||
"browseros-patch status ch1",
|
||||
"browseros-patch sync ch1",
|
||||
"browseros-patch apply ch1",
|
||||
"browseros-patch extract ch1",
|
||||
"list reads only registered Chromium checkouts",
|
||||
"does not inspect sync state",
|
||||
} {
|
||||
if !strings.Contains(text, want) {
|
||||
t.Fatalf("expected llm txt to contain %q, got:\n%s", want, text)
|
||||
}
|
||||
}
|
||||
if strings.Contains(text, "\x1b[") {
|
||||
t.Fatalf("llm txt should be uncolored, got:\n%s", text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRootLLMTxtPrintsWithoutLoadingApp(t *testing.T) {
|
||||
oldAppState := appState
|
||||
oldLLMTxt := llmTxt
|
||||
t.Cleanup(func() {
|
||||
appState = oldAppState
|
||||
llmTxt = oldLLMTxt
|
||||
rootCmd.SetArgs(nil)
|
||||
rootCmd.SetOut(nil)
|
||||
rootCmd.SetErr(nil)
|
||||
})
|
||||
|
||||
appState = nil
|
||||
llmTxt = false
|
||||
var stdout bytes.Buffer
|
||||
rootCmd.SetArgs([]string{"--llm-txt"})
|
||||
rootCmd.SetOut(&stdout)
|
||||
rootCmd.SetErr(io.Discard)
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
t.Fatalf("execute --llm-txt: %v", err)
|
||||
}
|
||||
if appState != nil {
|
||||
t.Fatalf("--llm-txt should not load app state")
|
||||
}
|
||||
if !strings.Contains(stdout.String(), "browseros-patch diff ch1") {
|
||||
t.Fatalf("expected llm txt output, got:\n%s", stdout.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLLMTxtRejectedWithSubcommand(t *testing.T) {
|
||||
oldAppState := appState
|
||||
oldLLMTxt := llmTxt
|
||||
t.Cleanup(func() {
|
||||
appState = oldAppState
|
||||
llmTxt = oldLLMTxt
|
||||
rootCmd.SetArgs(nil)
|
||||
rootCmd.SetOut(nil)
|
||||
rootCmd.SetErr(nil)
|
||||
})
|
||||
|
||||
appState = nil
|
||||
llmTxt = false
|
||||
rootCmd.SetArgs([]string{"diff", "--llm-txt"})
|
||||
rootCmd.SetOut(io.Discard)
|
||||
rootCmd.SetErr(io.Discard)
|
||||
|
||||
err := rootCmd.Execute()
|
||||
if err == nil {
|
||||
t.Fatalf("expected --llm-txt subcommand error")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "unknown flag: --llm-txt") {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if appState != nil {
|
||||
t.Fatalf("--llm-txt subcommand error should not load app state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLLMTxtNotShownInSubcommandHelp(t *testing.T) {
|
||||
diffCmd, _, err := rootCmd.Find([]string{"diff"})
|
||||
if err != nil {
|
||||
t.Fatalf("find diff: %v", err)
|
||||
}
|
||||
|
||||
var help bytes.Buffer
|
||||
diffCmd.SetOut(&help)
|
||||
t.Cleanup(func() {
|
||||
diffCmd.SetOut(nil)
|
||||
})
|
||||
|
||||
if err := diffCmd.Help(); err != nil {
|
||||
t.Fatalf("diff help: %v", err)
|
||||
}
|
||||
if strings.Contains(help.String(), "--llm-txt") {
|
||||
t.Fatalf("subcommand help should not include root-only --llm-txt, got:\n%s", help.String())
|
||||
}
|
||||
}
|
||||
|
||||
func captureStdout(t *testing.T, fn func()) string {
|
||||
t.Helper()
|
||||
|
||||
oldStdout := os.Stdout
|
||||
reader, writer, err := os.Pipe()
|
||||
if err != nil {
|
||||
t.Fatalf("pipe stdout: %v", err)
|
||||
}
|
||||
os.Stdout = writer
|
||||
defer func() {
|
||||
os.Stdout = oldStdout
|
||||
}()
|
||||
|
||||
fn()
|
||||
os.Stdout = oldStdout
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("close stdout writer: %v", err)
|
||||
}
|
||||
output, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
t.Fatalf("read stdout: %v", err)
|
||||
}
|
||||
return string(output)
|
||||
}
|
||||
|
||||
@@ -12,12 +12,14 @@ import (
|
||||
func init() {
|
||||
var src string
|
||||
command := &cobra.Command{
|
||||
Use: "diff [workspace]",
|
||||
Use: "diff [checkout]",
|
||||
Annotations: map[string]string{"group": "Core:"},
|
||||
Short: "Preview patch differences for a workspace",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
Short: "Preview patch differences for a checkout",
|
||||
Example: ` browseros-patch diff ch1
|
||||
browseros-patch diff --src /path/to/chromium/src`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ws, err := resolveWorkspace(args, src)
|
||||
ws, err := resolveWorkspace(cmd, args, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -41,7 +43,7 @@ func init() {
|
||||
})
|
||||
},
|
||||
}
|
||||
command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
|
||||
command.Flags().StringVar(&src, "src", "", srcFlagUsage)
|
||||
rootCmd.AddCommand(command)
|
||||
}
|
||||
|
||||
|
||||
@@ -15,10 +15,13 @@ func init() {
|
||||
var squash bool
|
||||
var base string
|
||||
command := &cobra.Command{
|
||||
Use: "extract [workspace] [--range <start> <end>] [-- files...]",
|
||||
Use: "extract [checkout] [--range <start> <end>] [-- files...]",
|
||||
Annotations: map[string]string{"group": "Core:"},
|
||||
Short: "Extract workspace changes back to chromium_patches",
|
||||
Args: cobra.ArbitraryArgs,
|
||||
Short: "Extract checkout changes back to chromium_patches",
|
||||
Example: ` browseros-patch extract ch1
|
||||
browseros-patch extract ch1 --range HEAD~2 HEAD
|
||||
browseros-patch extract --src /path/to/chromium/src`,
|
||||
Args: cobra.ArbitraryArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
positional, filters := splitWorkspaceAndFilters(cmd, args)
|
||||
workspaceArgs := positional
|
||||
@@ -26,16 +29,16 @@ func init() {
|
||||
rangeEnd := ""
|
||||
if rangeMode {
|
||||
if len(positional) < 2 || len(positional) > 3 {
|
||||
return fmt.Errorf(`range mode expects "browseros-patch extract [workspace] --range <start> <end>"`)
|
||||
return fmt.Errorf(`range mode expects "browseros-patch extract [checkout] --range <start> <end>"`)
|
||||
}
|
||||
rangeStart = positional[len(positional)-2]
|
||||
rangeEnd = positional[len(positional)-1]
|
||||
workspaceArgs = positional[:len(positional)-2]
|
||||
}
|
||||
if len(workspaceArgs) > 1 {
|
||||
return fmt.Errorf("expected at most one workspace name")
|
||||
return fmt.Errorf("expected at most one checkout name")
|
||||
}
|
||||
ws, err := resolveWorkspace(workspaceArgs, src)
|
||||
ws, err := resolveWorkspace(cmd, workspaceArgs, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -65,7 +68,7 @@ func init() {
|
||||
})
|
||||
},
|
||||
}
|
||||
command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
|
||||
command.Flags().StringVar(&src, "src", "", srcFlagUsage)
|
||||
command.Flags().StringVar(&commit, "commit", "", "Extract from a single commit")
|
||||
command.Flags().BoolVar(&rangeMode, "range", false, "Extract from a commit range")
|
||||
command.Flags().BoolVar(&squash, "squash", false, "Squash a range into a cumulative diff")
|
||||
|
||||
@@ -11,13 +11,14 @@ func init() {
|
||||
command := &cobra.Command{
|
||||
Use: "list",
|
||||
Aliases: []string{"ls"},
|
||||
Annotations: map[string]string{"group": "Workspace:"},
|
||||
Short: "List registered workspaces",
|
||||
Annotations: map[string]string{"group": "Chromium Checkouts:"},
|
||||
Short: "List registered Chromium checkouts",
|
||||
Example: ` browseros-patch list`,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(appState.Registry.Workspaces) == 0 {
|
||||
return renderResult(map[string]any{"workspaces": []any{}}, func() {
|
||||
fmt.Println("No workspaces registered. Run `browseros-patch add <name> <path>`.")
|
||||
fmt.Println("No Chromium checkouts registered. Run `browseros-patch add <name> <path>`.")
|
||||
})
|
||||
}
|
||||
rows := make([][]string, 0, len(appState.Registry.Workspaces))
|
||||
|
||||
@@ -11,8 +11,8 @@ func init() {
|
||||
command := &cobra.Command{
|
||||
Use: "remove <name>",
|
||||
Aliases: []string{"rm"},
|
||||
Annotations: map[string]string{"group": "Workspace:"},
|
||||
Short: "Unregister a workspace",
|
||||
Annotations: map[string]string{"group": "Chromium Checkouts:"},
|
||||
Short: "Unregister a Chromium checkout",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
entry, err := appState.Registry.Remove(args[0])
|
||||
@@ -23,7 +23,7 @@ func init() {
|
||||
return err
|
||||
}
|
||||
return renderResult(map[string]any{"workspace": entry}, func() {
|
||||
fmt.Println(ui.Success("Removed workspace"))
|
||||
fmt.Println(ui.Success("Removed Chromium checkout"))
|
||||
fmt.Printf("%s %s\n", ui.Muted("name:"), entry.Name)
|
||||
fmt.Printf("%s %s\n", ui.Muted("path:"), entry.Path)
|
||||
})
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user