mirror of
https://github.com/browseros-ai/BrowserOS.git
synced 2026-05-13 23:53:25 +00:00
Compare commits
23 Commits
fix/patch-
...
polecat/fl
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1c93a05fd6 | ||
|
|
1af0378d98 | ||
|
|
d7e1125db3 | ||
|
|
8b6483a633 | ||
|
|
f54eff4543 | ||
|
|
f1ebfa5232 | ||
|
|
b89ea201fa | ||
|
|
4e405681a7 | ||
|
|
b445615d61 | ||
|
|
d68e8905fe | ||
|
|
e89fccd997 | ||
|
|
805ae8e607 | ||
|
|
833baec84d | ||
|
|
7a2a8e09bc | ||
|
|
6f8da5b7fb | ||
|
|
50cbe48558 | ||
|
|
d81b99c8e3 | ||
|
|
86cb03a1fc | ||
|
|
7765d99c73 | ||
|
|
db5e55a174 | ||
|
|
fbae45eb97 | ||
|
|
554fcd7c06 | ||
|
|
eed158eca0 |
@@ -0,0 +1,40 @@
|
||||
import { cloudSyncSignInLinks } from '@/lib/constants/productUrls'
|
||||
import { cn } from '@/lib/utils'
|
||||
|
||||
interface CloudSyncDisclosureProps {
|
||||
className?: string
|
||||
}
|
||||
|
||||
export function CloudSyncDisclosure({ className }: CloudSyncDisclosureProps) {
|
||||
const [termsLink, privacyLink, cloudSyncLink] = cloudSyncSignInLinks
|
||||
|
||||
return (
|
||||
<p
|
||||
className={cn(
|
||||
'text-center text-muted-foreground text-xs leading-relaxed',
|
||||
className,
|
||||
)}
|
||||
>
|
||||
By signing in, you agree to the <DisclosureLink link={termsLink} /> and
|
||||
acknowledge the <DisclosureLink link={privacyLink} />.{' '}
|
||||
<DisclosureLink link={cloudSyncLink} />.
|
||||
</p>
|
||||
)
|
||||
}
|
||||
|
||||
function DisclosureLink({
|
||||
link,
|
||||
}: {
|
||||
link: (typeof cloudSyncSignInLinks)[number]
|
||||
}) {
|
||||
return (
|
||||
<a
|
||||
href={link.url}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="font-medium underline underline-offset-2 hover:text-foreground"
|
||||
>
|
||||
{link.label}
|
||||
</a>
|
||||
)
|
||||
}
|
||||
@@ -80,6 +80,11 @@ const primarySettingsSections: NavSection[] = [
|
||||
icon: Palette,
|
||||
feature: Feature.CUSTOMIZATION_SUPPORT,
|
||||
},
|
||||
{
|
||||
name: 'Reset Data',
|
||||
to: '/settings/reset-data',
|
||||
icon: RotateCcw,
|
||||
},
|
||||
{
|
||||
name: 'Tool Approvals',
|
||||
to: '/settings/approvals',
|
||||
|
||||
@@ -30,6 +30,7 @@ import { MagicLinkCallback } from './login/MagicLinkCallback'
|
||||
import { MCPSettingsPage } from './mcp-settings/MCPSettingsPage'
|
||||
import { MemoryPage } from './memory/MemoryPage'
|
||||
import { ProfilePage } from './profile/ProfilePage'
|
||||
import { ResetDataPage } from './reset-data/ResetDataPage'
|
||||
import { ScheduledTasksPage } from './scheduled-tasks/ScheduledTasksPage'
|
||||
import { SearchProviderPage } from './search-provider/SearchProviderPage'
|
||||
import { SkillsPage } from './skills/SkillsPage'
|
||||
@@ -143,6 +144,7 @@ export const App: FC = () => {
|
||||
<Route path="chat" element={<LlmHubPage />} />
|
||||
<Route path="mcp" element={<MCPSettingsPage />} />
|
||||
<Route path="customization" element={<CustomizationPage />} />
|
||||
<Route path="reset-data" element={<ResetDataPage />} />
|
||||
<Route path="search" element={<SearchProviderPage />} />
|
||||
<Route path="survey" element={<SurveyPage {...surveyParams} />} />
|
||||
<Route path="usage" element={<UsagePage />} />
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { ArrowLeft } from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef } from 'react'
|
||||
import { ArrowLeft, PanelRight } from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { Navigate, useNavigate, useParams, useSearchParams } from 'react-router'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import type {
|
||||
@@ -16,8 +16,14 @@ import {
|
||||
useUpdateHarnessAgent,
|
||||
} from '@/entrypoints/app/agents/useAgents'
|
||||
import type { AgentEntry } from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import { type ProducedFilesRailGroup, useAgentOutputs } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { AgentRail } from './AgentRail'
|
||||
import { useAgentCommandData } from './agent-command-layout'
|
||||
import {
|
||||
OutputsRail,
|
||||
useOutputsRailOpen,
|
||||
} from './agent-conversation.outputs-rail'
|
||||
import { ClawChat } from './ClawChat'
|
||||
import { ConversationHeader } from './ConversationHeader'
|
||||
import { ConversationInput } from './ConversationInput'
|
||||
@@ -25,6 +31,8 @@ import {
|
||||
buildChatHistoryFromClawMessages,
|
||||
filterTurnsPersistedInHistory,
|
||||
flattenHistoryPages,
|
||||
mapHistoryToProducedFilesGroups,
|
||||
selectStripOnlyTurns,
|
||||
} from './claw-chat-types'
|
||||
import { consumePendingInitialMessage } from './pending-initial-message'
|
||||
import { QueuePanel } from './QueuePanel'
|
||||
@@ -38,6 +46,7 @@ function AgentConversationController({
|
||||
agents,
|
||||
agentPathPrefix,
|
||||
createAgentPath,
|
||||
onOpenOutputsRail,
|
||||
}: {
|
||||
agentId: string
|
||||
initialMessage: string | null
|
||||
@@ -45,6 +54,7 @@ function AgentConversationController({
|
||||
agents: AgentEntry[]
|
||||
agentPathPrefix: string
|
||||
createAgentPath: string
|
||||
onOpenOutputsRail?: ((turnId?: string | null) => void) | null
|
||||
}) {
|
||||
const navigate = useNavigate()
|
||||
const initialMessageSentRef = useRef<string | null>(null)
|
||||
@@ -76,6 +86,15 @@ function AgentConversationController({
|
||||
const harnessAgent = harnessAgents.find((entry) => entry.id === agentId)
|
||||
const queue = harnessAgent?.queue ?? []
|
||||
const activeTurnId = harnessAgent?.activeTurnId ?? null
|
||||
const isOpenClawAgent = harnessAgent?.adapter === 'openclaw'
|
||||
|
||||
// Used to surface produced-files strips on a fresh page load
|
||||
// when there's no optimistic turn to carry the data. Disabled
|
||||
// for non-openclaw adapters since they don't attribute files.
|
||||
const { groups: agentOutputGroups } = useAgentOutputs(
|
||||
agentId,
|
||||
isOpenClawAgent,
|
||||
)
|
||||
|
||||
const { turns, streaming, send } = useAgentConversation(agentId, {
|
||||
runtime: 'agent-harness',
|
||||
@@ -100,6 +119,44 @@ function AgentConversationController({
|
||||
() => filterTurnsPersistedInHistory(turns, historyMessages),
|
||||
[historyMessages, turns],
|
||||
)
|
||||
// Persisted turns that still need to surface their FileCardStrip
|
||||
// — history items don't carry produced-files data, so without
|
||||
// these the strip would vanish on history reload.
|
||||
const stripOnlyTurns = useMemo(
|
||||
() => selectStripOnlyTurns(turns, historyMessages),
|
||||
[historyMessages, turns],
|
||||
)
|
||||
// Two outputs from the per-turn matcher:
|
||||
// - filesByAssistantId → strip rendered directly under the
|
||||
// matching assistant history bubble.
|
||||
// - tailUnmatched → groups with no history pair (orphans);
|
||||
// rendered at the conversation tail.
|
||||
// Both are filtered to exclude turnIds already covered by a
|
||||
// live or strip-only optimistic turn (those carry their own
|
||||
// strip and history hasn't reloaded yet).
|
||||
const { filesByAssistantId, tailStripGroups } = useMemo(() => {
|
||||
if (!isOpenClawAgent) {
|
||||
return {
|
||||
filesByAssistantId: new Map<string, ProducedFilesRailGroup>(),
|
||||
tailStripGroups: [] as ProducedFilesRailGroup[],
|
||||
}
|
||||
}
|
||||
const coveredTurnIds = new Set<string>()
|
||||
for (const turn of turns) {
|
||||
if (turn.turnId) coveredTurnIds.add(turn.turnId)
|
||||
}
|
||||
const eligibleGroups = agentOutputGroups.filter(
|
||||
(group) => !coveredTurnIds.has(group.turnId),
|
||||
)
|
||||
const { byAssistantMessageId, unmatched } = mapHistoryToProducedFilesGroups(
|
||||
historyMessages,
|
||||
eligibleGroups,
|
||||
)
|
||||
return {
|
||||
filesByAssistantId: byAssistantMessageId,
|
||||
tailStripGroups: unmatched,
|
||||
}
|
||||
}, [agentOutputGroups, isOpenClawAgent, historyMessages, turns])
|
||||
onInitialMessageConsumedRef.current = onInitialMessageConsumed
|
||||
|
||||
const disabled = !agent
|
||||
@@ -171,12 +228,16 @@ function AgentConversationController({
|
||||
agentName={agentName}
|
||||
historyMessages={historyMessages}
|
||||
turns={visibleTurns}
|
||||
stripOnlyTurns={stripOnlyTurns}
|
||||
filesByAssistantId={filesByAssistantId}
|
||||
tailStripGroups={tailStripGroups}
|
||||
streaming={streaming}
|
||||
isInitialLoading={harnessHistoryQuery.isLoading}
|
||||
error={error}
|
||||
hasNextPage={false}
|
||||
isFetchingNextPage={false}
|
||||
onFetchNextPage={() => {}}
|
||||
onOpenOutputsRail={onOpenOutputsRail}
|
||||
onRetry={() => {
|
||||
void harnessHistoryQuery.refetch()
|
||||
}}
|
||||
@@ -287,6 +348,45 @@ export const AgentCommandConversation: FC<AgentCommandConversationProps> = ({
|
||||
const isPageVariant = variant === 'page'
|
||||
const backLabel = isPageVariant ? 'Back to agents' : 'Back to home'
|
||||
|
||||
const isOpenClawAgent = harnessAgent?.adapter === 'openclaw'
|
||||
const [outputsRailOpen, setOutputsRailOpen] =
|
||||
useOutputsRailOpen(resolvedAgentId)
|
||||
const railVisible = isOpenClawAgent && outputsRailOpen
|
||||
|
||||
// Deep-link target for the rail. Set when (a) the user clicks
|
||||
// View / +N on an inline file-card strip, or (b) an external nav
|
||||
// arrived with `?outputsTurn=<turnId>`. Cleared by the rail
|
||||
// itself once it has scrolled to + expanded the matching group.
|
||||
const urlOutputsTurn = searchParams.get('outputsTurn')
|
||||
const [focusTurnId, setFocusTurnId] = useState<string | null>(urlOutputsTurn)
|
||||
// If the URL param flips while we're already on this agent, sync.
|
||||
useEffect(() => {
|
||||
if (!urlOutputsTurn) return
|
||||
setFocusTurnId(urlOutputsTurn)
|
||||
if (isOpenClawAgent) setOutputsRailOpen(true)
|
||||
}, [urlOutputsTurn, isOpenClawAgent, setOutputsRailOpen])
|
||||
|
||||
const handleOpenOutputsRail = (turnId?: string | null) => {
|
||||
if (!isOpenClawAgent) return
|
||||
setOutputsRailOpen(true)
|
||||
setFocusTurnId(turnId ?? null)
|
||||
}
|
||||
const handleFocusTurnConsumed = () => {
|
||||
setFocusTurnId(null)
|
||||
if (urlOutputsTurn) {
|
||||
// Drop the URL param so a back-nav doesn't re-trigger the
|
||||
// scroll. `replace: true` keeps history clean.
|
||||
setSearchParams(
|
||||
(prev) => {
|
||||
const next = new URLSearchParams(prev)
|
||||
next.delete('outputsTurn')
|
||||
return next
|
||||
},
|
||||
{ replace: true },
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const adapterHealth = useMemo<AgentAdapterHealth | null>(() => {
|
||||
const adapterId = harnessAgent?.adapter
|
||||
if (!adapterId) return null
|
||||
@@ -346,13 +446,34 @@ export const AgentCommandConversation: FC<AgentCommandConversationProps> = ({
|
||||
onPinToggle={(next) =>
|
||||
handlePinToggle(harnessAgent ?? null, next)
|
||||
}
|
||||
headerExtra={
|
||||
isOpenClawAgent ? (
|
||||
<Button
|
||||
variant={railVisible ? 'secondary' : 'ghost'}
|
||||
size="icon"
|
||||
className="size-8 rounded-xl"
|
||||
onClick={() => setOutputsRailOpen(!railVisible)}
|
||||
title={railVisible ? 'Hide outputs' : 'Show outputs'}
|
||||
>
|
||||
<PanelRight className="size-4" />
|
||||
</Button>
|
||||
) : undefined
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Body grid: rail list + chat. Both columns share the same
|
||||
top edge (the band above) so headers can never drift. */}
|
||||
<div className="grid min-h-0 flex-1 grid-rows-[minmax(0,1fr)] lg:grid-cols-[288px_minmax(0,1fr)]">
|
||||
{/* Body grid: rail list + chat (+ outputs rail when an
|
||||
openclaw agent has it open). Columns share the same top
|
||||
edge as the band above so headers can never drift. */}
|
||||
<div
|
||||
className={cn(
|
||||
'grid min-h-0 flex-1 grid-rows-[minmax(0,1fr)]',
|
||||
railVisible
|
||||
? 'lg:grid-cols-[288px_minmax(0,1fr)_320px]'
|
||||
: 'lg:grid-cols-[288px_minmax(0,1fr)]',
|
||||
)}
|
||||
>
|
||||
<AgentRail
|
||||
agents={harnessAgents}
|
||||
adapters={adapters}
|
||||
@@ -367,13 +488,34 @@ export const AgentCommandConversation: FC<AgentCommandConversationProps> = ({
|
||||
agentId={resolvedAgentId}
|
||||
agents={agents}
|
||||
initialMessage={initialMessage}
|
||||
onInitialMessageConsumed={() =>
|
||||
setSearchParams({}, { replace: true })
|
||||
}
|
||||
onInitialMessageConsumed={() => {
|
||||
// Preserve the outputsTurn deep-link if present —
|
||||
// dropping all params would erase the rail focus
|
||||
// before it had a chance to consume.
|
||||
setSearchParams(
|
||||
(prev) => {
|
||||
const next = new URLSearchParams()
|
||||
const turn = prev.get('outputsTurn')
|
||||
if (turn) next.set('outputsTurn', turn)
|
||||
return next
|
||||
},
|
||||
{ replace: true },
|
||||
)
|
||||
}}
|
||||
agentPathPrefix={agentPathPrefix}
|
||||
createAgentPath={createAgentPath}
|
||||
onOpenOutputsRail={isOpenClawAgent ? handleOpenOutputsRail : null}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{railVisible ? (
|
||||
<OutputsRail
|
||||
agentId={resolvedAgentId}
|
||||
onClose={() => setOutputsRailOpen(false)}
|
||||
focusTurnId={focusTurnId}
|
||||
onFocusTurnConsumed={handleFocusTurnConsumed}
|
||||
/>
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -162,12 +162,16 @@ export const AgentCommandHome: FC = () => {
|
||||
<>
|
||||
<div className="flex flex-col items-center gap-5 pt-[max(10vh,24px)] text-center">
|
||||
<div className="space-y-3">
|
||||
<h1 className="font-semibold text-[clamp(2rem,4vw,3.25rem)] leading-tight tracking-tight">
|
||||
What should your agent work on next?
|
||||
<h1 className="font-semibold text-[clamp(2.25rem,4.5vw,3.5rem)] leading-[1.08] tracking-[-0.025em] [text-wrap:balance]">
|
||||
What should your agent{' '}
|
||||
<span className="font-medium text-[var(--accent-orange)] italic">
|
||||
work on
|
||||
</span>{' '}
|
||||
next?
|
||||
</h1>
|
||||
<p className="mx-auto max-w-2xl text-muted-foreground text-sm leading-6">
|
||||
Start with a task, continue a thread, or switch to another
|
||||
agent without leaving the new tab.
|
||||
<p className="mx-auto max-w-2xl text-muted-foreground text-sm leading-6 [text-wrap:pretty]">
|
||||
Start a task, continue a thread, or hand off to a different
|
||||
agent — all without leaving this tab.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -27,6 +27,14 @@ interface AgentSelectorProps {
|
||||
onSelectAgent: (agent: AgentEntry) => void
|
||||
onCreateAgent?: () => void
|
||||
status?: string
|
||||
/**
|
||||
* `'pill'` renders the filled-pill variant used by the calm
|
||||
* composer on `/home` — bordered, slightly elevated background,
|
||||
* mono agent name, used as the visual anchor on the left of the
|
||||
* footer chip row. Default `'ghost'` keeps the existing flat
|
||||
* shadcn ghost-button trigger used by the chat surface.
|
||||
*/
|
||||
triggerVariant?: 'ghost' | 'pill'
|
||||
}
|
||||
|
||||
function getStatusDot(status?: string) {
|
||||
@@ -42,31 +50,49 @@ export const AgentSelector: FC<AgentSelectorProps> = ({
|
||||
onSelectAgent,
|
||||
onCreateAgent,
|
||||
status,
|
||||
triggerVariant = 'ghost',
|
||||
}) => {
|
||||
const [open, setOpen] = useState(false)
|
||||
const selectedAgent = agents.find(
|
||||
(agent) => agent.agentId === selectedAgentId,
|
||||
)
|
||||
|
||||
const triggerNode =
|
||||
triggerVariant === 'pill' ? (
|
||||
<button
|
||||
type="button"
|
||||
className={cn(
|
||||
'inline-flex h-6 max-w-[180px] items-center gap-1.5 rounded-full border border-border bg-accent/40 pr-2 pl-2.5 text-[11.5px] text-foreground transition-colors',
|
||||
'hover:border-border hover:bg-accent/70 data-[state=open]:border-border data-[state=open]:bg-accent/70',
|
||||
)}
|
||||
>
|
||||
<span className={cn('size-1.5 rounded-full', getStatusDot(status))} />
|
||||
<span className="truncate font-medium font-mono text-[11.5px] tracking-[-0.01em]">
|
||||
{selectedAgent?.name ?? 'Select agent'}
|
||||
</span>
|
||||
<ChevronDown className="size-3 shrink-0 text-muted-foreground" />
|
||||
</button>
|
||||
) : (
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<Bot className="h-4 w-4" />
|
||||
<span className={cn('size-2 rounded-full', getStatusDot(status))} />
|
||||
<span className="max-w-32 truncate">
|
||||
{selectedAgent?.name ?? 'Select agent'}
|
||||
</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
)
|
||||
|
||||
return (
|
||||
<Popover open={open} onOpenChange={setOpen}>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<Bot className="h-4 w-4" />
|
||||
<span className={cn('size-2 rounded-full', getStatusDot(status))} />
|
||||
<span className="max-w-32 truncate">
|
||||
{selectedAgent?.name ?? 'Select agent'}
|
||||
</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
<PopoverTrigger asChild>{triggerNode}</PopoverTrigger>
|
||||
<PopoverContent side="bottom" align="start" className="w-72 p-0">
|
||||
<Command>
|
||||
<CommandInput placeholder="Search agents..." className="h-9" />
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
import { Bot, Loader2, RefreshCw } from 'lucide-react'
|
||||
import { type FC, useEffect, useRef } from 'react'
|
||||
import { type FC, Fragment, useEffect, useRef } from 'react'
|
||||
import {
|
||||
Conversation,
|
||||
ConversationContent,
|
||||
ConversationScrollButton,
|
||||
} from '@/components/ai-elements/conversation'
|
||||
import type { AgentConversationTurn } from '@/lib/agent-conversations/types'
|
||||
import type { ProducedFilesRailGroup } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FileCardStrip } from './agent-conversation.file-card-strip'
|
||||
import { ClawChatMessage } from './ClawChatMessage'
|
||||
import { ConversationMessage } from './ConversationMessage'
|
||||
import type { ClawChatMessage as ClawChatMessageModel } from './claw-chat-types'
|
||||
@@ -15,6 +17,29 @@ interface ClawChatProps {
|
||||
agentName: string
|
||||
historyMessages: ClawChatMessageModel[]
|
||||
turns: AgentConversationTurn[]
|
||||
/**
|
||||
* Persisted turns that still need to render their FileCardStrip
|
||||
* because the history items they were filtered against don't
|
||||
* carry produced-files data. Rendered between history and the
|
||||
* live `turns` so the strip lands at the bottom of the
|
||||
* corresponding assistant turn.
|
||||
*/
|
||||
stripOnlyTurns?: AgentConversationTurn[]
|
||||
/**
|
||||
* Maps each assistant history message id → the produced-files
|
||||
* group that came from its turn. Built by
|
||||
* `mapHistoryToProducedFilesGroups` upstream so the strip
|
||||
* renders directly under the matching message instead of
|
||||
* stacking at the conversation tail.
|
||||
*/
|
||||
filesByAssistantId?: Map<string, ProducedFilesRailGroup>
|
||||
/**
|
||||
* Produced-files groups that didn't match any persisted history
|
||||
* pair (e.g. orphaned turns where history loaded after the
|
||||
* group was attributed). Rendered at the conversation tail as
|
||||
* a fallback so the user can still see them.
|
||||
*/
|
||||
tailStripGroups?: ReadonlyArray<ProducedFilesRailGroup>
|
||||
streaming: boolean
|
||||
isInitialLoading: boolean
|
||||
error: Error | null
|
||||
@@ -22,6 +47,8 @@ interface ClawChatProps {
|
||||
isFetchingNextPage: boolean
|
||||
onFetchNextPage: () => void
|
||||
onRetry: () => void
|
||||
/** Wired through to the inline file-card strip on each assistant turn. */
|
||||
onOpenOutputsRail?: ((turnId?: string | null) => void) | null
|
||||
className?: string
|
||||
}
|
||||
|
||||
@@ -78,6 +105,9 @@ export const ClawChat: FC<ClawChatProps> = ({
|
||||
agentName,
|
||||
historyMessages,
|
||||
turns,
|
||||
stripOnlyTurns,
|
||||
filesByAssistantId,
|
||||
tailStripGroups,
|
||||
streaming,
|
||||
isInitialLoading,
|
||||
error,
|
||||
@@ -85,6 +115,7 @@ export const ClawChat: FC<ClawChatProps> = ({
|
||||
isFetchingNextPage,
|
||||
onFetchNextPage,
|
||||
onRetry,
|
||||
onOpenOutputsRail,
|
||||
className,
|
||||
}) => {
|
||||
const topSentinelRef = useRef<HTMLDivElement>(null)
|
||||
@@ -147,14 +178,44 @@ export const ClawChat: FC<ClawChatProps> = ({
|
||||
Start of conversation
|
||||
</div>
|
||||
) : null}
|
||||
{historyMessages.map((message) => (
|
||||
<ClawChatMessage key={message.id} message={message} />
|
||||
{historyMessages.map((message) => {
|
||||
const matched = filesByAssistantId?.get(message.id)
|
||||
return (
|
||||
<Fragment key={message.id}>
|
||||
<ClawChatMessage message={message} />
|
||||
{matched ? (
|
||||
<FileCardStrip
|
||||
turnId={matched.turnId}
|
||||
files={matched.files}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
) : null}
|
||||
</Fragment>
|
||||
)
|
||||
})}
|
||||
{(tailStripGroups ?? []).map((group) => (
|
||||
<FileCardStrip
|
||||
key={`tail-strip-${group.turnId}`}
|
||||
turnId={group.turnId}
|
||||
files={group.files}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
))}
|
||||
{(stripOnlyTurns ?? []).map((turn) => (
|
||||
<ConversationMessage
|
||||
key={`strip-${turn.id}`}
|
||||
turn={turn}
|
||||
streaming={false}
|
||||
stripOnly
|
||||
onOpenOutputsRail={onOpenOutputsRail}
|
||||
/>
|
||||
))}
|
||||
{turns.map((turn, index) => (
|
||||
<ConversationMessage
|
||||
key={turn.id}
|
||||
turn={turn}
|
||||
streaming={streaming && index === turns.length - 1}
|
||||
onOpenOutputsRail={onOpenOutputsRail}
|
||||
/>
|
||||
))}
|
||||
{error ? (
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { ArrowLeft, Home } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import type { FC, ReactNode } from 'react'
|
||||
import { Badge } from '@/components/ui/badge'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { formatRelativeTime } from '@/entrypoints/app/agents/agent-display.helpers'
|
||||
@@ -14,12 +14,14 @@ import { cn } from '@/lib/utils'
|
||||
interface ConversationHeaderProps {
|
||||
agent: HarnessAgent | null
|
||||
fallbackName: string
|
||||
fallbackAdapter: 'claude' | 'codex' | 'openclaw' | 'unknown'
|
||||
fallbackAdapter: 'claude' | 'codex' | 'openclaw' | 'hermes' | 'unknown'
|
||||
adapterHealth: AgentAdapterHealth | null
|
||||
backLabel: string
|
||||
backTarget: 'home' | 'page'
|
||||
onGoHome: () => void
|
||||
onPinToggle: (next: boolean) => void
|
||||
/** Optional trailing slot — currently used for the Outputs rail toggle. */
|
||||
headerExtra?: ReactNode
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -40,6 +42,7 @@ export const ConversationHeader: FC<ConversationHeaderProps> = ({
|
||||
backTarget,
|
||||
onGoHome,
|
||||
onPinToggle,
|
||||
headerExtra,
|
||||
}) => {
|
||||
const BackIcon = backTarget === 'home' ? Home : ArrowLeft
|
||||
const adapter = agent?.adapter ?? fallbackAdapter
|
||||
@@ -90,16 +93,21 @@ export const ConversationHeader: FC<ConversationHeaderProps> = ({
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex shrink-0 flex-col items-end gap-1">
|
||||
<StatusPill
|
||||
status={status}
|
||||
hasActiveTurn={Boolean(agent?.activeTurnId)}
|
||||
/>
|
||||
<div className="flex h-4 items-center text-[11px] text-muted-foreground">
|
||||
<span className="truncate">
|
||||
{metaParts.length > 0 ? metaParts.join(' · ') : '\u00A0'}
|
||||
</span>
|
||||
<div className="flex shrink-0 items-center gap-3">
|
||||
<div className="flex shrink-0 flex-col items-end gap-1">
|
||||
<StatusPill
|
||||
status={status}
|
||||
hasActiveTurn={Boolean(agent?.activeTurnId)}
|
||||
/>
|
||||
<div className="flex h-4 items-center text-[11px] text-muted-foreground">
|
||||
<span className="truncate">
|
||||
{metaParts.length > 0 ? metaParts.join(' · ') : '\u00A0'}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
{headerExtra ? (
|
||||
<div className="flex shrink-0 items-center">{headerExtra}</div>
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -164,7 +164,16 @@ function VoiceButton({
|
||||
)
|
||||
}
|
||||
|
||||
function ContextControls({
|
||||
/**
|
||||
* Calm-composer footer shared by both `/home` (`variant="home"`) and
|
||||
* the chat surface at `/agents/:agentId` (`variant="conversation"`).
|
||||
* Pill-shaped chips on an internal dashed divider, with a right-
|
||||
* aligned keyboard hint. The agent selector is conditional via
|
||||
* `showAgentSelector`: home shows it as a filled pill on the left,
|
||||
* the chat surface hides it (the agent is locked once you're in the
|
||||
* conversation).
|
||||
*/
|
||||
function CalmContextControls({
|
||||
agents,
|
||||
onCreateAgent,
|
||||
onSelectAgent,
|
||||
@@ -201,110 +210,128 @@ function ContextControls({
|
||||
)?.is_authenticated
|
||||
})
|
||||
|
||||
const showApps = supports(Feature.MANAGED_MCP_SUPPORT)
|
||||
const showWorkspace = supports(Feature.WORKSPACE_FOLDER_SUPPORT)
|
||||
|
||||
return (
|
||||
<div className="flex items-center justify-between border-border/40 border-t px-4 py-2.5">
|
||||
<div className="flex items-center gap-1">
|
||||
{showAgentSelector ? (
|
||||
<div className="mx-3 flex items-center gap-1 border-border/60 border-t border-dashed py-2">
|
||||
{showAgentSelector ? (
|
||||
<>
|
||||
<AgentSelector
|
||||
agents={agents}
|
||||
selectedAgentId={selectedAgentId}
|
||||
onSelectAgent={onSelectAgent}
|
||||
onCreateAgent={onCreateAgent}
|
||||
status={status}
|
||||
triggerVariant="pill"
|
||||
/>
|
||||
) : null}
|
||||
{supports(Feature.WORKSPACE_FOLDER_SUPPORT) ? (
|
||||
<WorkspaceSelector>
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<Folder className="h-4 w-4" />
|
||||
<span>{selectedFolder?.name || 'Add workspace'}</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
</WorkspaceSelector>
|
||||
) : null}
|
||||
<TabPickerPopover
|
||||
variant="selector"
|
||||
selectedTabs={selectedTabs}
|
||||
onToggleTab={onToggleTab}
|
||||
>
|
||||
<Button
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
selectedTabs.length > 0
|
||||
? 'bg-[var(--accent-orange)]! text-white shadow-sm'
|
||||
: 'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
<span
|
||||
aria-hidden="true"
|
||||
className="mx-1 inline-block h-3.5 w-px shrink-0 bg-border"
|
||||
/>
|
||||
</>
|
||||
) : null}
|
||||
{showWorkspace ? (
|
||||
<WorkspaceSelector>
|
||||
<button
|
||||
type="button"
|
||||
className="inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] text-muted-foreground transition-colors hover:bg-accent hover:text-foreground data-[state=open]:bg-accent data-[state=open]:text-foreground"
|
||||
>
|
||||
<Layers className="h-4 w-4" />
|
||||
<span>Tabs</span>
|
||||
</Button>
|
||||
</TabPickerPopover>
|
||||
<Button
|
||||
<Folder className="size-3" />
|
||||
<span>Workspace</span>
|
||||
<span className="font-mono text-[10.5px] text-muted-foreground/70">
|
||||
{selectedFolder?.name ?? 'none'}
|
||||
</span>
|
||||
</button>
|
||||
</WorkspaceSelector>
|
||||
) : null}
|
||||
<TabPickerPopover
|
||||
variant="selector"
|
||||
selectedTabs={selectedTabs}
|
||||
onToggleTab={onToggleTab}
|
||||
>
|
||||
<button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
onClick={onAttachClick}
|
||||
disabled={attachDisabled || !attachmentsEnabled}
|
||||
title="Attach files"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] transition-colors data-[state=open]:bg-accent data-[state=open]:text-foreground',
|
||||
selectedTabs.length > 0
|
||||
? 'bg-[var(--accent-orange)] text-white hover:bg-[var(--accent-orange)]/90'
|
||||
: 'text-muted-foreground hover:bg-accent hover:text-foreground',
|
||||
)}
|
||||
>
|
||||
<Paperclip className="h-4 w-4" />
|
||||
<span>Attach</span>
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{supports(Feature.MANAGED_MCP_SUPPORT) ? (
|
||||
<div className="ml-auto flex items-center gap-1.5">
|
||||
<AppSelector side="bottom">
|
||||
<Button
|
||||
variant="ghost"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
'data-[state=open]:bg-accent',
|
||||
)}
|
||||
>
|
||||
<div className="flex items-center -space-x-1.5">
|
||||
<Layers className="size-3" />
|
||||
<span>Tabs</span>
|
||||
<span
|
||||
className={cn(
|
||||
'font-mono text-[10.5px]',
|
||||
selectedTabs.length > 0
|
||||
? 'text-white/80'
|
||||
: 'text-muted-foreground/70',
|
||||
)}
|
||||
>
|
||||
{selectedTabs.length}
|
||||
</span>
|
||||
</button>
|
||||
</TabPickerPopover>
|
||||
<button
|
||||
type="button"
|
||||
onClick={onAttachClick}
|
||||
disabled={attachDisabled || !attachmentsEnabled}
|
||||
title="Attach files"
|
||||
className="inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] text-muted-foreground transition-colors hover:bg-accent hover:text-foreground disabled:cursor-not-allowed disabled:opacity-50"
|
||||
>
|
||||
<Paperclip className="size-3" />
|
||||
<span>Attach</span>
|
||||
</button>
|
||||
{showApps ? (
|
||||
<AppSelector side="bottom">
|
||||
<button
|
||||
type="button"
|
||||
className="inline-flex h-6 items-center gap-1.5 rounded-full px-2.5 text-[11.5px] text-muted-foreground transition-colors hover:bg-accent hover:text-foreground data-[state=open]:bg-accent data-[state=open]:text-foreground"
|
||||
>
|
||||
{connectedManagedServers.length > 0 ? (
|
||||
<span className="flex items-center -space-x-1.5">
|
||||
{connectedManagedServers.slice(0, 4).map((server) => (
|
||||
<div
|
||||
<span
|
||||
key={server.id}
|
||||
className="rounded-full ring-2 ring-card"
|
||||
>
|
||||
<McpServerIcon
|
||||
serverName={server.managedServerName ?? ''}
|
||||
size={16}
|
||||
size={12}
|
||||
/>
|
||||
</div>
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
{connectedManagedServers.length > 4 ? (
|
||||
<span className="text-xs">
|
||||
+{connectedManagedServers.length - 4}
|
||||
</span>
|
||||
) : null}
|
||||
<span>Apps</span>
|
||||
<ChevronDown className="h-3 w-3" />
|
||||
</Button>
|
||||
</AppSelector>
|
||||
</div>
|
||||
</span>
|
||||
) : (
|
||||
<FileText className="size-3" />
|
||||
)}
|
||||
<span>Apps</span>
|
||||
<ChevronDown className="size-3" />
|
||||
</button>
|
||||
</AppSelector>
|
||||
) : null}
|
||||
<div className="ml-auto inline-flex shrink-0 items-center gap-1.5 text-[11px] text-muted-foreground/70">
|
||||
<kbd className="inline-flex h-4 min-w-4 items-center justify-center rounded border border-border bg-accent/30 px-1 font-mono text-[10px] text-muted-foreground">
|
||||
↵
|
||||
</kbd>
|
||||
<span>to run</span>
|
||||
<span className="text-muted-foreground/40">·</span>
|
||||
<kbd className="inline-flex h-4 min-w-4 items-center justify-center rounded border border-border bg-accent/30 px-1 font-mono text-[10px] text-muted-foreground">
|
||||
⇧
|
||||
</kbd>
|
||||
<kbd className="inline-flex h-4 min-w-4 items-center justify-center rounded border border-border bg-accent/30 px-1 font-mono text-[10px] text-muted-foreground">
|
||||
↵
|
||||
</kbd>
|
||||
<span>new line</span>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function HomeShell({ children }: { children: ReactNode }) {
|
||||
return (
|
||||
<div className="overflow-hidden rounded-[1.55rem] border border-border/60 bg-card/95 shadow-sm">
|
||||
<div className="overflow-hidden rounded-[1.55rem] border border-border/60 bg-card/95 shadow-sm transition-[border-color,box-shadow] duration-150 focus-within:border-[var(--accent-orange)]/40 focus-within:shadow-[0_0_0_4px_color-mix(in_oklch,var(--accent-orange)_15%,transparent),0_1px_2px_rgba(15,23,42,0.04)]">
|
||||
{children}
|
||||
</div>
|
||||
)
|
||||
@@ -312,7 +339,7 @@ function HomeShell({ children }: { children: ReactNode }) {
|
||||
|
||||
function ConversationShell({ children }: { children: ReactNode }) {
|
||||
return (
|
||||
<div className="overflow-hidden rounded-[1.35rem] border border-border/50 bg-background/95 shadow-[0_10px_30px_rgba(15,23,42,0.06)] backdrop-blur-md">
|
||||
<div className="overflow-hidden rounded-[1.35rem] border border-border/50 bg-background/95 shadow-[0_10px_30px_rgba(15,23,42,0.06)] backdrop-blur-md transition-[border-color,box-shadow] duration-150 focus-within:border-[var(--accent-orange)]/40 focus-within:shadow-[0_0_0_4px_color-mix(in_oklch,var(--accent-orange)_15%,transparent),0_10px_30px_rgba(15,23,42,0.06)]">
|
||||
{children}
|
||||
</div>
|
||||
)
|
||||
@@ -542,7 +569,7 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
}
|
||||
disabled={disabled || voice.isTranscribing}
|
||||
className={cn(
|
||||
'resize-none border-none bg-transparent px-0 text-[15px] shadow-none focus-visible:ring-0',
|
||||
'resize-none border-none bg-transparent px-0 text-[15px] shadow-none focus-visible:ring-0 dark:bg-transparent',
|
||||
'[field-sizing:fixed]',
|
||||
variant === 'home'
|
||||
? 'min-h-[40px] py-2 leading-6'
|
||||
@@ -583,7 +610,7 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
{voice.error}
|
||||
</div>
|
||||
) : null}
|
||||
<ContextControls
|
||||
<CalmContextControls
|
||||
agents={agents}
|
||||
onCreateAgent={onCreateAgent}
|
||||
onSelectAgent={onSelectAgent}
|
||||
|
||||
@@ -22,10 +22,26 @@ import type {
|
||||
AgentConversationTurn,
|
||||
ToolEntry,
|
||||
} from '@/lib/agent-conversations/types'
|
||||
import { FileCardStrip } from './agent-conversation.file-card-strip'
|
||||
|
||||
interface ConversationMessageProps {
|
||||
turn: AgentConversationTurn
|
||||
streaming: boolean
|
||||
/**
|
||||
* Forwarded to the inline file-card strip's "View" / "+N"
|
||||
* button. Wired up by AgentCommandConversation so the strip can
|
||||
* deep-link straight into the Outputs rail at the matching turn
|
||||
* group. `null` here disables the strip's deep-link affordance
|
||||
* — the cards still open the preview Sheet directly.
|
||||
*/
|
||||
onOpenOutputsRail?: ((turnId?: string | null) => void) | null
|
||||
/**
|
||||
* Render only the trailing FileCardStrip for this turn — used
|
||||
* when the turn's user / assistant text is already rendered
|
||||
* elsewhere (e.g. by `ClawChatMessage` from persisted history)
|
||||
* but the produced-files affordance would otherwise be lost.
|
||||
*/
|
||||
stripOnly?: boolean
|
||||
}
|
||||
|
||||
interface RenderEntry {
|
||||
@@ -88,9 +104,22 @@ function ToolStatusIcon({ status }: { status: ToolEntry['status'] }) {
|
||||
export const ConversationMessage: FC<ConversationMessageProps> = ({
|
||||
turn,
|
||||
streaming,
|
||||
onOpenOutputsRail,
|
||||
stripOnly,
|
||||
}) => {
|
||||
const entries = useMemo(() => buildRenderEntries(turn), [turn])
|
||||
|
||||
if (stripOnly) {
|
||||
if (!turn.producedFiles || turn.producedFiles.length === 0) return null
|
||||
return (
|
||||
<FileCardStrip
|
||||
turnId={turn.turnId ?? null}
|
||||
files={turn.producedFiles}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-3">
|
||||
<Message from="user">
|
||||
@@ -185,6 +214,14 @@ export const ConversationMessage: FC<ConversationMessageProps> = ({
|
||||
</Message>
|
||||
)}
|
||||
|
||||
{turn.producedFiles && turn.producedFiles.length > 0 ? (
|
||||
<FileCardStrip
|
||||
turnId={turn.turnId ?? null}
|
||||
files={turn.producedFiles}
|
||||
onOpenRail={onOpenOutputsRail ?? (() => {})}
|
||||
/>
|
||||
) : null}
|
||||
|
||||
{!turn.done && turn.parts.length === 0 && streaming && (
|
||||
<div className="flex gap-2">
|
||||
<div className="flex size-7 shrink-0 items-center justify-center rounded-full bg-[var(--accent-orange)] text-white">
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* @deprecated Replaced by `FileCardStrip` in
|
||||
* `agent-conversation.file-card-strip.tsx`. Kept temporarily so
|
||||
* any in-flight callers don't fail to import; remove in a
|
||||
* follow-up once nothing external references it.
|
||||
*
|
||||
* Compact "Files produced" card rendered under an assistant turn.
|
||||
*/
|
||||
|
||||
import { FileText, Image as ImageIcon, Paperclip } from 'lucide-react'
|
||||
import { type FC, useMemo, useState } from 'react'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { basenameOf, formatFileSize, inferFileKind } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FilePreviewSheet } from './agent-conversation.file-preview-sheet'
|
||||
|
||||
export interface ProducedFileLike {
|
||||
id: string
|
||||
path: string
|
||||
size: number
|
||||
}
|
||||
|
||||
interface ArtifactCardProps {
|
||||
files: ReadonlyArray<ProducedFileLike>
|
||||
className?: string
|
||||
}
|
||||
|
||||
const MAX_INLINE_ROWS = 4
|
||||
|
||||
/**
 * @deprecated Superseded by `FileCardStrip` (see the file-level note).
 *
 * Bordered card listing the files an assistant turn produced, capped
 * at `MAX_INLINE_ROWS` rows with a one-way "Show N more" expander,
 * plus the shared preview Sheet for whichever file is selected.
 */
export const ArtifactCard: FC<ArtifactCardProps> = ({ files, className }) => {
  // Which file's preview Sheet is open; null means closed.
  const [openFileId, setOpenFileId] = useState<string | null>(null)
  // One-way flag: once expanded there is no collapse affordance.
  const [expanded, setExpanded] = useState(false)

  // Stable alphabetical order by path so re-renders don't shuffle rows.
  const sortedFiles = useMemo(
    () => [...files].sort((a, b) => a.path.localeCompare(b.path)),
    [files],
  )

  // Early-out AFTER the hooks above so the hook order stays constant.
  if (sortedFiles.length === 0) return null

  const visible = expanded ? sortedFiles : sortedFiles.slice(0, MAX_INLINE_ROWS)
  const hiddenCount = sortedFiles.length - visible.length
  // Resolve the selected id back to its file for the preview Sheet.
  const openFile = sortedFiles.find((file) => file.id === openFileId) ?? null

  return (
    <div
      className={cn(
        'rounded-xl border border-border/60 bg-card/50 px-3 py-2.5',
        className,
      )}
    >
      <div className="mb-2 flex items-center gap-2 text-muted-foreground text-xs">
        <Paperclip className="size-3.5" />
        <span className="font-medium text-foreground">
          {sortedFiles.length === 1
            ? '1 file produced'
            : `${sortedFiles.length} files produced`}
        </span>
      </div>

      <ul className="flex flex-col gap-1">
        {visible.map((file) => (
          <li key={file.id}>
            <ArtifactRow file={file} onOpen={() => setOpenFileId(file.id)} />
          </li>
        ))}
      </ul>

      {hiddenCount > 0 ? (
        <Button
          type="button"
          variant="ghost"
          size="sm"
          className="mt-1.5 h-7 px-2 text-xs"
          onClick={() => setExpanded(true)}
        >
          Show {hiddenCount} more
        </Button>
      ) : null}

      <FilePreviewSheet
        fileId={openFile?.id ?? null}
        filePath={openFile?.path ?? null}
        open={Boolean(openFileId)}
        onOpenChange={(next) => {
          if (!next) setOpenFileId(null)
        }}
      />
    </div>
  )
}
|
||||
|
||||
/**
 * Single clickable file row inside the deprecated `ArtifactCard`.
 * Shows a kind-appropriate glyph, the basename, and the size.
 */
function ArtifactRow({
  file,
  onOpen,
}: {
  file: ProducedFileLike
  onOpen: () => void
}) {
  const fileName = basenameOf(file.path)
  // Images get a thumbnail-style glyph; everything else a document glyph.
  const RowIcon = inferFileKind(file.path) === 'image' ? ImageIcon : FileText
  const rowClasses = cn(
    'flex w-full items-center gap-2 rounded-md px-2 py-1.5 text-left text-sm transition-colors',
    'hover:bg-accent/60 focus:bg-accent/60 focus:outline-hidden',
  )

  return (
    <button type="button" onClick={onOpen} className={rowClasses}>
      <RowIcon className="size-3.5 shrink-0 text-muted-foreground" />
      <span className="min-w-0 flex-1 truncate font-medium">{fileName}</span>
      <span className="shrink-0 text-muted-foreground text-xs tabular-nums">
        {formatFileSize(file.size)}
      </span>
    </button>
  )
}
|
||||
@@ -0,0 +1,163 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* "Files produced" strip rendered at the bottom of any assistant
|
||||
* turn that produced files (openclaw only). Replaces Phase 5.3's
|
||||
* row-list ArtifactCard with small horizontal cards for a lighter
|
||||
* visual treatment.
|
||||
*
|
||||
* Click semantics:
|
||||
* - Card → opens FilePreviewSheet directly (preview + download).
|
||||
* - View → emits onOpenRail(turnId); the parent opens the rail
|
||||
* and scrolls to the matching turn group.
|
||||
* - +N → same as View (the user is asking to see what was
|
||||
* overflowed).
|
||||
*/
|
||||
|
||||
import { ChevronRight, FileText, Image as ImageIcon } from 'lucide-react'
|
||||
import { type FC, useMemo, useState } from 'react'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { basenameOf, formatFileSize, inferFileKind } from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FilePreviewSheet } from './agent-conversation.file-preview-sheet'
|
||||
|
||||
export interface CardStripFile {
|
||||
id: string
|
||||
path: string
|
||||
size: number
|
||||
}
|
||||
|
||||
interface FileCardStripProps {
|
||||
/**
|
||||
* The turn id that produced these files. Forwarded to
|
||||
* `onOpenRail` so the rail can scroll/expand the matching group.
|
||||
* Optional because the live `produced_files` event lands before
|
||||
* the harness has stamped a server-issued turn id on the
|
||||
* optimistic turn — in that brief window, View falls back to
|
||||
* just opening the rail at the top.
|
||||
*/
|
||||
turnId?: string | null
|
||||
files: ReadonlyArray<CardStripFile>
|
||||
/** Caller wires this to `setOutputsRailOpen(true)` + deep-link. */
|
||||
onOpenRail: (turnId?: string | null) => void
|
||||
className?: string
|
||||
}
|
||||
|
||||
const MAX_VISIBLE = 4
|
||||
|
||||
/**
 * Horizontal strip of small file cards rendered under an assistant
 * turn. Cards open the preview Sheet directly; the "View" button and
 * the "+N" overflow tile both deep-link into the Outputs rail via
 * `onOpenRail(turnId)`.
 */
export const FileCardStrip: FC<FileCardStripProps> = ({
  turnId,
  files,
  onOpenRail,
  className,
}) => {
  // Which file's preview Sheet is open; null means closed.
  const [openFileId, setOpenFileId] = useState<string | null>(null)

  // Stable alphabetical order by path so re-renders don't shuffle cards.
  const sortedFiles = useMemo(
    () => [...files].sort((a, b) => a.path.localeCompare(b.path)),
    [files],
  )

  // Early-out AFTER the hooks above so the hook order stays constant.
  if (sortedFiles.length === 0) return null

  // Cap at MAX_VISIBLE cards; the rest are represented by the +N tile.
  const visible = sortedFiles.slice(0, MAX_VISIBLE)
  const hiddenCount = sortedFiles.length - visible.length
  const openFile = sortedFiles.find((file) => file.id === openFileId) ?? null

  return (
    <div
      className={cn(
        'rounded-xl border border-border/60 bg-card/50 px-3 py-2.5',
        className,
      )}
    >
      <div className="mb-2 flex items-center gap-2">
        <span className="text-muted-foreground text-xs">
          {sortedFiles.length === 1
            ? 'File produced'
            : `Files produced (${sortedFiles.length})`}
        </span>
        <Button
          type="button"
          variant="ghost"
          size="sm"
          className="ml-auto h-7 gap-1 px-2 text-xs"
          onClick={() => onOpenRail(turnId ?? null)}
        >
          View
          <ChevronRight className="size-3" />
        </Button>
      </div>

      <div className="flex flex-wrap gap-2">
        {visible.map((file) => (
          <FileCard
            key={file.id}
            file={file}
            onOpen={() => setOpenFileId(file.id)}
          />
        ))}
        {hiddenCount > 0 ? (
          <button
            type="button"
            onClick={() => onOpenRail(turnId ?? null)}
            className={cn(
              'flex h-[56px] min-w-[56px] shrink-0 items-center justify-center rounded-lg border border-border/60 px-3 text-muted-foreground text-xs',
              'transition-colors hover:border-border hover:bg-accent/40 hover:text-foreground',
              'focus:outline-hidden focus-visible:ring-2 focus-visible:ring-[var(--accent-orange)]',
            )}
            title={`See ${hiddenCount} more in the Outputs rail`}
          >
            +{hiddenCount}
          </button>
        ) : null}
      </div>

      <FilePreviewSheet
        fileId={openFile?.id ?? null}
        filePath={openFile?.path ?? null}
        open={Boolean(openFileId)}
        onOpenChange={(next) => {
          if (!next) setOpenFileId(null)
        }}
      />
    </div>
  )
}
|
||||
|
||||
function FileCard({
|
||||
file,
|
||||
onOpen,
|
||||
}: {
|
||||
file: CardStripFile
|
||||
onOpen: () => void
|
||||
}) {
|
||||
const name = basenameOf(file.path)
|
||||
const kind = inferFileKind(file.path)
|
||||
const Icon = kind === 'image' ? ImageIcon : FileText
|
||||
|
||||
return (
|
||||
<button
|
||||
type="button"
|
||||
onClick={onOpen}
|
||||
title={file.path}
|
||||
className={cn(
|
||||
'flex h-[56px] w-[140px] shrink-0 flex-col justify-between rounded-lg border border-border/60 bg-background px-2.5 py-1.5 text-left',
|
||||
'transition-colors hover:border-border hover:bg-accent/40',
|
||||
'focus:outline-hidden focus-visible:ring-2 focus-visible:ring-[var(--accent-orange)]',
|
||||
)}
|
||||
>
|
||||
<div className="flex min-w-0 items-center gap-1.5">
|
||||
<Icon className="size-3.5 shrink-0 text-muted-foreground" />
|
||||
<span className="min-w-0 flex-1 truncate font-medium text-xs">
|
||||
{name}
|
||||
</span>
|
||||
</div>
|
||||
<span className="text-[11px] text-muted-foreground tabular-nums">
|
||||
{formatFileSize(file.size)}
|
||||
</span>
|
||||
</button>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,283 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Shared preview drawer used by the inline artifact card AND the
|
||||
* Outputs rail. Branches on the FilePreview discriminated union and
|
||||
* renders the appropriate body. Always opens via a controlled
|
||||
* `open`/`onOpenChange` pair so the parent owns the selected file.
|
||||
*/
|
||||
|
||||
import { Download, FileWarning, Loader2 } from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef } from 'react'
|
||||
import { toast } from 'sonner'
|
||||
import { MessageResponse } from '@/components/ai-elements/message'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { ScrollArea } from '@/components/ui/scroll-area'
|
||||
import {
|
||||
Sheet,
|
||||
SheetContent,
|
||||
SheetDescription,
|
||||
SheetHeader,
|
||||
SheetTitle,
|
||||
} from '@/components/ui/sheet'
|
||||
import { Skeleton } from '@/components/ui/skeleton'
|
||||
import {
|
||||
basenameOf,
|
||||
buildFileDownloadUrl,
|
||||
extensionOf,
|
||||
type FilePreview,
|
||||
formatFileSize,
|
||||
useFilePreview,
|
||||
} from '@/lib/agent-files'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import { cn } from '@/lib/utils'
|
||||
|
||||
interface FilePreviewSheetProps {
|
||||
fileId: string | null
|
||||
filePath: string | null
|
||||
open: boolean
|
||||
onOpenChange: (open: boolean) => void
|
||||
}
|
||||
|
||||
const MARKDOWN_EXTENSIONS = new Set(['md', 'markdown', 'mdx'])
|
||||
|
||||
export const FilePreviewSheet: FC<FilePreviewSheetProps> = ({
|
||||
fileId,
|
||||
filePath,
|
||||
open,
|
||||
onOpenChange,
|
||||
}) => {
|
||||
const { baseUrl } = useAgentServerUrl()
|
||||
const { preview, loading, error } = useFilePreview(fileId, open)
|
||||
|
||||
const fileName = filePath ? basenameOf(filePath) : 'File preview'
|
||||
const downloadUrl = useMemo(() => {
|
||||
if (!baseUrl || !fileId) return null
|
||||
return buildFileDownloadUrl(baseUrl, fileId)
|
||||
}, [baseUrl, fileId])
|
||||
|
||||
// Surface preview-load failures in a toast in addition to the
|
||||
// inline error block — the inline UI lives at the bottom of the
|
||||
// sheet and is easy to miss when scrolled into the body.
|
||||
const lastToastedFileIdRef = useRef<string | null>(null)
|
||||
useEffect(() => {
|
||||
if (!open) {
|
||||
lastToastedFileIdRef.current = null
|
||||
return
|
||||
}
|
||||
if (!error || !fileId) return
|
||||
if (lastToastedFileIdRef.current === fileId) return
|
||||
lastToastedFileIdRef.current = fileId
|
||||
toast.error('Could not load preview', { description: error.message })
|
||||
}, [open, error, fileId])
|
||||
|
||||
const handleDownload = () => {
|
||||
if (!downloadUrl) {
|
||||
toast.error("Couldn't reach the agent server", {
|
||||
description: 'Reconnect to BrowserOS and try again.',
|
||||
})
|
||||
return
|
||||
}
|
||||
// Manually trigger the download so any future failure (e.g. the
|
||||
// server returns 404 because the file was removed) can be
|
||||
// surfaced via toast — the bare <a download> path swallows
|
||||
// these errors silently.
|
||||
const link = document.createElement('a')
|
||||
link.href = downloadUrl
|
||||
link.download = fileName
|
||||
link.rel = 'noopener'
|
||||
document.body.appendChild(link)
|
||||
link.click()
|
||||
link.remove()
|
||||
}
|
||||
|
||||
return (
|
||||
<Sheet open={open} onOpenChange={onOpenChange}>
|
||||
<SheetContent
|
||||
side="right"
|
||||
className="flex w-full flex-col gap-0 p-0 sm:max-w-xl"
|
||||
>
|
||||
<SheetHeader className="border-border/60 border-b px-5 py-4">
|
||||
<SheetTitle className="truncate pr-8">{fileName}</SheetTitle>
|
||||
<SheetDescription className="truncate">
|
||||
{filePath ?? ''}
|
||||
</SheetDescription>
|
||||
</SheetHeader>
|
||||
|
||||
<ScrollArea className="min-h-0 flex-1">
|
||||
<div className="px-5 py-4">
|
||||
{loading ? (
|
||||
<PreviewSkeleton />
|
||||
) : error ? (
|
||||
<PreviewError message={error.message} />
|
||||
) : preview ? (
|
||||
<PreviewBody
|
||||
preview={preview}
|
||||
filePath={filePath}
|
||||
downloadUrl={downloadUrl}
|
||||
/>
|
||||
) : null}
|
||||
</div>
|
||||
</ScrollArea>
|
||||
|
||||
{fileId ? (
|
||||
<div className="border-border/60 border-t bg-background/90 px-5 py-3 backdrop-blur">
|
||||
<Button
|
||||
type="button"
|
||||
size="sm"
|
||||
className="w-full gap-2"
|
||||
onClick={handleDownload}
|
||||
>
|
||||
<Download className="size-3.5" />
|
||||
Download
|
||||
</Button>
|
||||
</div>
|
||||
) : null}
|
||||
</SheetContent>
|
||||
</Sheet>
|
||||
)
|
||||
}
|
||||
|
||||
function PreviewSkeleton() {
|
||||
return (
|
||||
<div className="flex flex-col gap-2">
|
||||
<div className="flex items-center gap-2 text-muted-foreground text-xs">
|
||||
<Loader2 className="size-3.5 animate-spin" />
|
||||
Loading preview...
|
||||
</div>
|
||||
<Skeleton className="h-4 w-3/4" />
|
||||
<Skeleton className="h-4 w-full" />
|
||||
<Skeleton className="h-4 w-5/6" />
|
||||
<Skeleton className="h-4 w-2/3" />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function PreviewError({ message }: { message: string }) {
|
||||
return (
|
||||
<div className="flex flex-col items-start gap-2 rounded-lg border border-destructive/30 bg-destructive/5 px-3 py-2 text-destructive text-sm">
|
||||
<div className="flex items-center gap-2 font-medium">
|
||||
<FileWarning className="size-4" />
|
||||
Could not load preview
|
||||
</div>
|
||||
<p className="text-destructive/80 text-xs">{message}</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function PreviewBody({
|
||||
preview,
|
||||
filePath,
|
||||
downloadUrl,
|
||||
}: {
|
||||
preview: FilePreview
|
||||
filePath: string | null
|
||||
downloadUrl: string | null
|
||||
}) {
|
||||
if (preview.kind === 'missing') {
|
||||
return (
|
||||
<div className="rounded-lg border border-border/60 bg-muted/40 px-4 py-6 text-center text-muted-foreground text-sm">
|
||||
This file is no longer in the workspace. The agent may have moved or
|
||||
deleted it after the turn finished.
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (preview.kind === 'image') {
|
||||
return (
|
||||
<div className="flex flex-col gap-3">
|
||||
<PreviewMeta preview={preview} />
|
||||
<div className="overflow-hidden rounded-lg border border-border/60 bg-muted/30">
|
||||
<img
|
||||
src={preview.dataUrl}
|
||||
alt={filePath ?? 'preview'}
|
||||
className="block max-h-[60vh] w-full object-contain"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (preview.kind === 'pdf') {
|
||||
return (
|
||||
<div className="flex flex-col gap-3">
|
||||
<PreviewMeta preview={preview} />
|
||||
<div className="rounded-lg border border-border/60 bg-muted/40 px-4 py-6 text-center text-muted-foreground text-sm">
|
||||
PDF previews aren't supported inline yet. Use Download to open this
|
||||
file in your default PDF viewer.
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (preview.kind === 'binary') {
|
||||
return (
|
||||
<div className="flex flex-col gap-3">
|
||||
<PreviewMeta preview={preview} />
|
||||
<div className="rounded-lg border border-border/60 bg-muted/40 px-4 py-6 text-center text-muted-foreground text-sm">
|
||||
No inline preview for this file type.
|
||||
{downloadUrl ? ' Use Download to save it locally.' : null}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return <TextPreviewBody preview={preview} filePath={filePath} />
|
||||
}
|
||||
|
||||
/**
 * Text-preview branch: renders markdown-extension files through the
 * rich MessageResponse renderer, everything else in a plain <pre>,
 * with a truncation notice when the snippet is cut short.
 */
function TextPreviewBody({
  preview,
  filePath,
}: {
  preview: Extract<FilePreview, { kind: 'text' }>
  filePath: string | null
}) {
  // Decide renderer by extension (md/markdown/mdx → markdown).
  const ext = filePath ? extensionOf(filePath).toLowerCase() : ''
  const renderAsMarkdown = MARKDOWN_EXTENSIONS.has(ext)

  return (
    <div className="flex flex-col gap-3">
      <PreviewMeta preview={preview} />
      {renderAsMarkdown ? (
        <div
          className={cn(
            'prose prose-sm dark:prose-invert max-w-none break-words rounded-lg border border-border/60 bg-muted/30 px-4 py-3',
            "[&_[data-streamdown='code-block']]:!w-full [&_[data-streamdown='code-block']]:overflow-x-auto",
          )}
        >
          <MessageResponse mode="static" parseIncompleteMarkdown={false}>
            {preview.snippet}
          </MessageResponse>
        </div>
      ) : (
        <pre className="overflow-x-auto rounded-lg border border-border/60 bg-muted/30 px-3 py-2 text-xs leading-relaxed">
          <code className="font-mono text-foreground">{preview.snippet}</code>
        </pre>
      )}
      {preview.truncated ? (
        <div className="text-muted-foreground text-xs">
          Showing the first part of this file. Download to see the full
          contents.
        </div>
      ) : null}
    </div>
  )
}
|
||||
|
||||
/** Size + MIME type metadata line shared by all non-missing previews. */
function PreviewMeta({
  preview,
}: {
  preview: Exclude<FilePreview, { kind: 'missing' }>
}) {
  const sizeLabel = formatFileSize(preview.size)
  // `||` (not `??`) on purpose: an empty-string MIME also falls back.
  const mimeLabel = preview.mimeType || 'unknown'

  return (
    <div className="flex flex-wrap items-center gap-x-3 gap-y-1 text-muted-foreground text-xs">
      <span className="font-medium text-foreground">{sizeLabel}</span>
      <span>·</span>
      <span className="font-mono">{mimeLabel}</span>
    </div>
  )
}
|
||||
@@ -0,0 +1,338 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Per-agent right-side "Outputs" panel. Lists every file the harness
|
||||
* has attributed to this agent, grouped by the turn that produced
|
||||
* them. Click a row to open the shared preview Sheet.
|
||||
*
|
||||
* Lifecycle:
|
||||
* - Open/closed state is controlled by the parent and persisted via
|
||||
* `useOutputsRailOpen(agentId)` so each agent remembers its
|
||||
* preference independently.
|
||||
* - Data refreshes whenever a turn finishes (the conversation hook
|
||||
* fires `useInvalidateAgentOutputs` from its finally block).
|
||||
* - Manual "Refresh" button is wired to `useRefreshAgentOutputs`
|
||||
* for users who navigate in mid-turn.
|
||||
*/
|
||||
|
||||
import {
|
||||
ChevronDown,
|
||||
ChevronRight,
|
||||
FileText,
|
||||
Image as ImageIcon,
|
||||
Inbox,
|
||||
Loader2,
|
||||
PanelRightClose,
|
||||
RefreshCw,
|
||||
} from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { toast } from 'sonner'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import {
|
||||
Collapsible,
|
||||
CollapsibleContent,
|
||||
CollapsibleTrigger,
|
||||
} from '@/components/ui/collapsible'
|
||||
import { ScrollArea } from '@/components/ui/scroll-area'
|
||||
import { Skeleton } from '@/components/ui/skeleton'
|
||||
import {
|
||||
basenameOf,
|
||||
formatFileSize,
|
||||
inferFileKind,
|
||||
type ProducedFilesRailGroup,
|
||||
useAgentOutputs,
|
||||
useRefreshAgentOutputs,
|
||||
} from '@/lib/agent-files'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { FilePreviewSheet } from './agent-conversation.file-preview-sheet'
|
||||
|
||||
interface OutputsRailProps {
|
||||
agentId: string
|
||||
onClose: () => void
|
||||
/**
|
||||
* When set, the rail scrolls the matching `RailTurnGroup` into
|
||||
* view and force-opens its `Collapsible`. Used by the inline
|
||||
* file-card strip's "View" / "+N" deep-link path. Cleared by
|
||||
* the parent (via `onFocusTurnConsumed`) once the rail has
|
||||
* acknowledged the deep-link so subsequent renders don't keep
|
||||
* re-scrolling the same group.
|
||||
*/
|
||||
focusTurnId?: string | null
|
||||
onFocusTurnConsumed?: () => void
|
||||
}
|
||||
|
||||
const RAIL_LOCAL_STORAGE_PREFIX = 'browseros:outputs-rail:'
|
||||
|
||||
/**
|
||||
* Controlled open/close state with per-agent localStorage memory.
|
||||
* Returns a tuple compatible with React's useState shape so the
|
||||
* parent can pass it straight into the rail without an extra effect.
|
||||
*/
|
||||
export function useOutputsRailOpen(
|
||||
agentId: string,
|
||||
): [boolean, (next: boolean) => void] {
|
||||
const [open, setOpen] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
if (typeof window === 'undefined' || !agentId) return
|
||||
try {
|
||||
const stored = window.localStorage.getItem(
|
||||
`${RAIL_LOCAL_STORAGE_PREFIX}${agentId}`,
|
||||
)
|
||||
setOpen(stored === '1')
|
||||
} catch {
|
||||
// localStorage may be unavailable (private mode, locked-down
|
||||
// contexts) — fall back to closed.
|
||||
}
|
||||
}, [agentId])
|
||||
|
||||
const update = (next: boolean) => {
|
||||
setOpen(next)
|
||||
if (typeof window === 'undefined' || !agentId) return
|
||||
try {
|
||||
window.localStorage.setItem(
|
||||
`${RAIL_LOCAL_STORAGE_PREFIX}${agentId}`,
|
||||
next ? '1' : '0',
|
||||
)
|
||||
} catch {
|
||||
// Best-effort persistence.
|
||||
}
|
||||
}
|
||||
|
||||
return [open, update]
|
||||
}
|
||||
|
||||
/**
 * Right-side "Outputs" panel for one agent: header with total count,
 * manual refresh and close buttons; scrollable body of per-turn file
 * groups; and the shared preview Sheet for the selected file.
 */
export const OutputsRail: FC<OutputsRailProps> = ({
  agentId,
  onClose,
  focusTurnId,
  onFocusTurnConsumed,
}) => {
  const { groups, loading, error } = useAgentOutputs(agentId)
  const refresh = useRefreshAgentOutputs(agentId)

  // Currently previewed file; null means the Sheet is closed.
  const [openFile, setOpenFile] = useState<{
    id: string
    path: string
  } | null>(null)

  // Header badge: total files across all turn groups.
  const totalFiles = useMemo(
    () => groups.reduce((sum, group) => sum + group.files.length, 0),
    [groups],
  )

  return (
    <aside className="flex h-full min-h-0 w-full flex-col border-border/50 border-l bg-background">
      <header className="flex shrink-0 items-center gap-2 border-border/50 border-b px-3 py-3">
        <span className="font-semibold text-[13px] uppercase tracking-wide">
          Outputs
        </span>
        {totalFiles > 0 ? (
          <span className="text-muted-foreground text-xs tabular-nums">
            {totalFiles}
          </span>
        ) : null}
        <div className="ml-auto flex items-center gap-1">
          <Button
            type="button"
            variant="ghost"
            size="icon"
            className="size-7"
            onClick={() =>
              refresh.mutate(undefined, {
                onError: (err) =>
                  toast.error('Refresh failed', {
                    description:
                      err instanceof Error ? err.message : String(err),
                  }),
              })
            }
            disabled={refresh.isPending}
            title="Refresh"
          >
            {refresh.isPending ? (
              <Loader2 className="size-3.5 animate-spin" />
            ) : (
              <RefreshCw className="size-3.5" />
            )}
          </Button>
          <Button
            type="button"
            variant="ghost"
            size="icon"
            className="size-7"
            onClick={onClose}
            title="Hide outputs"
          >
            <PanelRightClose className="size-3.5" />
          </Button>
        </div>
      </header>

      <ScrollArea className="min-h-0 flex-1">
        <div className="px-2 py-2">
          {loading && groups.length === 0 ? (
            <RailSkeleton />
          ) : error ? (
            <RailError message={error.message} />
          ) : groups.length === 0 ? (
            <RailEmpty />
          ) : (
            <ul className="flex flex-col gap-2">
              {groups.map((group) => (
                <li key={group.turnId}>
                  <RailTurnGroup
                    group={group}
                    focused={
                      Boolean(focusTurnId) && focusTurnId === group.turnId
                    }
                    onFocusConsumed={onFocusTurnConsumed}
                    onOpenFile={(file) =>
                      setOpenFile({ id: file.id, path: file.path })
                    }
                  />
                </li>
              ))}
            </ul>
          )}
        </div>
      </ScrollArea>

      <FilePreviewSheet
        fileId={openFile?.id ?? null}
        filePath={openFile?.path ?? null}
        open={Boolean(openFile)}
        onOpenChange={(next) => {
          if (!next) setOpenFile(null)
        }}
      />
    </aside>
  )
}
|
||||
|
||||
/**
 * One collapsible per-turn group in the Outputs rail: header shows a
 * truncated prompt plus file count; body lists the files. Supports
 * being "focused" by the inline strip's deep-link.
 */
function RailTurnGroup({
  group,
  focused,
  onFocusConsumed,
  onOpenFile,
}: {
  group: ProducedFilesRailGroup
  focused: boolean
  onFocusConsumed?: () => void
  onOpenFile: (file: { id: string; path: string }) => void
}) {
  // Groups start expanded; the user can collapse individually.
  const [open, setOpen] = useState(true)
  // Fallback label when the turn prompt is empty/whitespace.
  const headerLabel = group.turnPrompt.trim() || 'Turn'
  const containerRef = useRef<HTMLDivElement>(null)

  // Deep-link consumption: when the parent passes `focused=true`,
  // expand the collapsible (in case the user had collapsed it
  // earlier) and scroll into view. Fire `onFocusConsumed` so the
  // parent can drop the URL param and we don't re-scroll on every
  // render after that.
  useEffect(() => {
    if (!focused) return
    setOpen(true)
    containerRef.current?.scrollIntoView({
      behavior: 'smooth',
      block: 'nearest',
    })
    onFocusConsumed?.()
  }, [focused, onFocusConsumed])

  return (
    <div ref={containerRef}>
      <Collapsible open={open} onOpenChange={setOpen}>
        <CollapsibleTrigger
          className={cn(
            'flex w-full items-center gap-1.5 rounded-md px-1.5 py-1 text-left text-muted-foreground text-xs',
            'transition-colors hover:bg-accent/40 hover:text-foreground',
          )}
        >
          {open ? (
            <ChevronDown className="size-3 shrink-0" />
          ) : (
            <ChevronRight className="size-3 shrink-0" />
          )}
          <span className="min-w-0 flex-1 truncate font-medium">
            {headerLabel}
          </span>
          <span className="shrink-0 tabular-nums">{group.files.length}</span>
        </CollapsibleTrigger>
        <CollapsibleContent>
          <ul className="mt-1 ml-1 flex flex-col gap-0.5 border-border/40 border-l pl-2">
            {group.files.map((file) => (
              <li key={file.id}>
                <RailFileRow file={file} onOpen={() => onOpenFile(file)} />
              </li>
            ))}
          </ul>
        </CollapsibleContent>
      </Collapsible>
    </div>
  )
}
|
||||
|
||||
function RailFileRow({
|
||||
file,
|
||||
onOpen,
|
||||
}: {
|
||||
file: ProducedFilesRailGroup['files'][number]
|
||||
onOpen: () => void
|
||||
}) {
|
||||
const name = basenameOf(file.path)
|
||||
const kind = inferFileKind(file.path)
|
||||
const Icon = kind === 'image' ? ImageIcon : FileText
|
||||
|
||||
return (
|
||||
<button
|
||||
type="button"
|
||||
onClick={onOpen}
|
||||
className={cn(
|
||||
'flex w-full items-center gap-2 rounded-md px-1.5 py-1 text-left text-xs transition-colors',
|
||||
'hover:bg-accent/60 focus:bg-accent/60 focus:outline-hidden',
|
||||
)}
|
||||
title={file.path}
|
||||
>
|
||||
<Icon className="size-3 shrink-0 text-muted-foreground" />
|
||||
<span className="min-w-0 flex-1 truncate">{name}</span>
|
||||
<span className="shrink-0 text-muted-foreground tabular-nums">
|
||||
{formatFileSize(file.size)}
|
||||
</span>
|
||||
</button>
|
||||
)
|
||||
}
|
||||
|
||||
function RailSkeleton() {
|
||||
return (
|
||||
<div className="flex flex-col gap-2 px-1.5 py-1">
|
||||
<Skeleton className="h-4 w-1/2" />
|
||||
<Skeleton className="h-4 w-3/4" />
|
||||
<Skeleton className="h-4 w-2/3" />
|
||||
<Skeleton className="h-4 w-5/6" />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function RailEmpty() {
|
||||
return (
|
||||
<div className="mx-2 my-3 flex flex-col items-center gap-1.5 rounded-lg border border-border/60 border-dashed bg-muted/20 px-3 py-6 text-center text-muted-foreground text-xs">
|
||||
<Inbox className="size-4" />
|
||||
<p className="font-medium">No outputs yet</p>
|
||||
<p className="text-[11px] text-muted-foreground/70 leading-snug">
|
||||
Files this agent creates will appear here, grouped by the turn that made
|
||||
them.
|
||||
</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function RailError({ message }: { message: string }) {
|
||||
return (
|
||||
<div className="mx-2 my-3 rounded-lg border border-destructive/30 bg-destructive/5 px-3 py-2 text-destructive text-xs">
|
||||
{message}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { OpenClawChatHistoryMessage } from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import type { AgentConversationTurn } from '@/lib/agent-conversations/types'
|
||||
import type { ProducedFilesRailGroup } from '@/lib/agent-files'
|
||||
|
||||
export type ClawChatRole = 'user' | 'assistant'
|
||||
|
||||
@@ -234,6 +235,30 @@ export function filterTurnsPersistedInHistory(
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Persisted turns that still carry `producedFiles` — once history
|
||||
* reloads, the assistant text is rendered by `ClawChatMessage` and
|
||||
* the optimistic turn is filtered out by
|
||||
* `filterTurnsPersistedInHistory`. The historical message has no
|
||||
* `producedFiles` field (history items don't carry that), so the
|
||||
* inline file-card strip would vanish on history reload.
|
||||
*
|
||||
* Returning these here lets the caller render a strip-only entry
|
||||
* after the corresponding history bubble — full message stays as
|
||||
* the persisted history pair, but the produced-files affordance
|
||||
* survives.
|
||||
*/
|
||||
export function selectStripOnlyTurns(
|
||||
turns: AgentConversationTurn[],
|
||||
historyMessages: ClawChatMessage[],
|
||||
): AgentConversationTurn[] {
|
||||
return turns.filter(
|
||||
(turn) =>
|
||||
Boolean(turn.producedFiles && turn.producedFiles.length > 0) &&
|
||||
isTurnPersistedInHistory(turn, historyMessages),
|
||||
)
|
||||
}
|
||||
|
||||
function isTurnPersistedInHistory(
|
||||
turn: AgentConversationTurn,
|
||||
historyMessages: ClawChatMessage[],
|
||||
@@ -285,3 +310,59 @@ function getClawMessageText(message: ClawChatMessage): string {
|
||||
.join('')
|
||||
.trim()
|
||||
}
|
||||
|
||||
function firstNonBlankLine(value: string): string {
|
||||
for (const raw of value.split('\n')) {
|
||||
const trimmed = raw.trim()
|
||||
if (trimmed) return trimmed
|
||||
}
|
||||
return ''
|
||||
}
|
||||
|
||||
/**
|
||||
* Map each assistant history message to the produced-files group
|
||||
* that came from its turn. Match key is `group.turnPrompt` (first
|
||||
* non-blank line of the user prompt that initiated the turn) vs.
|
||||
* the first non-blank line of the user message that immediately
|
||||
* preceded this assistant message — the same shape the server
|
||||
* emits when storing turnPrompt.
|
||||
*
|
||||
* Walks history forward (oldest-first per `flattenHistoryPages`)
|
||||
* and consumes groups in chronological order. A group can only
|
||||
* match once — if two turns share the same prompt the earlier
|
||||
* one wins, and the later assistant message stays unassociated
|
||||
* (those land back in `tailStripGroups` at the conversation tail).
|
||||
*/
|
||||
export function mapHistoryToProducedFilesGroups(
|
||||
historyMessages: ClawChatMessage[],
|
||||
groups: ReadonlyArray<ProducedFilesRailGroup>,
|
||||
): {
|
||||
byAssistantMessageId: Map<string, ProducedFilesRailGroup>
|
||||
unmatched: ProducedFilesRailGroup[]
|
||||
} {
|
||||
const byAssistantMessageId = new Map<string, ProducedFilesRailGroup>()
|
||||
if (groups.length === 0) {
|
||||
return { byAssistantMessageId, unmatched: [] }
|
||||
}
|
||||
// Oldest-first so the iteration order matches history.
|
||||
const remaining = [...groups].sort((a, b) => a.createdAt - b.createdAt)
|
||||
|
||||
let pendingPrompt: string | null = null
|
||||
for (const message of historyMessages) {
|
||||
if (message.role === 'user') {
|
||||
pendingPrompt = firstNonBlankLine(getClawMessageText(message))
|
||||
continue
|
||||
}
|
||||
if (message.role !== 'assistant' || !pendingPrompt) continue
|
||||
const matchIndex = remaining.findIndex(
|
||||
(group) => group.turnPrompt === pendingPrompt,
|
||||
)
|
||||
if (matchIndex >= 0) {
|
||||
const [match] = remaining.splice(matchIndex, 1)
|
||||
byAssistantMessageId.set(message.id, match)
|
||||
}
|
||||
pendingPrompt = null
|
||||
}
|
||||
|
||||
return { byAssistantMessageId, unmatched: remaining }
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
import { useCallback, useEffect, useRef, useState } from 'react'
|
||||
import {
|
||||
type AgentHarnessStreamEvent,
|
||||
attachToHarnessTurn,
|
||||
@@ -10,12 +10,19 @@ import type { OpenClawChatHistoryMessage } from '@/entrypoints/app/agents/useOpe
|
||||
import type {
|
||||
AgentConversationTurn,
|
||||
AssistantPart,
|
||||
ConversationTurnFile,
|
||||
ToolEntry,
|
||||
UserAttachmentPreview,
|
||||
} from '@/lib/agent-conversations/types'
|
||||
import { useInvalidateAgentOutputs } from '@/lib/agent-files'
|
||||
import type { ServerAttachmentPayload } from '@/lib/attachments'
|
||||
import { sentry } from '@/lib/sentry/sentry'
|
||||
import { consumeSSEStream } from '@/lib/sse'
|
||||
import { buildToolLabel } from '@/lib/tool-labels'
|
||||
import {
|
||||
createWorkflowUsageRecord,
|
||||
recordWorkflowUsage,
|
||||
} from '@/lib/workflow-usage/storage'
|
||||
import { mapAgentHarnessToolStatus } from './agent-stream-events'
|
||||
|
||||
export interface SendInput {
|
||||
@@ -53,6 +60,12 @@ export function useAgentConversation(
|
||||
) {
|
||||
const [turns, setTurns] = useState<AgentConversationTurn[]>([])
|
||||
const [streaming, setStreaming] = useState(false)
|
||||
const invalidateAgentOutputs = useInvalidateAgentOutputs()
|
||||
// Stable ref so the resume effect doesn't re-subscribe on every
|
||||
// render (the hook's returned callable is freshly closured each
|
||||
// time, but the underlying queryClient is stable).
|
||||
const invalidateAgentOutputsRef = useRef(invalidateAgentOutputs)
|
||||
invalidateAgentOutputsRef.current = invalidateAgentOutputs
|
||||
const sessionKeyRef = useRef(options.sessionKey ?? '')
|
||||
const historyRef = useRef<OpenClawChatHistoryMessage[]>(options.history ?? [])
|
||||
const textAccRef = useRef('')
|
||||
@@ -60,6 +73,8 @@ export function useAgentConversation(
|
||||
const streamAbortRef = useRef<AbortController | null>(null)
|
||||
const onCompleteRef = useRef(options.onComplete)
|
||||
const onSessionKeyChangeRef = useRef(options.onSessionKeyChange)
|
||||
const workflowToolNamesRef = useRef<string[]>([])
|
||||
const workflowToolIdsRef = useRef(new Set<string>())
|
||||
// Per-turn resume bookkeeping. `turnId` is captured from the response
|
||||
// header; `lastSeq` advances with every SSE event so a reconnect can
|
||||
// resume via Last-Event-ID.
|
||||
@@ -104,6 +119,35 @@ export function useAgentConversation(
|
||||
})
|
||||
}
|
||||
|
||||
const resetWorkflowUsageCapture = useCallback(() => {
|
||||
workflowToolNamesRef.current = []
|
||||
workflowToolIdsRef.current = new Set()
|
||||
}, [])
|
||||
|
||||
const persistWorkflowUsageCapture = useCallback(
|
||||
(turnId?: string | null) => {
|
||||
const toolNames = workflowToolNamesRef.current
|
||||
if (toolNames.length === 0) return
|
||||
|
||||
void recordWorkflowUsage(
|
||||
createWorkflowUsageRecord({
|
||||
id: `agent-harness-turn:${turnId ?? crypto.randomUUID()}`,
|
||||
source: 'agent-harness-chat',
|
||||
toolNames,
|
||||
}),
|
||||
).catch((error) => {
|
||||
sentry.captureException(error, {
|
||||
extra: {
|
||||
message: 'Failed to persist agent workflow usage pattern',
|
||||
agentId,
|
||||
turnId,
|
||||
},
|
||||
})
|
||||
})
|
||||
},
|
||||
[agentId],
|
||||
)
|
||||
|
||||
const appendTextDelta = (delta: string) => {
|
||||
textAccRef.current += delta
|
||||
const text = textAccRef.current
|
||||
@@ -152,9 +196,25 @@ export function useAgentConversation(
|
||||
})
|
||||
}
|
||||
|
||||
const setProducedFilesOnCurrentTurn = (files: ConversationTurnFile[]) => {
|
||||
setTurns((prev) => {
|
||||
const last = prev[prev.length - 1]
|
||||
if (!last) return prev
|
||||
// Replace, don't merge: the server's diff is authoritative for
|
||||
// the just-completed turn — duplicate events shouldn't grow the
|
||||
// list, and a re-attribution should overwrite an earlier one.
|
||||
return [...prev.slice(0, -1), { ...last, producedFiles: files }]
|
||||
})
|
||||
}
|
||||
|
||||
const upsertAgentHarnessTool = (event: AgentHarnessStreamEvent) => {
|
||||
if (event.type !== 'tool_call') return
|
||||
const rawName = event.title || event.rawType || 'tool call'
|
||||
const toolId = event.id ?? rawName
|
||||
if (!workflowToolIdsRef.current.has(toolId)) {
|
||||
workflowToolIdsRef.current.add(toolId)
|
||||
workflowToolNamesRef.current.push(rawName)
|
||||
}
|
||||
const { label, subject } = buildToolLabel(
|
||||
rawName,
|
||||
event.text ? { description: event.text } : undefined,
|
||||
@@ -208,6 +268,9 @@ export function useAgentConversation(
|
||||
case 'tool_call':
|
||||
upsertAgentHarnessTool(event)
|
||||
break
|
||||
case 'produced_files':
|
||||
setProducedFilesOnCurrentTurn(event.files)
|
||||
break
|
||||
case 'done':
|
||||
markCurrentTurnDone()
|
||||
break
|
||||
@@ -259,6 +322,7 @@ export function useAgentConversation(
|
||||
...prev,
|
||||
{
|
||||
id: crypto.randomUUID(),
|
||||
turnId: active.turnId,
|
||||
userText: active.prompt ?? '',
|
||||
parts: [],
|
||||
done: false,
|
||||
@@ -272,6 +336,7 @@ export function useAgentConversation(
|
||||
streamAbortRef.current = abortController
|
||||
setStreaming(true)
|
||||
weStartedStream = true
|
||||
resetWorkflowUsageCapture()
|
||||
|
||||
const response = await attachToHarnessTurn(agentId, {
|
||||
turnId: active.turnId,
|
||||
@@ -304,9 +369,15 @@ export function useAgentConversation(
|
||||
// When `cancelled` is true the next run will set these
|
||||
// itself, so resetting here would only cause a brief flicker.
|
||||
if (!cancelled && weStartedStream) {
|
||||
const finishedTurnId = turnIdRef.current
|
||||
persistWorkflowUsageCapture(finishedTurnId)
|
||||
turnIdRef.current = null
|
||||
lastSeqRef.current = null
|
||||
setStreaming(false)
|
||||
void invalidateAgentOutputsRef.current(
|
||||
agentId,
|
||||
finishedTurnId ?? undefined,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -316,7 +387,66 @@ export function useAgentConversation(
|
||||
cancelled = true
|
||||
abortController.abort()
|
||||
}
|
||||
}, [agentId, activeTurnIdDep])
|
||||
}, [
|
||||
agentId,
|
||||
activeTurnIdDep,
|
||||
persistWorkflowUsageCapture,
|
||||
resetWorkflowUsageCapture,
|
||||
])
|
||||
|
||||
/**
|
||||
* Send the chat request and follow the 409-active-turn redirect
|
||||
* once. Pulled out of `send` to keep its cognitive complexity in
|
||||
* check — the retry adds a branch that biome counts heavily.
|
||||
*/
|
||||
const openSendStream = async (
|
||||
targetAgentId: string,
|
||||
text: string,
|
||||
attachments: ServerAttachmentPayload[],
|
||||
signal: AbortSignal,
|
||||
): Promise<Response> => {
|
||||
const initial = await chatWithHarnessAgent(
|
||||
targetAgentId,
|
||||
text,
|
||||
signal,
|
||||
attachments,
|
||||
)
|
||||
if (initial.status !== 409) return initial
|
||||
// 409 means the server already has an active turn for this agent
|
||||
// (a previous tab kicked one off and we're a fresh mount that
|
||||
// missed the resume window). Attach to it instead of double-sending.
|
||||
const body = (await initial.json()) as { turnId?: string }
|
||||
if (!body.turnId) return initial
|
||||
return attachToHarnessTurn(targetAgentId, {
|
||||
turnId: body.turnId,
|
||||
signal,
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Pull session-key / turn-id off response headers and propagate to
|
||||
* refs + the optimistic turn. Stamping `turnId` here lets the
|
||||
* inline artifact card fall back to /files/turn/<id> on a resumed
|
||||
* mount that missed the live `produced_files` event.
|
||||
*/
|
||||
const applyResponseHeadersToTurn = (response: Response) => {
|
||||
const responseSessionKey =
|
||||
response.headers.get('X-Session-Key') ??
|
||||
response.headers.get('X-Session-Id')
|
||||
if (responseSessionKey) {
|
||||
sessionKeyRef.current = responseSessionKey
|
||||
onSessionKeyChangeRef.current?.(responseSessionKey)
|
||||
}
|
||||
const responseTurnId = response.headers.get('X-Turn-Id')
|
||||
if (!responseTurnId) return
|
||||
turnIdRef.current = responseTurnId
|
||||
lastSeqRef.current = null
|
||||
setTurns((prev) => {
|
||||
const last = prev[prev.length - 1]
|
||||
if (!last) return prev
|
||||
return [...prev.slice(0, -1), { ...last, turnId: responseTurnId }]
|
||||
})
|
||||
}
|
||||
|
||||
const send = async (input: string | SendInput) => {
|
||||
const normalized: SendInput =
|
||||
@@ -340,43 +470,20 @@ export function useAgentConversation(
|
||||
}
|
||||
setTurns((prev) => [...prev, turn])
|
||||
setStreaming(true)
|
||||
resetWorkflowUsageCapture()
|
||||
textAccRef.current = ''
|
||||
thinkAccRef.current = ''
|
||||
const abortController = new AbortController()
|
||||
streamAbortRef.current = abortController
|
||||
|
||||
try {
|
||||
let response = await chatWithHarnessAgent(
|
||||
const response = await openSendStream(
|
||||
agentId,
|
||||
trimmed,
|
||||
abortController.signal,
|
||||
attachments,
|
||||
abortController.signal,
|
||||
)
|
||||
// 409 means the server already has an active turn for this
|
||||
// agent (e.g. a previous tab kicked one off and we're a fresh
|
||||
// mount that missed the resume window). Attach to it instead of
|
||||
// double-sending.
|
||||
if (response.status === 409) {
|
||||
const body = (await response.json()) as { turnId?: string }
|
||||
if (body.turnId) {
|
||||
response = await attachToHarnessTurn(agentId, {
|
||||
turnId: body.turnId,
|
||||
signal: abortController.signal,
|
||||
})
|
||||
}
|
||||
}
|
||||
const responseSessionKey =
|
||||
response.headers.get('X-Session-Key') ??
|
||||
response.headers.get('X-Session-Id')
|
||||
if (responseSessionKey) {
|
||||
sessionKeyRef.current = responseSessionKey
|
||||
onSessionKeyChangeRef.current?.(responseSessionKey)
|
||||
}
|
||||
const responseTurnId = response.headers.get('X-Turn-Id')
|
||||
if (responseTurnId) {
|
||||
turnIdRef.current = responseTurnId
|
||||
lastSeqRef.current = null
|
||||
}
|
||||
applyResponseHeadersToTurn(response)
|
||||
if (!response.ok) {
|
||||
const err = await response.text()
|
||||
updateCurrentTurnParts((parts) => [
|
||||
@@ -404,10 +511,16 @@ export function useAgentConversation(
|
||||
if (streamAbortRef.current === abortController) {
|
||||
streamAbortRef.current = null
|
||||
}
|
||||
// Capture before nulling — the invalidation needs the turn id so
|
||||
// useAgentTurnFiles consumers also flush, not just the agent-wide
|
||||
// rail query.
|
||||
const finishedTurnId = turnIdRef.current
|
||||
persistWorkflowUsageCapture(finishedTurnId)
|
||||
turnIdRef.current = null
|
||||
lastSeqRef.current = null
|
||||
onCompleteRef.current?.()
|
||||
setStreaming(false)
|
||||
void invalidateAgentOutputs(agentId, finishedTurnId ?? undefined)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { Bot, Cpu, Sparkles } from 'lucide-react'
|
||||
import { Bot, Cpu, Sparkles, Wand2 } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import type { HarnessAgentAdapter } from './agent-harness-types'
|
||||
|
||||
@@ -23,6 +23,9 @@ export const AdapterIcon: FC<AdapterIconProps> = ({ adapter, className }) => {
|
||||
case 'openclaw':
|
||||
// OpenClaw — bot/automation framing.
|
||||
return <Bot className={className} aria-label="OpenClaw" />
|
||||
case 'hermes':
|
||||
// Hermes — messenger god framing, wand evokes the agentic conjuring.
|
||||
return <Wand2 className={className} aria-label="Hermes" />
|
||||
default:
|
||||
return <Bot className={className} aria-label="Agent" />
|
||||
}
|
||||
@@ -36,6 +39,8 @@ export function adapterLabel(adapter: HarnessAgentAdapter | 'unknown'): string {
|
||||
return 'Codex'
|
||||
case 'openclaw':
|
||||
return 'OpenClaw'
|
||||
case 'hermes':
|
||||
return 'Hermes'
|
||||
default:
|
||||
return 'Agent'
|
||||
}
|
||||
|
||||
@@ -117,6 +117,7 @@ function inferAdapterFromLabel(label: string): HarnessAgentAdapter | 'unknown' {
|
||||
if (lower === 'claude code') return 'claude'
|
||||
if (lower === 'codex') return 'codex'
|
||||
if (lower === 'openclaw') return 'openclaw'
|
||||
if (lower === 'hermes') return 'hermes'
|
||||
return 'unknown'
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import { createAgentPageActions } from './agents-page-actions'
|
||||
import {
|
||||
useDefaultAgentName,
|
||||
useHarnessAgentDefaults,
|
||||
useHermesProviderSelection,
|
||||
useOpenClawProviderSelection,
|
||||
} from './agents-page-hooks'
|
||||
import {
|
||||
@@ -106,6 +107,7 @@ export const AgentsPage: FC = () => {
|
||||
)
|
||||
const [harnessModelId, setHarnessModelId] = useState('')
|
||||
const [harnessReasoningEffort, setHarnessReasoningEffort] = useState('')
|
||||
const [createHermesProviderId, setCreateHermesProviderId] = useState('')
|
||||
const [showTerminal, setShowTerminal] = useState(false)
|
||||
const [cliAuthModalOpen, setCliAuthModalOpen] = useState(false)
|
||||
const [pageError, setPageError] = useState<string | null>(null)
|
||||
@@ -133,6 +135,14 @@ export const AgentsPage: FC = () => {
|
||||
cliAuthModalOpen,
|
||||
setCliAuthModalOpen,
|
||||
})
|
||||
const { selectableHermesProviders } = useHermesProviderSelection({
|
||||
providers,
|
||||
defaultProviderId,
|
||||
createOpen,
|
||||
createRuntime,
|
||||
createHermesProviderId,
|
||||
setCreateHermesProviderId,
|
||||
})
|
||||
useDefaultAgentName(createOpen, setNewName)
|
||||
useHarnessAgentDefaults({
|
||||
adapters,
|
||||
@@ -226,11 +236,13 @@ export const AgentsPage: FC = () => {
|
||||
createAgentPageActions({
|
||||
createProviderId,
|
||||
createRuntime,
|
||||
createHermesProviderId,
|
||||
harnessModelId,
|
||||
harnessReasoningEffort,
|
||||
navigate,
|
||||
newName,
|
||||
selectableOpenClawProviders,
|
||||
selectableHermesProviders,
|
||||
setupProviderId,
|
||||
createHarnessAgent: createHarnessAgent.mutateAsync,
|
||||
createOpenClawAgent,
|
||||
@@ -386,6 +398,8 @@ export const AgentsPage: FC = () => {
|
||||
harnessAdapterId={harnessAdapterId}
|
||||
harnessModelId={harnessModelId}
|
||||
harnessReasoningEffort={harnessReasoningEffort}
|
||||
hermesProviders={selectableHermesProviders}
|
||||
hermesSelectedProviderId={createHermesProviderId}
|
||||
name={newName}
|
||||
open={createOpen}
|
||||
providers={selectableOpenClawProviders}
|
||||
@@ -401,12 +415,14 @@ export const AgentsPage: FC = () => {
|
||||
if (!open) {
|
||||
setCreateError(null)
|
||||
createHarnessAgent.reset()
|
||||
setCreateHermesProviderId('')
|
||||
}
|
||||
}}
|
||||
onRuntimeChange={setCreateRuntime}
|
||||
onHarnessAdapterChange={handleHarnessAdapterChange}
|
||||
onHarnessModelChange={setHarnessModelId}
|
||||
onHarnessReasoningChange={setHarnessReasoningEffort}
|
||||
onHermesProviderChange={setCreateHermesProviderId}
|
||||
onNameChange={setNewName}
|
||||
onProviderChange={setCreateProviderId}
|
||||
/>
|
||||
|
||||
@@ -40,6 +40,8 @@ interface NewAgentDialogProps {
|
||||
harnessAdapterId: HarnessAgentAdapter
|
||||
harnessModelId: string
|
||||
harnessReasoningEffort: string
|
||||
hermesProviders: ProviderOption[]
|
||||
hermesSelectedProviderId: string
|
||||
name: string
|
||||
open: boolean
|
||||
providers: ProviderOption[]
|
||||
@@ -55,6 +57,7 @@ interface NewAgentDialogProps {
|
||||
onHarnessAdapterChange: (adapter: HarnessAgentAdapter) => void
|
||||
onHarnessModelChange: (modelId: string) => void
|
||||
onHarnessReasoningChange: (reasoningEffort: string) => void
|
||||
onHermesProviderChange: (providerId: string) => void
|
||||
onNameChange: (name: string) => void
|
||||
onProviderChange: (providerId: string) => void
|
||||
}
|
||||
@@ -69,6 +72,8 @@ export const NewAgentDialog: FC<NewAgentDialogProps> = ({
|
||||
harnessAdapterId,
|
||||
harnessModelId,
|
||||
harnessReasoningEffort,
|
||||
hermesProviders,
|
||||
hermesSelectedProviderId,
|
||||
name,
|
||||
open,
|
||||
providers,
|
||||
@@ -84,22 +89,29 @@ export const NewAgentDialog: FC<NewAgentDialogProps> = ({
|
||||
onHarnessAdapterChange,
|
||||
onHarnessModelChange,
|
||||
onHarnessReasoningChange,
|
||||
onHermesProviderChange,
|
||||
onNameChange,
|
||||
onProviderChange,
|
||||
}) => {
|
||||
const selectedHarnessAdapter =
|
||||
adapters.find((adapter) => adapter.id === harnessAdapterId) ?? adapters[0]
|
||||
const isHarnessRuntime = createRuntime !== 'openclaw'
|
||||
const isHermesRuntime = createRuntime === 'hermes'
|
||||
const isClassicHarnessRuntime = isHarnessRuntime && !isHermesRuntime
|
||||
const openClawBlocked = createRuntime === 'openclaw' && !canManageOpenClaw
|
||||
const cliBlocked =
|
||||
createRuntime === 'openclaw' &&
|
||||
!!selectedCliProvider &&
|
||||
!cliAuthStatus?.loggedIn
|
||||
const hermesBlocked =
|
||||
isHermesRuntime &&
|
||||
(hermesProviders.length === 0 || !hermesSelectedProviderId)
|
||||
const canCreate =
|
||||
Boolean(name.trim()) &&
|
||||
!creating &&
|
||||
!openClawBlocked &&
|
||||
!cliBlocked &&
|
||||
!hermesBlocked &&
|
||||
(createRuntime === 'openclaw'
|
||||
? providers.length > 0
|
||||
: Boolean(selectedHarnessAdapter))
|
||||
@@ -143,7 +155,8 @@ export const NewAgentDialog: FC<NewAgentDialogProps> = ({
|
||||
if (
|
||||
value === 'openclaw' ||
|
||||
value === 'claude' ||
|
||||
value === 'codex'
|
||||
value === 'codex' ||
|
||||
value === 'hermes'
|
||||
) {
|
||||
onRuntimeChange(value)
|
||||
if (value !== 'openclaw') onHarnessAdapterChange(value)
|
||||
@@ -196,7 +209,16 @@ export const NewAgentDialog: FC<NewAgentDialogProps> = ({
|
||||
</>
|
||||
) : null}
|
||||
|
||||
{isHarnessRuntime ? (
|
||||
{isHermesRuntime ? (
|
||||
<ProviderSelector
|
||||
providers={hermesProviders}
|
||||
defaultProviderId={defaultProviderId}
|
||||
selectedId={hermesSelectedProviderId}
|
||||
onSelect={onHermesProviderChange}
|
||||
/>
|
||||
) : null}
|
||||
|
||||
{isClassicHarnessRuntime ? (
|
||||
<>
|
||||
<div className="grid gap-2">
|
||||
<Label htmlFor="harness-model">Model</Label>
|
||||
|
||||
@@ -1,6 +1,21 @@
|
||||
import type { AgentEntry } from './useOpenClaw'
|
||||
|
||||
export type HarnessAgentAdapter = 'claude' | 'codex' | 'openclaw'
|
||||
export type HarnessAgentAdapter = 'claude' | 'codex' | 'openclaw' | 'hermes'
|
||||
|
||||
/**
|
||||
* One file the harness attributed to the assistant turn that just
|
||||
* finished. Mirrors the server-side `ProducedFileEventEntry` shape so
|
||||
* the inline artifact card can render alongside the streamed text the
|
||||
* user just watched complete. Only present for openclaw adapter
|
||||
* turns; claude / codex don't produce these events in v1.
|
||||
*/
|
||||
export interface HarnessProducedFile {
|
||||
id: string
|
||||
/** Workspace-relative POSIX path. */
|
||||
path: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
export type AgentHarnessStreamEvent =
|
||||
| {
|
||||
@@ -22,6 +37,10 @@ export type AgentHarnessStreamEvent =
|
||||
text: string
|
||||
rawType?: string
|
||||
}
|
||||
| {
|
||||
type: 'produced_files'
|
||||
files: HarnessProducedFile[]
|
||||
}
|
||||
| {
|
||||
type: 'done'
|
||||
text?: string
|
||||
@@ -111,6 +130,17 @@ export interface CreateHarnessAgentInput {
|
||||
adapter: HarnessAgentAdapter
|
||||
modelId?: string
|
||||
reasoningEffort?: string
|
||||
/**
|
||||
* Hermes-only — provider id from `HERMES_SUPPORTED_PROVIDERS`. When
|
||||
* paired with `apiKey`, the backend writes a per-agent
|
||||
* config.yaml + .env into the agent's HERMES_HOME so the first chat
|
||||
* doesn't depend on the user having run `hermes setup` globally.
|
||||
*/
|
||||
providerType?: string
|
||||
/** Hermes-only — API key paired with `providerType`. */
|
||||
apiKey?: string
|
||||
/** Hermes-only — base URL for the `custom` provider. */
|
||||
baseUrl?: string
|
||||
}
|
||||
|
||||
export interface HarnessHistoryReasoning {
|
||||
|
||||
@@ -20,17 +20,22 @@ import type {
|
||||
export interface AgentPageActionInput {
|
||||
createProviderId: string
|
||||
createRuntime: CreateAgentRuntime
|
||||
createHermesProviderId: string
|
||||
harnessModelId: string
|
||||
harnessReasoningEffort: string
|
||||
navigate: NavigateFunction
|
||||
newName: string
|
||||
selectableOpenClawProviders: ProviderOption[]
|
||||
selectableHermesProviders: ProviderOption[]
|
||||
setupProviderId: string
|
||||
createHarnessAgent: (input: {
|
||||
name: string
|
||||
adapter: HarnessAgentAdapter
|
||||
modelId?: string
|
||||
reasoningEffort?: string
|
||||
providerType?: string
|
||||
apiKey?: string
|
||||
baseUrl?: string
|
||||
}) => Promise<HarnessAgent>
|
||||
createOpenClawAgent: (
|
||||
input: OpenClawAgentMutationInput,
|
||||
@@ -114,20 +119,37 @@ export function createAgentPageActions(input: AgentPageActionInput) {
|
||||
const handleHarnessCreate = async () => {
|
||||
if (!input.newName.trim()) return
|
||||
|
||||
const isHermes = input.createRuntime === 'hermes'
|
||||
// Hermes pulls every provider field from the user's selected entry
|
||||
// in the global LLM-providers list (managed under AI Settings). The
|
||||
// backend rejects creation if any required field is missing.
|
||||
const hermesProvider = isHermes
|
||||
? input.selectableHermesProviders.find(
|
||||
(option) => option.id === input.createHermesProviderId,
|
||||
)
|
||||
: undefined
|
||||
const effectiveModelId = isHermes
|
||||
? hermesProvider?.modelId
|
||||
: input.harnessModelId || undefined
|
||||
|
||||
input.setCreateError(null)
|
||||
try {
|
||||
const agent = await input.createHarnessAgent({
|
||||
name: input.newName.trim(),
|
||||
adapter: input.createRuntime as HarnessAgentAdapter,
|
||||
modelId: input.harnessModelId || undefined,
|
||||
modelId: effectiveModelId,
|
||||
reasoningEffort: input.harnessReasoningEffort || undefined,
|
||||
providerType: hermesProvider?.type,
|
||||
apiKey: hermesProvider?.apiKey,
|
||||
baseUrl: hermesProvider?.baseUrl,
|
||||
})
|
||||
input.setCreateOpen(false)
|
||||
input.setNewName('')
|
||||
track(AGENT_CREATED_EVENT, {
|
||||
runtime: input.createRuntime,
|
||||
model_id: input.harnessModelId || undefined,
|
||||
model_id: effectiveModelId,
|
||||
reasoning_effort: input.harnessReasoningEffort || undefined,
|
||||
provider_type: hermesProvider?.type,
|
||||
})
|
||||
input.navigate(`/agents/${agent.id}`)
|
||||
} catch (err) {
|
||||
@@ -140,6 +162,7 @@ export function createAgentPageActions(input: AgentPageActionInput) {
|
||||
openclaw: handleOpenClawCreate,
|
||||
claude: handleHarnessCreate,
|
||||
codex: handleHarnessCreate,
|
||||
hermes: handleHarnessCreate,
|
||||
}
|
||||
void createByRuntime[input.createRuntime]()
|
||||
}
|
||||
|
||||
@@ -4,8 +4,9 @@ import type {
|
||||
HarnessAdapterDescriptor,
|
||||
HarnessAgentAdapter,
|
||||
} from './agent-harness-types'
|
||||
import type { CreateAgentRuntime } from './agents-page-types'
|
||||
import type { CreateAgentRuntime, ProviderOption } from './agents-page-types'
|
||||
import { toProviderOptions } from './agents-page-utils'
|
||||
import { getHermesSupportedProviders } from './hermes-supported-providers'
|
||||
import {
|
||||
buildOpenClawCliProviderOptions,
|
||||
findOpenClawCliProviderById,
|
||||
@@ -171,3 +172,60 @@ export function useOpenClawProviderSelection(input: {
|
||||
cliAuthError,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Mirror of useOpenClawProviderSelection but for Hermes. Hermes only
|
||||
* needs the create-dialog flow (no setup dialog, no CLI providers), so
|
||||
* this hook is much smaller — it just filters the global provider list
|
||||
* to ones Hermes can drive and seeds the selected id when the dialog
|
||||
* opens.
|
||||
*/
|
||||
export function useHermesProviderSelection(input: {
|
||||
providers: LlmProviderConfig[]
|
||||
defaultProviderId: string
|
||||
createOpen: boolean
|
||||
createRuntime: CreateAgentRuntime
|
||||
createHermesProviderId: string
|
||||
setCreateHermesProviderId: Dispatch<SetStateAction<string>>
|
||||
}) {
|
||||
const {
|
||||
providers,
|
||||
defaultProviderId,
|
||||
createOpen,
|
||||
createRuntime,
|
||||
createHermesProviderId,
|
||||
setCreateHermesProviderId,
|
||||
} = input
|
||||
|
||||
const selectableHermesProviders = useMemo<ProviderOption[]>(
|
||||
() =>
|
||||
getHermesSupportedProviders(providers).map((provider) => ({
|
||||
id: provider.id,
|
||||
type: provider.type,
|
||||
name: provider.name,
|
||||
modelId: provider.modelId,
|
||||
baseUrl: provider.baseUrl,
|
||||
apiKey: provider.apiKey,
|
||||
})),
|
||||
[providers],
|
||||
)
|
||||
|
||||
useEffect(() => {
|
||||
if (selectableHermesProviders.length === 0) return
|
||||
if (!createOpen || createRuntime !== 'hermes') return
|
||||
if (createHermesProviderId) return
|
||||
const fallbackId =
|
||||
selectableHermesProviders.find((p) => p.id === defaultProviderId)?.id ??
|
||||
selectableHermesProviders[0].id
|
||||
setCreateHermesProviderId(fallbackId)
|
||||
}, [
|
||||
createHermesProviderId,
|
||||
createOpen,
|
||||
createRuntime,
|
||||
defaultProviderId,
|
||||
selectableHermesProviders,
|
||||
setCreateHermesProviderId,
|
||||
])
|
||||
|
||||
return { selectableHermesProviders }
|
||||
}
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
import {
|
||||
HERMES_SUPPORTED_BROWSEROS_PROVIDER_TYPES,
|
||||
type HermesSupportedBrowserosProviderType,
|
||||
} from '@browseros/shared/constants/hermes'
|
||||
import type { LlmProviderConfig, ProviderType } from '@/lib/llm-providers/types'
|
||||
|
||||
export function isHermesSupportedProviderType(
|
||||
providerType: ProviderType,
|
||||
): providerType is HermesSupportedBrowserosProviderType {
|
||||
return (
|
||||
HERMES_SUPPORTED_BROWSEROS_PROVIDER_TYPES as readonly ProviderType[]
|
||||
).includes(providerType)
|
||||
}
|
||||
|
||||
/**
|
||||
* Filters the user's global LLM providers down to ones Hermes can use.
|
||||
* A provider qualifies when its type is in the Hermes-supported set
|
||||
* AND it has an API key wired up. CLI-style providers (chatgpt-pro,
|
||||
* github-copilot, qwen-code) and other unsupported types (browseros,
|
||||
* ollama, lmstudio, bedrock, azure, google, moonshot) are filtered
|
||||
* out — Hermes can't drive them today.
|
||||
*/
|
||||
export function getHermesSupportedProviders(
|
||||
providers: LlmProviderConfig[],
|
||||
): LlmProviderConfig[] {
|
||||
return providers.filter(
|
||||
(provider) =>
|
||||
!!provider.apiKey && isHermesSupportedProviderType(provider.type),
|
||||
)
|
||||
}
|
||||
@@ -25,12 +25,18 @@ interface HarnessAgentsResponse {
|
||||
|
||||
export type { AgentHarnessStreamEvent }
|
||||
|
||||
const AGENT_QUERY_KEYS = {
|
||||
export const AGENT_QUERY_KEYS = {
|
||||
adapters: 'agent-harness-adapters',
|
||||
agents: 'agent-harness-agents',
|
||||
/** Outputs-rail data for one agent — `[agentOutputs, baseUrl, agentId]`. */
|
||||
agentOutputs: 'agent-harness-agent-outputs',
|
||||
/** Per-turn artifact-card files — `[agentTurnFiles, baseUrl, agentId, turnId]`. */
|
||||
agentTurnFiles: 'agent-harness-agent-turn-files',
|
||||
/** Single-file preview payload — `[filePreview, baseUrl, fileId]`. */
|
||||
filePreview: 'agent-harness-file-preview',
|
||||
} as const
|
||||
|
||||
async function agentsFetch<T>(
|
||||
export async function agentsFetch<T>(
|
||||
baseUrl: string,
|
||||
path: string,
|
||||
init?: RequestInit,
|
||||
|
||||
@@ -85,7 +85,8 @@ export const SidebarLayout: FC = () => {
|
||||
|
||||
return (
|
||||
<RpcClientProvider>
|
||||
<div className="relative min-h-screen bg-background">
|
||||
{/* pl-14 offsets all content by the collapsed sidebar width (w-14 = 56px) so it never sits under the rail */}
|
||||
<div className="relative min-h-screen bg-background pl-14">
|
||||
{/* Sidebar - fixed overlay */}
|
||||
{/* biome-ignore lint/a11y/noStaticElementInteractions: hover interactions needed */}
|
||||
<div
|
||||
@@ -96,7 +97,6 @@ export const SidebarLayout: FC = () => {
|
||||
<AppSidebar expanded={sidebarOpen} onOpenShortcuts={openShortcuts} />
|
||||
</div>
|
||||
|
||||
{/* Main content - full width, centered */}
|
||||
{location.pathname === '/home/chat' ? (
|
||||
<main className="relative h-dvh overflow-hidden">
|
||||
<Outlet />
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
import type { FC } from 'react'
|
||||
import { useEffect, useState } from 'react'
|
||||
import { useNavigate } from 'react-router'
|
||||
import { CloudSyncDisclosure } from '@/components/auth/CloudSyncDisclosure'
|
||||
import { Alert, AlertDescription } from '@/components/ui/alert'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import {
|
||||
@@ -199,6 +200,8 @@ export const LoginPage: FC = () => {
|
||||
)}
|
||||
Continue with Google
|
||||
</Button>
|
||||
|
||||
<CloudSyncDisclosure />
|
||||
</CardContent>
|
||||
</Card>
|
||||
)
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
|
||||
export const MEMORY_QUERY_KEY = 'memory'
|
||||
|
||||
async function fetchMemory(baseUrl: string): Promise<string> {
|
||||
const response = await fetch(`${baseUrl}/memory`)
|
||||
if (!response.ok) throw new Error(`HTTP ${response.status}`)
|
||||
@@ -30,7 +32,7 @@ export function useMemoryContent() {
|
||||
const queryClient = useQueryClient()
|
||||
|
||||
const { data, isLoading, error, refetch } = useQuery<string, Error>({
|
||||
queryKey: ['memory', baseUrl],
|
||||
queryKey: [MEMORY_QUERY_KEY, baseUrl],
|
||||
queryFn: () => fetchMemory(baseUrl as string),
|
||||
enabled: !!baseUrl && !urlLoading,
|
||||
})
|
||||
@@ -38,7 +40,7 @@ export function useMemoryContent() {
|
||||
const saveMutation = useMutation({
|
||||
mutationFn: (content: string) => saveMemory(baseUrl as string, content),
|
||||
onSuccess: (_data, content) => {
|
||||
queryClient.setQueryData(['memory', baseUrl], content)
|
||||
queryClient.setQueryData([MEMORY_QUERY_KEY, baseUrl], content)
|
||||
},
|
||||
})
|
||||
|
||||
|
||||
@@ -0,0 +1,180 @@
|
||||
import { useMutation, useQueryClient } from '@tanstack/react-query'
|
||||
import { Brain, FileText, Loader2, RotateCcw } from 'lucide-react'
|
||||
import { type FC, type ReactNode, useState } from 'react'
|
||||
import { toast } from 'sonner'
|
||||
import {
|
||||
AlertDialog,
|
||||
AlertDialogAction,
|
||||
AlertDialogCancel,
|
||||
AlertDialogContent,
|
||||
AlertDialogDescription,
|
||||
AlertDialogFooter,
|
||||
AlertDialogHeader,
|
||||
AlertDialogTitle,
|
||||
} from '@/components/ui/alert-dialog'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import { MEMORY_QUERY_KEY } from '../memory/useMemoryContent'
|
||||
import { SOUL_QUERY_KEY } from '../soul/useSoulContent'
|
||||
|
||||
type ResetTarget = 'memory' | 'soul'
|
||||
|
||||
type ResetAction = {
|
||||
target: ResetTarget
|
||||
title: string
|
||||
description: string
|
||||
buttonLabel: string
|
||||
icon: ReactNode
|
||||
}
|
||||
|
||||
async function deleteServerResource(
|
||||
baseUrl: string,
|
||||
resource: ResetTarget,
|
||||
): Promise<void> {
|
||||
const response = await fetch(`${baseUrl}/${resource}`, { method: 'DELETE' })
|
||||
if (!response.ok) throw new Error(`HTTP ${response.status}`)
|
||||
}
|
||||
|
||||
export const ResetDataPage: FC = () => {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: isUrlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
const queryClient = useQueryClient()
|
||||
const [pendingAction, setPendingAction] = useState<ResetAction | null>(null)
|
||||
|
||||
const resetMutation = useMutation({
|
||||
mutationFn: async (target: ResetTarget) => {
|
||||
if (!baseUrl) throw new Error('BrowserOS server URL is unavailable')
|
||||
await deleteServerResource(baseUrl, target)
|
||||
return target
|
||||
},
|
||||
onSuccess: async (target) => {
|
||||
if (target === 'memory') {
|
||||
queryClient.setQueryData([MEMORY_QUERY_KEY, baseUrl], '')
|
||||
}
|
||||
await queryClient.invalidateQueries({
|
||||
queryKey: target === 'memory' ? [MEMORY_QUERY_KEY] : [SOUL_QUERY_KEY],
|
||||
})
|
||||
toast.success(target === 'memory' ? 'Memory reset' : 'SOUL.md reset')
|
||||
},
|
||||
onError: (_error, target) => {
|
||||
toast.error(
|
||||
target === 'memory'
|
||||
? 'Failed to reset memory'
|
||||
: 'Failed to reset SOUL.md',
|
||||
)
|
||||
},
|
||||
})
|
||||
|
||||
const actions: ResetAction[] = [
|
||||
{
|
||||
target: 'memory',
|
||||
title: 'Reset memory?',
|
||||
description:
|
||||
'This deletes CORE.md and daily memory files. This cannot be undone.',
|
||||
buttonLabel: 'Reset memory',
|
||||
icon: <Brain className="h-4 w-4 text-muted-foreground" />,
|
||||
},
|
||||
{
|
||||
target: 'soul',
|
||||
title: 'Reset SOUL.md?',
|
||||
description:
|
||||
'This replaces SOUL.md with the default template. This cannot be undone.',
|
||||
buttonLabel: 'Reset SOUL.md',
|
||||
icon: <FileText className="h-4 w-4 text-muted-foreground" />,
|
||||
},
|
||||
]
|
||||
|
||||
const isBusy = isUrlLoading || resetMutation.isPending
|
||||
const disabled = isBusy || Boolean(urlError) || !baseUrl
|
||||
|
||||
const handleConfirm = () => {
|
||||
if (!pendingAction) return
|
||||
resetMutation.mutate(pendingAction.target)
|
||||
setPendingAction(null)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="mx-auto w-full max-w-3xl space-y-6 p-6">
|
||||
<div>
|
||||
<div className="mb-2 flex items-center gap-2 text-muted-foreground">
|
||||
<RotateCcw className="h-4 w-4" />
|
||||
<span className="font-medium text-xs uppercase tracking-wider">
|
||||
Reset
|
||||
</span>
|
||||
</div>
|
||||
<h1 className="font-semibold text-2xl">Reset Data</h1>
|
||||
</div>
|
||||
|
||||
{urlError ? (
|
||||
<div className="rounded-lg border border-destructive/50 bg-destructive/5 p-4">
|
||||
<p className="text-destructive text-sm">
|
||||
BrowserOS server is unavailable.
|
||||
</p>
|
||||
</div>
|
||||
) : null}
|
||||
|
||||
<div className="space-y-3">
|
||||
{actions.map((action) => (
|
||||
<div
|
||||
key={action.target}
|
||||
className="flex flex-col gap-3 rounded-lg border bg-card p-4 shadow-sm sm:flex-row sm:items-center sm:justify-between"
|
||||
>
|
||||
<div className="flex min-w-0 items-center gap-3">
|
||||
{action.icon}
|
||||
<div className="min-w-0">
|
||||
<h2 className="font-medium text-sm">{action.buttonLabel}</h2>
|
||||
<p className="mt-1 text-muted-foreground text-xs">
|
||||
{action.description}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<Button
|
||||
type="button"
|
||||
variant="destructive"
|
||||
size="sm"
|
||||
className="shrink-0"
|
||||
disabled={disabled}
|
||||
onClick={() => setPendingAction(action)}
|
||||
>
|
||||
{resetMutation.isPending &&
|
||||
resetMutation.variables === action.target ? (
|
||||
<Loader2 className="h-3.5 w-3.5 animate-spin" />
|
||||
) : (
|
||||
<RotateCcw className="h-3.5 w-3.5" />
|
||||
)}
|
||||
{action.buttonLabel}
|
||||
</Button>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
|
||||
<AlertDialog
|
||||
open={Boolean(pendingAction)}
|
||||
onOpenChange={(open) => {
|
||||
if (!open) setPendingAction(null)
|
||||
}}
|
||||
>
|
||||
<AlertDialogContent>
|
||||
<AlertDialogHeader>
|
||||
<AlertDialogTitle>{pendingAction?.title}</AlertDialogTitle>
|
||||
<AlertDialogDescription>
|
||||
{pendingAction?.description}
|
||||
</AlertDialogDescription>
|
||||
</AlertDialogHeader>
|
||||
<AlertDialogFooter>
|
||||
<AlertDialogCancel>Cancel</AlertDialogCancel>
|
||||
<AlertDialogAction
|
||||
onClick={handleConfirm}
|
||||
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
|
||||
>
|
||||
{pendingAction?.buttonLabel}
|
||||
</AlertDialogAction>
|
||||
</AlertDialogFooter>
|
||||
</AlertDialogContent>
|
||||
</AlertDialog>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
import { AlertCircle, CheckCircle2, Loader2, Mail } from 'lucide-react'
|
||||
import { useState } from 'react'
|
||||
import { CloudSyncDisclosure } from '@/components/auth/CloudSyncDisclosure'
|
||||
import { Alert, AlertDescription } from '@/components/ui/alert'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { Input } from '@/components/ui/input'
|
||||
@@ -199,6 +200,8 @@ export const StepTwo = ({ direction, onContinue }: StepTwoProps) => {
|
||||
</Button>
|
||||
</form>
|
||||
|
||||
<CloudSyncDisclosure />
|
||||
|
||||
<div className="text-center">
|
||||
<Button
|
||||
variant="ghost"
|
||||
|
||||
@@ -2,7 +2,8 @@ import { keepPreviousData, useQueryClient } from '@tanstack/react-query'
|
||||
import type { UIMessage } from 'ai'
|
||||
import { Loader2 } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import { useMemo } from 'react'
|
||||
import { useMemo, useState } from 'react'
|
||||
import { toast } from 'sonner'
|
||||
import { useSessionInfo } from '@/lib/auth/sessionStorage'
|
||||
import { useConversations } from '@/lib/conversations/conversationStorage'
|
||||
import { GetProfileIdByUserIdDocument } from '@/lib/conversations/graphql/uploadConversationDocument'
|
||||
@@ -21,8 +22,11 @@ import {
|
||||
import { LocalChatHistory } from './local/LocalChatHistory'
|
||||
|
||||
const RemoteChatHistory: FC<{ userId: string }> = ({ userId }) => {
|
||||
const { conversationId: activeConversationId } = useChatSessionContext()
|
||||
const { conversationId: activeConversationId, resetConversation } =
|
||||
useChatSessionContext()
|
||||
const { clearConversations } = useConversations()
|
||||
const queryClient = useQueryClient()
|
||||
const [isClearingAll, setIsClearingAll] = useState(false)
|
||||
|
||||
const { data: profileData } = useGraphqlQuery(GetProfileIdByUserIdDocument, {
|
||||
userId,
|
||||
@@ -68,6 +72,50 @@ const RemoteChatHistory: FC<{ userId: string }> = ({ userId }) => {
|
||||
deleteConversationMutation.mutate({ rowId: id })
|
||||
}
|
||||
|
||||
const getAllRemoteConversationIds = async () => {
|
||||
let pages = graphqlData?.pages ?? []
|
||||
let hasMore = Boolean(
|
||||
pages.at(-1)?.conversations?.pageInfo.hasNextPage ?? hasNextPage,
|
||||
)
|
||||
|
||||
while (hasMore) {
|
||||
const result = await fetchNextPage()
|
||||
pages = result.data?.pages ?? pages
|
||||
hasMore = Boolean(pages.at(-1)?.conversations?.pageInfo.hasNextPage)
|
||||
}
|
||||
|
||||
return pages.flatMap((page) =>
|
||||
(page.conversations?.nodes ?? [])
|
||||
.filter((node): node is NonNullable<typeof node> => node !== null)
|
||||
.map((node) => node.rowId),
|
||||
)
|
||||
}
|
||||
|
||||
const handleClearAll = async () => {
|
||||
setIsClearingAll(true)
|
||||
try {
|
||||
const ids = [...new Set(await getAllRemoteConversationIds())]
|
||||
for (let i = 0; i < ids.length; i += 10) {
|
||||
const batch = ids.slice(i, i + 10)
|
||||
await Promise.all(
|
||||
batch.map((rowId) =>
|
||||
deleteConversationMutation.mutateAsync({ rowId }),
|
||||
),
|
||||
)
|
||||
}
|
||||
await clearConversations()
|
||||
resetConversation()
|
||||
await queryClient.invalidateQueries({
|
||||
queryKey: [getQueryKeyFromDocument(GetConversationsForHistoryDocument)],
|
||||
})
|
||||
toast.success('Chat sessions cleared')
|
||||
} catch {
|
||||
toast.error('Failed to clear chat sessions')
|
||||
} finally {
|
||||
setIsClearingAll(false)
|
||||
}
|
||||
}
|
||||
|
||||
const conversations = useMemo<HistoryConversation[]>(() => {
|
||||
if (!graphqlData?.pages) return []
|
||||
|
||||
@@ -110,6 +158,8 @@ const RemoteChatHistory: FC<{ userId: string }> = ({ userId }) => {
|
||||
groupedConversations={groupedConversations}
|
||||
activeConversationId={activeConversationId}
|
||||
onDelete={handleDelete}
|
||||
onClearAll={handleClearAll}
|
||||
isClearingAll={isClearingAll || deleteConversationMutation.isPending}
|
||||
hasNextPage={hasNextPage}
|
||||
isFetchingNextPage={isFetchingNextPage}
|
||||
onLoadMore={fetchNextPage}
|
||||
@@ -121,8 +171,6 @@ const RemoteChatHistory: FC<{ userId: string }> = ({ userId }) => {
|
||||
export const ChatHistory: FC = () => {
|
||||
const { sessionInfo } = useSessionInfo()
|
||||
const userId = sessionInfo.user?.id
|
||||
// needed to initiate remote-sync
|
||||
useConversations()
|
||||
|
||||
if (userId) {
|
||||
return <RemoteChatHistory userId={userId} />
|
||||
|
||||
@@ -1,6 +1,16 @@
|
||||
import { Loader2, MessageSquare } from 'lucide-react'
|
||||
import { type FC, useEffect, useRef } from 'react'
|
||||
import { Loader2, MessageSquare, Trash2 } from 'lucide-react'
|
||||
import { type FC, useEffect, useRef, useState } from 'react'
|
||||
import { Link } from 'react-router'
|
||||
import {
|
||||
AlertDialog,
|
||||
AlertDialogAction,
|
||||
AlertDialogCancel,
|
||||
AlertDialogContent,
|
||||
AlertDialogDescription,
|
||||
AlertDialogFooter,
|
||||
AlertDialogHeader,
|
||||
AlertDialogTitle,
|
||||
} from '@/components/ui/alert-dialog'
|
||||
import { ConversationGroup } from './ConversationGroup'
|
||||
import type { GroupedConversations } from './types'
|
||||
import { TIME_GROUP_LABELS } from './utils'
|
||||
@@ -13,6 +23,8 @@ interface ConversationListProps {
|
||||
isFetchingNextPage?: boolean
|
||||
onLoadMore?: () => void
|
||||
isRefreshing?: boolean
|
||||
onClearAll?: () => void
|
||||
isClearingAll?: boolean
|
||||
}
|
||||
|
||||
export const ConversationList: FC<ConversationListProps> = ({
|
||||
@@ -23,8 +35,11 @@ export const ConversationList: FC<ConversationListProps> = ({
|
||||
isFetchingNextPage,
|
||||
onLoadMore,
|
||||
isRefreshing,
|
||||
onClearAll,
|
||||
isClearingAll,
|
||||
}) => {
|
||||
const loadMoreRef = useRef<HTMLDivElement>(null)
|
||||
const [showClearAllDialog, setShowClearAllDialog] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
if (!hasNextPage || !onLoadMore) return
|
||||
@@ -56,65 +71,118 @@ export const ConversationList: FC<ConversationListProps> = ({
|
||||
groupedConversations.thisMonth.length > 0 ||
|
||||
groupedConversations.older.length > 0
|
||||
|
||||
return (
|
||||
<main className="mt-4 flex h-full flex-1 flex-col space-y-4 overflow-y-auto">
|
||||
<div className="w-full p-3">
|
||||
{isRefreshing && (
|
||||
<div className="flex items-center justify-center gap-2 pb-3 text-muted-foreground text-xs">
|
||||
<Loader2 className="h-3 w-3 animate-spin" />
|
||||
<span>Fetching latest conversations</span>
|
||||
</div>
|
||||
)}
|
||||
{!hasConversations ? (
|
||||
<div className="flex flex-col items-center justify-center py-12 text-center">
|
||||
<MessageSquare className="mb-3 h-10 w-10 text-muted-foreground/50" />
|
||||
<p className="text-muted-foreground text-sm">
|
||||
No conversations yet
|
||||
</p>
|
||||
<Link to="/" className="mt-2 text-primary text-sm hover:underline">
|
||||
Start a new chat
|
||||
</Link>
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.today}
|
||||
conversations={groupedConversations.today}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.thisWeek}
|
||||
conversations={groupedConversations.thisWeek}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.thisMonth}
|
||||
conversations={groupedConversations.thisMonth}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.older}
|
||||
conversations={groupedConversations.older}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
const handleConfirmClearAll = () => {
|
||||
onClearAll?.()
|
||||
setShowClearAllDialog(false)
|
||||
}
|
||||
|
||||
{hasNextPage && (
|
||||
<div
|
||||
ref={loadMoreRef}
|
||||
className="flex items-center justify-center py-4"
|
||||
return (
|
||||
<>
|
||||
<main className="mt-4 flex h-full flex-1 flex-col space-y-4 overflow-y-auto">
|
||||
<div className="w-full p-3">
|
||||
<div className="mb-3 flex items-center justify-between gap-3 px-1">
|
||||
<h2 className="font-semibold text-sm">Chat history</h2>
|
||||
{onClearAll && hasConversations && (
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => setShowClearAllDialog(true)}
|
||||
disabled={isClearingAll}
|
||||
className="inline-flex h-8 shrink-0 items-center gap-1.5 rounded-md px-2.5 font-medium text-muted-foreground text-xs transition-colors hover:bg-destructive/10 hover:text-destructive disabled:pointer-events-none disabled:opacity-50"
|
||||
title="Clear sessions"
|
||||
>
|
||||
{isFetchingNextPage && (
|
||||
<Loader2 className="h-5 w-5 animate-spin text-muted-foreground" />
|
||||
{isClearingAll ? (
|
||||
<Loader2 className="h-3.5 w-3.5 animate-spin" />
|
||||
) : (
|
||||
<Trash2 className="h-3.5 w-3.5" />
|
||||
)}
|
||||
</div>
|
||||
Clear sessions
|
||||
</button>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</main>
|
||||
</div>
|
||||
|
||||
{isRefreshing && (
|
||||
<div className="flex items-center justify-center gap-2 pb-3 text-muted-foreground text-xs">
|
||||
<Loader2 className="h-3 w-3 animate-spin" />
|
||||
<span>Fetching latest conversations</span>
|
||||
</div>
|
||||
)}
|
||||
{!hasConversations ? (
|
||||
<div className="flex flex-col items-center justify-center py-12 text-center">
|
||||
<MessageSquare className="mb-3 h-10 w-10 text-muted-foreground/50" />
|
||||
<p className="text-muted-foreground text-sm">
|
||||
No conversations yet
|
||||
</p>
|
||||
<Link
|
||||
to="/"
|
||||
className="mt-2 text-primary text-sm hover:underline"
|
||||
>
|
||||
Start a new chat
|
||||
</Link>
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.today}
|
||||
conversations={groupedConversations.today}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.thisWeek}
|
||||
conversations={groupedConversations.thisWeek}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.thisMonth}
|
||||
conversations={groupedConversations.thisMonth}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
<ConversationGroup
|
||||
label={TIME_GROUP_LABELS.older}
|
||||
conversations={groupedConversations.older}
|
||||
onDelete={onDelete}
|
||||
activeConversationId={activeConversationId}
|
||||
/>
|
||||
|
||||
{hasNextPage && (
|
||||
<div
|
||||
ref={loadMoreRef}
|
||||
className="flex items-center justify-center py-4"
|
||||
>
|
||||
{isFetchingNextPage && (
|
||||
<Loader2 className="h-5 w-5 animate-spin text-muted-foreground" />
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</main>
|
||||
|
||||
<AlertDialog
|
||||
open={showClearAllDialog}
|
||||
onOpenChange={setShowClearAllDialog}
|
||||
>
|
||||
<AlertDialogContent>
|
||||
<AlertDialogHeader>
|
||||
<AlertDialogTitle>Clear all sessions?</AlertDialogTitle>
|
||||
<AlertDialogDescription>
|
||||
This action permanently deletes every chat session in history.
|
||||
</AlertDialogDescription>
|
||||
</AlertDialogHeader>
|
||||
<AlertDialogFooter>
|
||||
<AlertDialogCancel>Cancel</AlertDialogCancel>
|
||||
<AlertDialogAction
|
||||
onClick={handleConfirmClearAll}
|
||||
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
|
||||
>
|
||||
Clear sessions
|
||||
</AlertDialogAction>
|
||||
</AlertDialogFooter>
|
||||
</AlertDialogContent>
|
||||
</AlertDialog>
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { FC } from 'react'
|
||||
import { useMemo } from 'react'
|
||||
import { toast } from 'sonner'
|
||||
import { useConversations } from '@/lib/conversations/conversationStorage'
|
||||
import { useChatSessionContext } from '../../layout/ChatSessionContext'
|
||||
import { ConversationList } from '../components/ConversationList'
|
||||
@@ -7,9 +8,13 @@ import type { HistoryConversation } from '../components/types'
|
||||
import { extractLastUserMessage, groupConversations } from '../components/utils'
|
||||
|
||||
export const LocalChatHistory: FC = () => {
|
||||
const { conversations: localConversations, removeConversation } =
|
||||
useConversations()
|
||||
const { conversationId: activeConversationId } = useChatSessionContext()
|
||||
const {
|
||||
conversations: localConversations,
|
||||
removeConversation,
|
||||
clearConversations,
|
||||
} = useConversations()
|
||||
const { conversationId: activeConversationId, resetConversation } =
|
||||
useChatSessionContext()
|
||||
|
||||
const conversations = useMemo<HistoryConversation[]>(() => {
|
||||
return localConversations.map((conv) => ({
|
||||
@@ -24,11 +29,22 @@ export const LocalChatHistory: FC = () => {
|
||||
[conversations],
|
||||
)
|
||||
|
||||
const handleClearAll = async () => {
|
||||
try {
|
||||
await clearConversations()
|
||||
resetConversation()
|
||||
toast.success('Chat sessions cleared')
|
||||
} catch {
|
||||
toast.error('Failed to clear chat sessions')
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<ConversationList
|
||||
groupedConversations={groupedConversations}
|
||||
activeConversationId={activeConversationId}
|
||||
onDelete={removeConversation}
|
||||
onClearAll={handleClearAll}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -108,6 +108,7 @@ function formatAdapterName(adapter: HarnessAgentAdapter): string {
|
||||
if (adapter === 'claude') return 'Claude Code'
|
||||
if (adapter === 'codex') return 'Codex'
|
||||
if (adapter === 'openclaw') return 'OpenClaw'
|
||||
if (adapter === 'hermes') return 'Hermes'
|
||||
return adapter
|
||||
}
|
||||
|
||||
|
||||
@@ -48,6 +48,18 @@ import {
|
||||
normalizeToolApprovalConfig,
|
||||
toolApprovalConfigStorage,
|
||||
} from '@/lib/tool-approvals/storage'
|
||||
import {
|
||||
analyzeWorkflowUsage,
|
||||
detectWorkflowAdvisorCommand,
|
||||
formatWorkflowAnalysisResponse,
|
||||
formatWorkflowUsageClearedResponse,
|
||||
formatWorkflowUsageDataResponse,
|
||||
type WorkflowAdvisorCommand,
|
||||
} from '@/lib/workflow-usage/advisor'
|
||||
import {
|
||||
clearWorkflowUsageRecords,
|
||||
getWorkflowUsageRecords,
|
||||
} from '@/lib/workflow-usage/storage'
|
||||
import { selectedWorkspaceStorage } from '@/lib/workspace/workspace-storage'
|
||||
import type { ChatMode } from './chatTypes'
|
||||
import { GetConversationWithMessagesDocument } from './graphql/chatSessionDocument'
|
||||
@@ -133,6 +145,7 @@ export interface ChatSessionOptions {
|
||||
}
|
||||
|
||||
const NEWTAB_SYSTEM_PROMPT = `IMPORTANT: The user is chatting from the New Tab page. When performing browser actions, ALWAYS open content in a NEW TAB rather than navigating the current tab. The user's new tab page should remain accessible.`
|
||||
const WORKFLOW_ADVISOR_LOCAL_ONLY = 'workflow-advisor'
|
||||
|
||||
const getUserSystemPrompt = (
|
||||
origin: ChatOrigin | undefined,
|
||||
@@ -142,6 +155,25 @@ const getUserSystemPrompt = (
|
||||
? [personalization, NEWTAB_SYSTEM_PROMPT].filter(Boolean).join('\n\n')
|
||||
: personalization
|
||||
|
||||
const createTextMessage = (
|
||||
role: 'user' | 'assistant',
|
||||
text: string,
|
||||
options?: { localOnly?: boolean },
|
||||
): UIMessage =>
|
||||
({
|
||||
id: crypto.randomUUID(),
|
||||
role,
|
||||
parts: [{ type: 'text', text }],
|
||||
metadata: options?.localOnly
|
||||
? { browserosLocalOnly: WORKFLOW_ADVISOR_LOCAL_ONLY }
|
||||
: undefined,
|
||||
}) as UIMessage
|
||||
|
||||
const isWorkflowAdvisorLocalOnlyMessage = (message: UIMessage): boolean => {
|
||||
const metadata = (message as { metadata?: Record<string, unknown> }).metadata
|
||||
return metadata?.browserosLocalOnly === WORKFLOW_ADVISOR_LOCAL_ONLY
|
||||
}
|
||||
|
||||
const buildRequestBrowserContext = ({
|
||||
activeTab,
|
||||
action,
|
||||
@@ -376,7 +408,9 @@ export const useChatSession = (options?: ChatSessionOptions) => {
|
||||
Feature.PREVIOUS_CONVERSATION_ARRAY,
|
||||
)
|
||||
|
||||
const previousMessages = messagesRef.current
|
||||
const previousMessages = messagesRef.current.filter(
|
||||
(message) => !isWorkflowAdvisorLocalOnlyMessage(message),
|
||||
)
|
||||
const history =
|
||||
previousMessages.length > 0
|
||||
? formatConversationHistory(previousMessages)
|
||||
@@ -559,7 +593,9 @@ export const useChatSession = (options?: ChatSessionOptions) => {
|
||||
})
|
||||
}
|
||||
|
||||
const messagesToSave = messages.filter((m) => m.parts?.length > 0)
|
||||
const messagesToSave = messages.filter(
|
||||
(m) => m.parts?.length > 0 && !isWorkflowAdvisorLocalOnlyMessage(m),
|
||||
)
|
||||
if (messagesToSave.length === 0) return
|
||||
|
||||
if (isLoggedIn) {
|
||||
@@ -645,6 +681,54 @@ export const useChatSession = (options?: ChatSessionOptions) => {
|
||||
action?: ChatAction
|
||||
} | null>(null)
|
||||
|
||||
const appendLocalWorkflowAdvisorExchange = (
|
||||
userText: string,
|
||||
responseText: string,
|
||||
) => {
|
||||
const nextMessages = [
|
||||
...messagesRef.current,
|
||||
createTextMessage('user', userText, { localOnly: true }),
|
||||
createTextMessage('assistant', responseText, { localOnly: true }),
|
||||
]
|
||||
messagesRef.current = nextMessages
|
||||
setMessages(nextMessages)
|
||||
}
|
||||
|
||||
const handleWorkflowAdvisorCommand = async (
|
||||
text: string,
|
||||
command: WorkflowAdvisorCommand,
|
||||
) => {
|
||||
try {
|
||||
if (command === 'clear') {
|
||||
await clearWorkflowUsageRecords()
|
||||
appendLocalWorkflowAdvisorExchange(
|
||||
text,
|
||||
formatWorkflowUsageClearedResponse(),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
const records = await getWorkflowUsageRecords()
|
||||
const response =
|
||||
command === 'view'
|
||||
? formatWorkflowUsageDataResponse(records)
|
||||
: formatWorkflowAnalysisResponse(analyzeWorkflowUsage(records))
|
||||
|
||||
appendLocalWorkflowAdvisorExchange(text, response)
|
||||
} catch (error) {
|
||||
sentry.captureException(error, {
|
||||
extra: {
|
||||
message: 'Failed to run local workflow advisor command',
|
||||
command,
|
||||
},
|
||||
})
|
||||
appendLocalWorkflowAdvisorExchange(
|
||||
text,
|
||||
"I couldn't read the local workflow usage patterns. Nothing was sent to a model or external service.",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const dispatchMessage = useCallback(
|
||||
(text: string) => {
|
||||
startExecutionTask({
|
||||
@@ -678,6 +762,12 @@ export const useChatSession = (options?: ChatSessionOptions) => {
|
||||
}, [dispatchMessage, isIntegrationsSynced])
|
||||
|
||||
const sendMessage = (params: { text: string; action?: ChatAction }) => {
|
||||
const workflowAdvisorCommand = detectWorkflowAdvisorCommand(params.text)
|
||||
if (workflowAdvisorCommand) {
|
||||
void handleWorkflowAdvisorCommand(params.text, workflowAdvisorCommand)
|
||||
return
|
||||
}
|
||||
|
||||
const target = selectedChatTargetRef.current
|
||||
const llmTargetProvider = toLlmProviderConfig(target)
|
||||
const agentTarget = target?.kind === 'acp' ? target : undefined
|
||||
|
||||
@@ -10,6 +10,10 @@ import type {
|
||||
ExecutionTaskStatus,
|
||||
} from '@/lib/execution-history/types'
|
||||
import { sentry } from '@/lib/sentry/sentry'
|
||||
import {
|
||||
createWorkflowUsageRecordFromExecutionTask,
|
||||
recordWorkflowUsage,
|
||||
} from '@/lib/workflow-usage/storage'
|
||||
|
||||
interface StartExecutionTaskInput {
|
||||
conversationId: string
|
||||
@@ -145,6 +149,17 @@ export function useExecutionHistoryTracker() {
|
||||
}
|
||||
|
||||
persistTask(nextTask)
|
||||
void recordWorkflowUsage(
|
||||
createWorkflowUsageRecordFromExecutionTask(nextTask),
|
||||
).catch((error) => {
|
||||
sentry.captureException(error, {
|
||||
extra: {
|
||||
message: 'Failed to persist workflow usage pattern',
|
||||
conversationId: nextTask.conversationId,
|
||||
taskId: nextTask.id,
|
||||
},
|
||||
})
|
||||
})
|
||||
activeTaskRef.current = null
|
||||
},
|
||||
[persistTask],
|
||||
|
||||
@@ -42,11 +42,34 @@ export interface UserAttachmentPreview {
|
||||
dataUrl?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Files attributed to this turn by the harness's per-turn workspace
|
||||
* diff. Populated either via the live `produced_files` SSE event or
|
||||
* (on resume) the `useAgentTurnFiles` fallback. Mirrors the wire
|
||||
* shape from `agent-harness-types.HarnessProducedFile` minus the
|
||||
* stream-only fields the inline card doesn't need.
|
||||
*/
|
||||
export interface ConversationTurnFile {
|
||||
id: string
|
||||
path: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
export interface AgentConversationTurn {
|
||||
id: string
|
||||
/**
|
||||
* Server-issued turn id, set as soon as the response headers arrive
|
||||
* (`X-Turn-Id`) for fresh sends, or from the active-turn payload on
|
||||
* resume. Required for the historic-files fallback fetch; absent on
|
||||
* the brief optimistic window before the first header.
|
||||
*/
|
||||
turnId?: string | null
|
||||
userText: string
|
||||
userAttachments?: UserAttachmentPreview[]
|
||||
parts: AssistantPart[]
|
||||
/** Files produced during this turn (openclaw only in v1). */
|
||||
producedFiles?: ConversationTurnFile[]
|
||||
done: boolean
|
||||
timestamp: number
|
||||
}
|
||||
|
||||
@@ -0,0 +1,126 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Pure helpers used by the artifact card and the Outputs rail.
|
||||
* Display formatting only — no React, no fetch, no DOM. Anything
|
||||
* stateful belongs in `./useAgentOutputs` or `./useFilePreview`.
|
||||
*/
|
||||
|
||||
import { buildAgentApiUrl } from '@/entrypoints/app/agents/agent-api-url'
|
||||
|
||||
/**
|
||||
* Coarse classification of a file's intended preview / icon path.
|
||||
* Mirrors the server-side `FilePreviewKind` minus `missing` — the
|
||||
* client only ever computes a kind for a row it already has.
|
||||
*/
|
||||
export type FileKind = 'text' | 'image' | 'pdf' | 'binary'
|
||||
|
||||
const TEXT_EXTENSIONS = new Set([
|
||||
'txt',
|
||||
'md',
|
||||
'markdown',
|
||||
'json',
|
||||
'jsonl',
|
||||
'csv',
|
||||
'tsv',
|
||||
'xml',
|
||||
'yaml',
|
||||
'yml',
|
||||
'toml',
|
||||
'ini',
|
||||
'log',
|
||||
'html',
|
||||
'htm',
|
||||
'css',
|
||||
'js',
|
||||
'mjs',
|
||||
'cjs',
|
||||
'ts',
|
||||
'tsx',
|
||||
'jsx',
|
||||
'py',
|
||||
'rb',
|
||||
'go',
|
||||
'rs',
|
||||
'java',
|
||||
'kt',
|
||||
'swift',
|
||||
'c',
|
||||
'h',
|
||||
'cpp',
|
||||
'hpp',
|
||||
'sh',
|
||||
'zsh',
|
||||
'bash',
|
||||
'sql',
|
||||
'svg',
|
||||
])
|
||||
|
||||
const IMAGE_EXTENSIONS = new Set([
|
||||
'png',
|
||||
'jpg',
|
||||
'jpeg',
|
||||
'gif',
|
||||
'webp',
|
||||
'bmp',
|
||||
'ico',
|
||||
'heic',
|
||||
'heif',
|
||||
])
|
||||
|
||||
/** Best-effort kind based on extension only. Server's preview API
|
||||
* is the source of truth for actual rendering — this is just for
|
||||
* picking an icon / sort hint without a network round-trip. */
|
||||
export function inferFileKind(path: string): FileKind {
|
||||
const ext = extensionOf(path).toLowerCase()
|
||||
if (ext === 'pdf') return 'pdf'
|
||||
if (IMAGE_EXTENSIONS.has(ext)) return 'image'
|
||||
if (TEXT_EXTENSIONS.has(ext)) return 'text'
|
||||
return 'binary'
|
||||
}
|
||||
|
||||
/** Plain extension without the leading dot. Empty string when none. */
|
||||
export function extensionOf(path: string): string {
|
||||
const dot = path.lastIndexOf('.')
|
||||
if (dot === -1) return ''
|
||||
const slash = path.lastIndexOf('/')
|
||||
if (dot < slash) return ''
|
||||
return path.slice(dot + 1)
|
||||
}
|
||||
|
||||
/** File name (final path segment), no directory prefix. */
|
||||
export function basenameOf(path: string): string {
|
||||
const slash = path.lastIndexOf('/')
|
||||
return slash === -1 ? path : path.slice(slash + 1)
|
||||
}
|
||||
|
||||
const SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB'] as const
|
||||
|
||||
/** "2.4 MB" / "340 KB" / "78 B" — for the artifact card's right-side
|
||||
* metadata. Not localised; the rail uses one space + the unit. */
|
||||
export function formatFileSize(bytes: number): string {
|
||||
if (!Number.isFinite(bytes) || bytes < 0) return '—'
|
||||
if (bytes < 1024) return `${bytes} ${SIZE_UNITS[0]}`
|
||||
let value = bytes
|
||||
let unit = 0
|
||||
while (value >= 1024 && unit < SIZE_UNITS.length - 1) {
|
||||
value /= 1024
|
||||
unit += 1
|
||||
}
|
||||
// 1-digit precision below 10, integer above — feels less noisy.
|
||||
const formatted = value < 10 ? value.toFixed(1) : Math.round(value).toString()
|
||||
return `${formatted} ${SIZE_UNITS[unit]}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the per-file download URL using the same agent-api root the
|
||||
* rest of the harness hits. Returned URL is already absolute.
|
||||
*/
|
||||
export function buildFileDownloadUrl(baseUrl: string, fileId: string): string {
|
||||
return buildAgentApiUrl(
|
||||
baseUrl,
|
||||
`/files/${encodeURIComponent(fileId)}/download`,
|
||||
)
|
||||
}
|
||||
32
packages/browseros-agent/apps/agent/lib/agent-files/index.ts
Normal file
32
packages/browseros-agent/apps/agent/lib/agent-files/index.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
export {
|
||||
basenameOf,
|
||||
buildFileDownloadUrl,
|
||||
extensionOf,
|
||||
type FileKind,
|
||||
formatFileSize,
|
||||
inferFileKind,
|
||||
} from './file-helpers'
|
||||
export type {
|
||||
BinaryFilePreview,
|
||||
FilePreview,
|
||||
FilePreviewKind,
|
||||
ImageFilePreview,
|
||||
MissingFilePreview,
|
||||
PdfFilePreview,
|
||||
ProducedFile,
|
||||
ProducedFilesRailGroup,
|
||||
TextFilePreview,
|
||||
} from './types'
|
||||
export {
|
||||
useAgentOutputs,
|
||||
useAgentTurnFiles,
|
||||
useInvalidateAgentOutputs,
|
||||
useRefreshAgentOutputs,
|
||||
} from './useAgentOutputs'
|
||||
export { useFilePreview } from './useFilePreview'
|
||||
75
packages/browseros-agent/apps/agent/lib/agent-files/types.ts
Normal file
75
packages/browseros-agent/apps/agent/lib/agent-files/types.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Wire types shared by the inline artifact card and the per-agent
|
||||
* Outputs rail. These mirror `ProducedFileEntry` /
|
||||
* `ProducedFilesRailGroup` on the server and the `FilePreview`
|
||||
* discriminated union from `apps/server/src/api/services/openclaw/file-preview.ts`.
|
||||
*
|
||||
* The schema mirror is deliberate (vs sharing a workspace package)
|
||||
* because the server keeps the on-disk row shape — `agentDefinitionId`,
|
||||
* `sessionKey` — out of the wire payload. Dropping those columns at the
|
||||
* type boundary keeps the client honest about what it can refer to.
|
||||
*/
|
||||
|
||||
export interface ProducedFile {
|
||||
id: string
|
||||
/** Workspace-relative POSIX path. */
|
||||
path: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
/** Server clock when the file was first attributed to its turn. */
|
||||
createdAt: number
|
||||
detectedBy: 'diff' | 'tool'
|
||||
}
|
||||
|
||||
export interface ProducedFilesRailGroup {
|
||||
turnId: string
|
||||
/** First non-blank line of the user prompt that initiated this turn. */
|
||||
turnPrompt: string
|
||||
createdAt: number
|
||||
files: ProducedFile[]
|
||||
}
|
||||
|
||||
export type FilePreviewKind = 'text' | 'image' | 'pdf' | 'binary' | 'missing'
|
||||
|
||||
interface BasePreview {
|
||||
kind: FilePreviewKind
|
||||
mimeType: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
export interface TextFilePreview extends BasePreview {
|
||||
kind: 'text'
|
||||
snippet: string
|
||||
/** True when the on-disk file is larger than the server's snippet cap. */
|
||||
truncated: boolean
|
||||
}
|
||||
|
||||
export interface ImageFilePreview extends BasePreview {
|
||||
kind: 'image'
|
||||
/** Base64 data URL (incl. `data:` prefix). Suitable for `<img src>`. */
|
||||
dataUrl: string
|
||||
}
|
||||
|
||||
export interface PdfFilePreview extends BasePreview {
|
||||
kind: 'pdf'
|
||||
}
|
||||
|
||||
export interface BinaryFilePreview extends BasePreview {
|
||||
kind: 'binary'
|
||||
}
|
||||
|
||||
export interface MissingFilePreview {
|
||||
kind: 'missing'
|
||||
}
|
||||
|
||||
export type FilePreview =
|
||||
| TextFilePreview
|
||||
| ImageFilePreview
|
||||
| PdfFilePreview
|
||||
| BinaryFilePreview
|
||||
| MissingFilePreview
|
||||
@@ -0,0 +1,166 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* React Query hooks backing the per-agent Outputs rail and the
|
||||
* inline artifact card.
|
||||
*
|
||||
* Live updates: the consumer of `useAgentConversation` (see Phase 5)
|
||||
* is expected to call `useInvalidateAgentOutputs(agentId)` whenever
|
||||
* an assistant turn completes, so the rail picks up the new
|
||||
* `produced_files` rows the server attributed during that turn.
|
||||
* No SSE channel here — invalidation off the existing chat-stream
|
||||
* completion is enough for v1.
|
||||
*/
|
||||
|
||||
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
|
||||
import {
|
||||
AGENT_QUERY_KEYS,
|
||||
agentsFetch,
|
||||
} from '@/entrypoints/app/agents/useAgents'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import type { ProducedFile, ProducedFilesRailGroup } from './types'
|
||||
|
||||
interface OutputsResponse {
|
||||
groups: ProducedFilesRailGroup[]
|
||||
}
|
||||
|
||||
interface TurnFilesResponse {
|
||||
files: ProducedFile[]
|
||||
}
|
||||
|
||||
export function useAgentOutputs(agentId: string, enabled = true) {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: urlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useQuery<ProducedFilesRailGroup[], Error>({
|
||||
queryKey: [AGENT_QUERY_KEYS.agentOutputs, baseUrl, agentId],
|
||||
queryFn: async () => {
|
||||
const data = await agentsFetch<OutputsResponse>(
|
||||
baseUrl as string,
|
||||
`/${encodeURIComponent(agentId)}/files`,
|
||||
)
|
||||
return data.groups ?? []
|
||||
},
|
||||
enabled: Boolean(baseUrl) && !urlLoading && enabled && Boolean(agentId),
|
||||
})
|
||||
|
||||
return {
|
||||
groups: query.data ?? [],
|
||||
loading: query.isLoading || urlLoading,
|
||||
error: query.error ?? urlError,
|
||||
refetch: query.refetch,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-turn fetch for the inline artifact card. Used both as the
|
||||
* fallback when an SSE `produced_files` event was missed, and to
|
||||
* rehydrate a turn the user scrolled back to.
|
||||
*/
|
||||
export function useAgentTurnFiles(
|
||||
agentId: string,
|
||||
turnId: string | null,
|
||||
enabled = true,
|
||||
) {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: urlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useQuery<ProducedFile[], Error>({
|
||||
queryKey: [AGENT_QUERY_KEYS.agentTurnFiles, baseUrl, agentId, turnId],
|
||||
queryFn: async () => {
|
||||
const data = await agentsFetch<TurnFilesResponse>(
|
||||
baseUrl as string,
|
||||
`/${encodeURIComponent(agentId)}/files/turn/${encodeURIComponent(
|
||||
turnId as string,
|
||||
)}`,
|
||||
)
|
||||
return data.files ?? []
|
||||
},
|
||||
enabled:
|
||||
Boolean(baseUrl) &&
|
||||
!urlLoading &&
|
||||
enabled &&
|
||||
Boolean(agentId) &&
|
||||
Boolean(turnId),
|
||||
})
|
||||
|
||||
return {
|
||||
files: query.data ?? [],
|
||||
loading: query.isLoading || urlLoading,
|
||||
error: query.error ?? urlError,
|
||||
refetch: query.refetch,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a callable that invalidates outputs / turn-files queries
|
||||
* for one agent across any baseUrl. Call after an assistant turn
|
||||
* completes so the rail (and the inline file-card strip) pick up
|
||||
* the new attributed rows. Cheap when the queries aren't mounted
|
||||
* — react-query just marks the cached value stale.
|
||||
*
|
||||
* Implementation note: react-query's `invalidateQueries({ queryKey })`
|
||||
* does positional partial-match, so passing `undefined` as the
|
||||
* baseUrl placeholder does NOT match a cached `[…, baseUrl, …]`
|
||||
* key — the cache stayed stale. Use a predicate so we ignore the
|
||||
* baseUrl position entirely.
|
||||
*/
|
||||
export function useInvalidateAgentOutputs() {
|
||||
const queryClient = useQueryClient()
|
||||
return async (agentId: string, turnId?: string) => {
|
||||
await Promise.all([
|
||||
queryClient.invalidateQueries({
|
||||
predicate: (query) => {
|
||||
const key = query.queryKey
|
||||
return (
|
||||
Array.isArray(key) &&
|
||||
key[0] === AGENT_QUERY_KEYS.agentOutputs &&
|
||||
key[2] === agentId
|
||||
)
|
||||
},
|
||||
}),
|
||||
queryClient.invalidateQueries({
|
||||
predicate: (query) => {
|
||||
const key = query.queryKey
|
||||
if (
|
||||
!Array.isArray(key) ||
|
||||
key[0] !== AGENT_QUERY_KEYS.agentTurnFiles ||
|
||||
key[2] !== agentId
|
||||
) {
|
||||
return false
|
||||
}
|
||||
// When a turnId was supplied, scope to just that turn's
|
||||
// entry. Otherwise flush every cached turn for this agent.
|
||||
return turnId ? key[3] === turnId : true
|
||||
},
|
||||
}),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tiny mutation wrapper so the Outputs rail's "Refresh" button can
|
||||
* surface an `isPending` indicator while the new query is in flight.
|
||||
* No body — just triggers `refetch` on the rail's query for this
|
||||
* agent and resolves when it settles.
|
||||
*/
|
||||
export function useRefreshAgentOutputs(agentId: string) {
|
||||
const queryClient = useQueryClient()
|
||||
const { baseUrl } = useAgentServerUrl()
|
||||
return useMutation({
|
||||
mutationFn: async () => {
|
||||
await queryClient.refetchQueries({
|
||||
queryKey: [AGENT_QUERY_KEYS.agentOutputs, baseUrl, agentId],
|
||||
exact: true,
|
||||
})
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Single-file preview hook used by the inline artifact card and the
|
||||
* Outputs rail's preview Sheet. Always opt-in (`enabled`) — the
|
||||
* preview is fetched only when the user clicks a row, never
|
||||
* eagerly.
|
||||
*/
|
||||
|
||||
import { useQuery } from '@tanstack/react-query'
|
||||
import {
|
||||
AGENT_QUERY_KEYS,
|
||||
agentsFetch,
|
||||
} from '@/entrypoints/app/agents/useAgents'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import type { FilePreview } from './types'
|
||||
|
||||
export function useFilePreview(fileId: string | null, enabled = true) {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: urlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useQuery<FilePreview, Error>({
|
||||
queryKey: [AGENT_QUERY_KEYS.filePreview, baseUrl, fileId],
|
||||
queryFn: async () => {
|
||||
return agentsFetch<FilePreview>(
|
||||
baseUrl as string,
|
||||
`/files/${encodeURIComponent(fileId as string)}/preview`,
|
||||
)
|
||||
},
|
||||
enabled: Boolean(baseUrl) && !urlLoading && enabled && Boolean(fileId),
|
||||
// Previews are immutable for a given fileId — once loaded, never
|
||||
// refetch on focus / reconnect. They go stale only when the
|
||||
// underlying file is removed (rare in v1; no rename / delete).
|
||||
staleTime: Infinity,
|
||||
gcTime: 5 * 60 * 1000,
|
||||
})
|
||||
|
||||
return {
|
||||
preview: query.data ?? null,
|
||||
loading: query.isLoading || urlLoading,
|
||||
error: query.error ?? urlError,
|
||||
refetch: query.refetch,
|
||||
}
|
||||
}
|
||||
96
packages/browseros-agent/apps/agent/lib/attachments.test.ts
Normal file
96
packages/browseros-agent/apps/agent/lib/attachments.test.ts
Normal file
@@ -0,0 +1,96 @@
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import { stageAttachment } from './attachments'
|
||||
|
||||
function restoreGlobal(name: string, value: unknown) {
|
||||
if (value === undefined) {
|
||||
Reflect.deleteProperty(globalThis, name)
|
||||
return
|
||||
}
|
||||
Reflect.set(globalThis, name, value)
|
||||
}
|
||||
|
||||
describe('stageAttachment', () => {
|
||||
it('stages pasted clipboard images that do not have a filename', async () => {
|
||||
const file = new File([new Uint8Array([1, 2, 3])], '', {
|
||||
type: 'image/png',
|
||||
})
|
||||
|
||||
const result = await stageAttachment(file)
|
||||
|
||||
expect(result.ok).toBe(true)
|
||||
if (!result.ok) throw new Error(result.error.message)
|
||||
expect(result.attachment.kind).toBe('image')
|
||||
expect(result.attachment.name).toBe('image')
|
||||
expect(result.attachment.mediaType).toBe('image/png')
|
||||
expect(result.attachment.dataUrl).toStartWith('data:image/png;base64,')
|
||||
expect(result.attachment.payload).toMatchObject({
|
||||
kind: 'image',
|
||||
mediaType: 'image/png',
|
||||
dataUrl: result.attachment.dataUrl,
|
||||
})
|
||||
})
|
||||
|
||||
it('uses the recompressed blob media type for large pasted images', async () => {
|
||||
const originalCreateImageBitmap = Reflect.get(
|
||||
globalThis,
|
||||
'createImageBitmap',
|
||||
)
|
||||
const originalOffscreenCanvas = Reflect.get(globalThis, 'OffscreenCanvas')
|
||||
const originalHTMLCanvasElement = Reflect.get(
|
||||
globalThis,
|
||||
'HTMLCanvasElement',
|
||||
)
|
||||
|
||||
class FakeOffscreenCanvas {
|
||||
width: number
|
||||
height: number
|
||||
|
||||
constructor(width: number, height: number) {
|
||||
this.width = width
|
||||
this.height = height
|
||||
}
|
||||
|
||||
getContext() {
|
||||
return {
|
||||
drawImage() {},
|
||||
}
|
||||
}
|
||||
|
||||
async convertToBlob(options: { type?: string }) {
|
||||
return new Blob([new Uint8Array([9, 8, 7])], {
|
||||
type: options.type ?? 'image/jpeg',
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
Reflect.set(globalThis, 'createImageBitmap', async () => ({
|
||||
width: 4096,
|
||||
height: 2048,
|
||||
close() {},
|
||||
}))
|
||||
Reflect.set(globalThis, 'OffscreenCanvas', FakeOffscreenCanvas)
|
||||
Reflect.set(globalThis, 'HTMLCanvasElement', class HTMLCanvasElement {})
|
||||
|
||||
const file = new File([new Uint8Array(2 * 1024 * 1024)], 'shot.png', {
|
||||
type: 'image/png',
|
||||
})
|
||||
|
||||
const result = await stageAttachment(file)
|
||||
|
||||
expect(result.ok).toBe(true)
|
||||
if (!result.ok) throw new Error(result.error.message)
|
||||
expect(result.attachment.mediaType).toBe('image/jpeg')
|
||||
expect(result.attachment.dataUrl).toStartWith('data:image/jpeg;base64,')
|
||||
expect(result.attachment.payload).toMatchObject({
|
||||
kind: 'image',
|
||||
mediaType: 'image/jpeg',
|
||||
dataUrl: result.attachment.dataUrl,
|
||||
})
|
||||
} finally {
|
||||
restoreGlobal('createImageBitmap', originalCreateImageBitmap)
|
||||
restoreGlobal('OffscreenCanvas', originalOffscreenCanvas)
|
||||
restoreGlobal('HTMLCanvasElement', originalHTMLCanvasElement)
|
||||
}
|
||||
})
|
||||
})
|
||||
@@ -100,6 +100,7 @@ export async function stageAttachment(
|
||||
try {
|
||||
const compressed = await compressImageIfNeeded(file)
|
||||
const dataUrl = await readAsDataUrl(compressed)
|
||||
const encodedMediaType = compressed.type || mediaType
|
||||
// Rough byte ceiling — `data:image/png;base64,...` doubles size with
|
||||
// base64. Reject early so we never POST something the route will 400.
|
||||
if (dataUrl.length > MAX_IMAGE_BYTES * 2) {
|
||||
@@ -118,12 +119,12 @@ export async function stageAttachment(
|
||||
attachment: {
|
||||
id: makeId(),
|
||||
kind: 'image',
|
||||
mediaType,
|
||||
mediaType: encodedMediaType,
|
||||
name: file.name || 'image',
|
||||
dataUrl,
|
||||
payload: {
|
||||
kind: 'image',
|
||||
mediaType,
|
||||
mediaType: encodedMediaType,
|
||||
dataUrl,
|
||||
name: file.name || undefined,
|
||||
},
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import {
|
||||
cloudSyncHelpUrl,
|
||||
cloudSyncSignInLinks,
|
||||
privacyPolicyUrl,
|
||||
termsOfServiceUrl,
|
||||
} from './productUrls'
|
||||
|
||||
describe('cloud sync sign-in links', () => {
|
||||
it('points to the public legal and cloud sync documentation URLs', () => {
|
||||
expect(termsOfServiceUrl).toBe('https://browseros.com/terms')
|
||||
expect(privacyPolicyUrl).toBe('https://browseros.com/privacy')
|
||||
expect(cloudSyncHelpUrl).toBe(
|
||||
'https://docs.browseros.com/features/sync-to-cloud',
|
||||
)
|
||||
})
|
||||
|
||||
it('includes legal and cloud sync documentation links in display order', () => {
|
||||
expect(cloudSyncSignInLinks).toEqual([
|
||||
{ label: 'Terms of Service', url: termsOfServiceUrl },
|
||||
{ label: 'Privacy Policy', url: privacyPolicyUrl },
|
||||
{ label: 'Learn more about cloud sync', url: cloudSyncHelpUrl },
|
||||
])
|
||||
})
|
||||
})
|
||||
@@ -23,6 +23,26 @@ export const githubOrgUrl = 'https://github.com/browseros-ai'
|
||||
*/
|
||||
export const privacyPolicyUrl = 'https://browseros.com/privacy'
|
||||
|
||||
/**
|
||||
* @public
|
||||
*/
|
||||
export const termsOfServiceUrl = 'https://browseros.com/terms'
|
||||
|
||||
/**
|
||||
* @public
|
||||
*/
|
||||
export const cloudSyncHelpUrl =
|
||||
'https://docs.browseros.com/features/sync-to-cloud'
|
||||
|
||||
/**
|
||||
* @public
|
||||
*/
|
||||
export const cloudSyncSignInLinks = [
|
||||
{ label: 'Terms of Service', url: termsOfServiceUrl },
|
||||
{ label: 'Privacy Policy', url: privacyPolicyUrl },
|
||||
{ label: 'Learn more about cloud sync', url: cloudSyncHelpUrl },
|
||||
] as const
|
||||
|
||||
/**
|
||||
* @public
|
||||
*/
|
||||
|
||||
@@ -2,7 +2,10 @@ import { storage } from '@wxt-dev/storage'
|
||||
import type { UIMessage } from 'ai'
|
||||
import { useEffect, useState } from 'react'
|
||||
import { useSessionInfo } from '../auth/sessionStorage'
|
||||
import { removeConversationExecutionHistory } from '../execution-history/storage'
|
||||
import {
|
||||
clearConversationExecutionHistory,
|
||||
removeConversationExecutionHistory,
|
||||
} from '../execution-history/storage'
|
||||
import { uploadConversationsToGraphql } from './uploadConversationsToGraphql'
|
||||
|
||||
const MAX_CONVERSATIONS = 50
|
||||
@@ -46,6 +49,11 @@ export function useConversations() {
|
||||
await removeConversationExecutionHistory(id)
|
||||
}
|
||||
|
||||
const clearConversations = async () => {
|
||||
await conversationStorage.setValue([])
|
||||
await clearConversationExecutionHistory()
|
||||
}
|
||||
|
||||
const saveConversation = async (id: string, messages: UIMessage[]) => {
|
||||
const current = (await conversationStorage.getValue()) ?? []
|
||||
const existingIndex = current.findIndex((c) => c.id === id)
|
||||
@@ -90,6 +98,7 @@ export function useConversations() {
|
||||
return {
|
||||
conversations,
|
||||
removeConversation,
|
||||
clearConversations,
|
||||
saveConversation,
|
||||
getConversation,
|
||||
}
|
||||
|
||||
@@ -82,6 +82,10 @@ export async function removeConversationExecutionHistory(
|
||||
await executionHistoryStorage.setValue(rest)
|
||||
}
|
||||
|
||||
export async function clearConversationExecutionHistory(): Promise<void> {
|
||||
await executionHistoryStorage.setValue({})
|
||||
}
|
||||
|
||||
export async function removeConversationExecutionTask(args: {
|
||||
conversationId: string
|
||||
taskId: string
|
||||
|
||||
@@ -0,0 +1,125 @@
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
import {
|
||||
analyzeWorkflowUsage,
|
||||
detectWorkflowAdvisorCommand,
|
||||
formatWorkflowAnalysisResponse,
|
||||
normalizeToolSequence,
|
||||
} from './advisor'
|
||||
import type { WorkflowUsageRecord } from './types'
|
||||
|
||||
describe('workflow usage advisor', () => {
|
||||
it('detects explicit workflow advisor commands only', () => {
|
||||
expect(detectWorkflowAdvisorCommand('analyze my workflow')).toBe('analyze')
|
||||
expect(
|
||||
detectWorkflowAdvisorCommand('what patterns do you see in my workflow?'),
|
||||
).toBe('analyze')
|
||||
expect(
|
||||
detectWorkflowAdvisorCommand('what patterns do you see in this log?'),
|
||||
).toBeNull()
|
||||
expect(detectWorkflowAdvisorCommand('what patterns do you see?')).toBeNull()
|
||||
expect(
|
||||
detectWorkflowAdvisorCommand('what can be automated from my tool usage?'),
|
||||
).toBe('analyze')
|
||||
expect(
|
||||
detectWorkflowAdvisorCommand('what can be automated in this code?'),
|
||||
).toBeNull()
|
||||
expect(detectWorkflowAdvisorCommand('suggest skills')).toBeNull()
|
||||
expect(
|
||||
detectWorkflowAdvisorCommand('suggest skills from my workflow usage'),
|
||||
).toBe('analyze')
|
||||
expect(
|
||||
detectWorkflowAdvisorCommand('what workflow usage data is stored?'),
|
||||
).toBe('view')
|
||||
expect(detectWorkflowAdvisorCommand('show workflow usage data')).toBe(
|
||||
'view',
|
||||
)
|
||||
expect(detectWorkflowAdvisorCommand('clear skill suggestion data')).toBe(
|
||||
'clear',
|
||||
)
|
||||
expect(detectWorkflowAdvisorCommand('summarize this page')).toBeNull()
|
||||
})
|
||||
|
||||
it('assigns suggestion ids after ranking', () => {
|
||||
const analysis = analyzeWorkflowUsage([
|
||||
record('1', ['search', 'open'], 100),
|
||||
record('2', ['search', 'open'], 200),
|
||||
record('3', ['new_page', 'navigate', 'get_page_content'], 300),
|
||||
record('4', ['new_page', 'navigate', 'get_page_content'], 400),
|
||||
record('5', ['new_page', 'navigate', 'get_page_content'], 500),
|
||||
])
|
||||
|
||||
expect(analysis.suggestions.map((suggestion) => suggestion.id)).toEqual([
|
||||
'workflow-1',
|
||||
'workflow-2',
|
||||
])
|
||||
expect(analysis.suggestions[0]).toMatchObject({
|
||||
id: 'workflow-1',
|
||||
runCount: 3,
|
||||
pattern: ['new_page', 'navigate', 'get_page_content'],
|
||||
})
|
||||
expect(analysis.suggestions[1]).toMatchObject({
|
||||
id: 'workflow-2',
|
||||
runCount: 2,
|
||||
pattern: ['search', 'open'],
|
||||
})
|
||||
})
|
||||
|
||||
it('keeps workflow usage view commands separate from analysis', () => {
|
||||
expect(detectWorkflowAdvisorCommand('show workflow usage data')).toBe(
|
||||
'view',
|
||||
)
|
||||
expect(detectWorkflowAdvisorCommand('list workflow patterns')).toBe('view')
|
||||
expect(detectWorkflowAdvisorCommand('analyze workflow patterns')).toBe(
|
||||
'analyze',
|
||||
)
|
||||
})
|
||||
|
||||
it('normalizes command sequences without retaining repeated adjacent tools', () => {
|
||||
expect(normalizeToolSequence([' new_page ', 'new_page', 'open'])).toEqual([
|
||||
'new_page',
|
||||
'open',
|
||||
])
|
||||
})
|
||||
|
||||
it('suggests repeated local tool-name patterns', () => {
|
||||
const analysis = analyzeWorkflowUsage([
|
||||
record('1', ['new_page', 'navigate', 'get_page_content'], 100),
|
||||
record('2', ['new_page', 'navigate', 'get_page_content'], 200),
|
||||
record('3', ['search', 'open'], 300),
|
||||
])
|
||||
|
||||
expect(analysis.totalRuns).toBe(3)
|
||||
expect(analysis.suggestions).toHaveLength(1)
|
||||
expect(analysis.suggestions[0]).toMatchObject({
|
||||
runCount: 2,
|
||||
pattern: ['new_page', 'navigate', 'get_page_content'],
|
||||
})
|
||||
})
|
||||
|
||||
it('formats concrete suggestions with a privacy note', () => {
|
||||
const response = formatWorkflowAnalysisResponse(
|
||||
analyzeWorkflowUsage([
|
||||
record('1', ['new_page', 'navigate', 'get_page_content'], 100),
|
||||
record('2', ['new_page', 'navigate', 'get_page_content'], 200),
|
||||
]),
|
||||
)
|
||||
|
||||
expect(response).toContain('Pattern: Open page -> Navigate -> Read page')
|
||||
expect(response).toContain('Create a "Open Page to Read Page Skill" skill')
|
||||
expect(response).toContain('does not include URLs')
|
||||
expect(response).toContain('tool inputs')
|
||||
})
|
||||
})
|
||||
|
||||
function record(
|
||||
id: string,
|
||||
toolNames: string[],
|
||||
recordedAt: number,
|
||||
): WorkflowUsageRecord {
|
||||
return {
|
||||
id,
|
||||
source: 'sidepanel-chat',
|
||||
recordedAt,
|
||||
toolNames,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,253 @@
|
||||
import type {
|
||||
WorkflowSkillSuggestion,
|
||||
WorkflowUsageAnalysis,
|
||||
WorkflowUsageRecord,
|
||||
} from './types'
|
||||
|
||||
export type WorkflowAdvisorCommand = 'analyze' | 'view' | 'clear'
|
||||
|
||||
const DEFAULT_MIN_RUNS = 2
|
||||
const DEFAULT_SUGGESTION_LIMIT = 3
|
||||
const MIN_PATTERN_LENGTH = 2
|
||||
const MAX_PATTERN_LENGTH = 8
|
||||
|
||||
const PRIVACY_NOTE =
|
||||
'This analysis uses only local tool-name sequences. BrowserOS does not include URLs, page content, prompts, tool inputs, or tool outputs in this workflow pattern data.'
|
||||
|
||||
const TOOL_LABELS: Record<string, string> = {
|
||||
click: 'Click',
|
||||
extract_data: 'Extract data',
|
||||
filesystem_read: 'Read file',
|
||||
filesystem_write: 'Write file',
|
||||
get_page_content: 'Read page',
|
||||
new_page: 'Open page',
|
||||
navigate: 'Navigate',
|
||||
open: 'Open',
|
||||
screenshot: 'Screenshot',
|
||||
search: 'Search',
|
||||
type: 'Type',
|
||||
}
|
||||
|
||||
function normalizeCommandText(text: string): string {
|
||||
return text.toLowerCase().replace(/\s+/g, ' ').trim()
|
||||
}
|
||||
|
||||
export function detectWorkflowAdvisorCommand(
|
||||
text: string,
|
||||
): WorkflowAdvisorCommand | null {
|
||||
const normalized = normalizeCommandText(text)
|
||||
if (!normalized) return null
|
||||
|
||||
const mentionsWorkflowData =
|
||||
normalized.includes('workflow usage') ||
|
||||
normalized.includes('usage pattern') ||
|
||||
normalized.includes('workflow pattern') ||
|
||||
normalized.includes('skill suggestion')
|
||||
const mentionsWorkflowScope =
|
||||
mentionsWorkflowData ||
|
||||
normalized.includes('my workflow') ||
|
||||
normalized.includes('my workflows') ||
|
||||
normalized.includes('tool usage')
|
||||
|
||||
if (
|
||||
mentionsWorkflowData &&
|
||||
/\b(clear|delete|reset|forget)\b/.test(normalized)
|
||||
) {
|
||||
return 'clear'
|
||||
}
|
||||
|
||||
if (
|
||||
mentionsWorkflowData &&
|
||||
(/\b(show|view|list|display)\b/.test(normalized) ||
|
||||
normalized.includes('what workflow usage data'))
|
||||
) {
|
||||
return 'view'
|
||||
}
|
||||
|
||||
if (
|
||||
normalized.includes('analyze my workflow') ||
|
||||
normalized.includes('analyse my workflow') ||
|
||||
(normalized.includes('what patterns do you see') &&
|
||||
mentionsWorkflowScope) ||
|
||||
(normalized.includes('suggest skills') && mentionsWorkflowScope) ||
|
||||
normalized.includes('find skill suggestions') ||
|
||||
(normalized.includes('what can be automated') && mentionsWorkflowScope) ||
|
||||
normalized.includes('analyze workflow patterns') ||
|
||||
normalized.includes('analyse workflow patterns')
|
||||
) {
|
||||
return 'analyze'
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
function normalizeToolName(toolName: string): string {
|
||||
return toolName.trim().toLowerCase()
|
||||
}
|
||||
|
||||
export function normalizeToolSequence(toolNames: string[]): string[] {
|
||||
const sequence: string[] = []
|
||||
for (const rawName of toolNames) {
|
||||
const toolName = normalizeToolName(rawName)
|
||||
if (!toolName) continue
|
||||
if (sequence[sequence.length - 1] === toolName) continue
|
||||
sequence.push(toolName)
|
||||
}
|
||||
return sequence.slice(0, MAX_PATTERN_LENGTH)
|
||||
}
|
||||
|
||||
function labelTool(toolName: string): string {
|
||||
const normalized = normalizeToolName(toolName)
|
||||
const known = TOOL_LABELS[normalized]
|
||||
if (known) return known
|
||||
|
||||
return normalized
|
||||
.replace(/^(browser|tool|mcp)[_-]/, '')
|
||||
.replace(/[_-]+/g, ' ')
|
||||
.replace(/\b\w/g, (char) => char.toUpperCase())
|
||||
}
|
||||
|
||||
function titleCase(value: string): string {
|
||||
return value.replace(/\b\w/g, (char) => char.toUpperCase())
|
||||
}
|
||||
|
||||
function buildSuggestionTitle(pattern: string[]): string {
|
||||
const labels = pattern.map((toolName) => titleCase(labelTool(toolName)))
|
||||
const uniqueLabels = Array.from(new Set(labels))
|
||||
if (uniqueLabels.length <= 2) return `${uniqueLabels.join(' + ')} Skill`
|
||||
return `${uniqueLabels[0]} to ${uniqueLabels.at(-1)} Skill`
|
||||
}
|
||||
|
||||
function buildBenefit(pattern: string[]): string {
|
||||
const patternLabel = pattern.map(labelTool).join(' -> ')
|
||||
return `Turn the repeated ${patternLabel} sequence into reusable skill instructions.`
|
||||
}
|
||||
|
||||
function compareSuggestions(
|
||||
left: Omit<WorkflowSkillSuggestion, 'id'>,
|
||||
right: Omit<WorkflowSkillSuggestion, 'id'>,
|
||||
): number {
|
||||
return (
|
||||
right.runCount - left.runCount ||
|
||||
right.pattern.length - left.pattern.length ||
|
||||
right.lastUsedAt - left.lastUsedAt
|
||||
)
|
||||
}
|
||||
|
||||
export function analyzeWorkflowUsage(
|
||||
records: WorkflowUsageRecord[],
|
||||
options?: { minRuns?: number; limit?: number },
|
||||
): WorkflowUsageAnalysis {
|
||||
const minRuns = options?.minRuns ?? DEFAULT_MIN_RUNS
|
||||
const limit = options?.limit ?? DEFAULT_SUGGESTION_LIMIT
|
||||
const groups = new Map<
|
||||
string,
|
||||
{ pattern: string[]; runCount: number; lastUsedAt: number }
|
||||
>()
|
||||
|
||||
for (const record of records) {
|
||||
const pattern = normalizeToolSequence(record.toolNames)
|
||||
if (pattern.length < MIN_PATTERN_LENGTH) continue
|
||||
|
||||
const key = pattern.join('\u001f')
|
||||
const existing = groups.get(key)
|
||||
groups.set(key, {
|
||||
pattern,
|
||||
runCount: (existing?.runCount ?? 0) + 1,
|
||||
lastUsedAt: Math.max(existing?.lastUsedAt ?? 0, record.recordedAt),
|
||||
})
|
||||
}
|
||||
|
||||
const suggestions = Array.from(groups.values())
|
||||
.filter((group) => group.runCount >= minRuns)
|
||||
.map((group): Omit<WorkflowSkillSuggestion, 'id'> => {
|
||||
const pattern = group.pattern
|
||||
return {
|
||||
title: buildSuggestionTitle(pattern),
|
||||
runCount: group.runCount,
|
||||
pattern,
|
||||
lastUsedAt: group.lastUsedAt,
|
||||
benefit: buildBenefit(pattern),
|
||||
}
|
||||
})
|
||||
.sort(compareSuggestions)
|
||||
.slice(0, limit)
|
||||
.map((suggestion, index): WorkflowSkillSuggestion => {
|
||||
return {
|
||||
...suggestion,
|
||||
id: `workflow-${index + 1}`,
|
||||
}
|
||||
})
|
||||
|
||||
return {
|
||||
totalRuns: records.length,
|
||||
eligibleRuns: Array.from(groups.values()).reduce(
|
||||
(count, group) => count + group.runCount,
|
||||
0,
|
||||
),
|
||||
suggestions,
|
||||
}
|
||||
}
|
||||
|
||||
export function formatWorkflowAnalysisResponse(
|
||||
analysis: WorkflowUsageAnalysis,
|
||||
): string {
|
||||
if (analysis.suggestions.length === 0) {
|
||||
return [
|
||||
"I don't have enough repeated local tool patterns to suggest a custom skill yet.",
|
||||
'',
|
||||
PRIVACY_NOTE,
|
||||
'',
|
||||
`Tracked runs: ${analysis.totalRuns}. Eligible repeated-tool runs: ${analysis.eligibleRuns}.`,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
const suggestionLines = analysis.suggestions.map((suggestion, index) =>
|
||||
[
|
||||
`${index + 1}. ${suggestion.title} -> ${suggestion.runCount} times`,
|
||||
` Pattern: ${suggestion.pattern.map(labelTool).join(' -> ')}`,
|
||||
` Suggestion: Create a "${suggestion.title}" skill for this exact command sequence.`,
|
||||
` Benefit: ${suggestion.benefit}`,
|
||||
].join('\n'),
|
||||
)
|
||||
|
||||
return [
|
||||
`Found ${analysis.suggestions.length} potential automation${analysis.suggestions.length === 1 ? '' : 's'}:`,
|
||||
'',
|
||||
...suggestionLines,
|
||||
'',
|
||||
PRIVACY_NOTE,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
export function formatWorkflowUsageDataResponse(
|
||||
records: WorkflowUsageRecord[],
|
||||
): string {
|
||||
if (records.length === 0) {
|
||||
return [
|
||||
'No local workflow usage patterns are stored yet.',
|
||||
'',
|
||||
PRIVACY_NOTE,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
const recentRecords = records
|
||||
.slice()
|
||||
.sort((left, right) => right.recordedAt - left.recordedAt)
|
||||
.slice(0, 10)
|
||||
|
||||
return [
|
||||
`Stored local workflow runs: ${records.length}`,
|
||||
'',
|
||||
...recentRecords.map(
|
||||
(record, index) =>
|
||||
`${index + 1}. ${normalizeToolSequence(record.toolNames).map(labelTool).join(' -> ')}`,
|
||||
),
|
||||
'',
|
||||
PRIVACY_NOTE,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
export function formatWorkflowUsageClearedResponse(): string {
|
||||
return 'Cleared the local workflow usage pattern data. No URLs, page content, prompts, tool inputs, or tool outputs were stored in this data.'
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
import { beforeEach, describe, expect, it, mock } from 'bun:test'
|
||||
import type { WorkflowUsageRecord, WorkflowUsageStore } from './types'
|
||||
|
||||
let storedValue: WorkflowUsageStore | null = null
|
||||
|
||||
mock.module('@wxt-dev/storage', () => ({
|
||||
storage: {
|
||||
defineItem: () => ({
|
||||
getValue: async () => (storedValue ? structuredClone(storedValue) : null),
|
||||
setValue: async (value: WorkflowUsageStore) => {
|
||||
await Promise.resolve()
|
||||
storedValue = structuredClone(value)
|
||||
},
|
||||
}),
|
||||
},
|
||||
}))
|
||||
|
||||
const {
|
||||
clearWorkflowUsageRecords,
|
||||
getWorkflowUsageRecords,
|
||||
recordWorkflowUsage,
|
||||
} = await import('./storage')
|
||||
|
||||
describe('workflow usage storage', () => {
|
||||
beforeEach(() => {
|
||||
storedValue = { version: 1, records: [] }
|
||||
})
|
||||
|
||||
it('serializes concurrent record writes without dropping records', async () => {
|
||||
await Promise.all([
|
||||
recordWorkflowUsage(record('record-1', ['new_page', 'navigate'], 100)),
|
||||
recordWorkflowUsage(
|
||||
record('record-2', ['search', 'get_page_content'], 200),
|
||||
),
|
||||
])
|
||||
|
||||
expect((await getWorkflowUsageRecords()).map((item) => item.id)).toEqual([
|
||||
'record-1',
|
||||
'record-2',
|
||||
])
|
||||
})
|
||||
|
||||
it('keeps clear operations ordered with pending writes', async () => {
|
||||
const write = recordWorkflowUsage(
|
||||
record('record-1', ['new_page', 'navigate'], 100),
|
||||
)
|
||||
const clear = clearWorkflowUsageRecords()
|
||||
|
||||
await Promise.all([write, clear])
|
||||
|
||||
expect(await getWorkflowUsageRecords()).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
function record(
|
||||
id: string,
|
||||
toolNames: string[],
|
||||
recordedAt: number,
|
||||
): WorkflowUsageRecord {
|
||||
return {
|
||||
id,
|
||||
source: 'sidepanel-chat',
|
||||
recordedAt,
|
||||
toolNames,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,115 @@
|
||||
import { storage } from '@wxt-dev/storage'
|
||||
import type { ExecutionTaskRecord } from '@/lib/execution-history/types'
|
||||
import { normalizeToolSequence } from './advisor'
|
||||
import type {
|
||||
WorkflowUsageRecord,
|
||||
WorkflowUsageSource,
|
||||
WorkflowUsageStore,
|
||||
} from './types'
|
||||
|
||||
const MAX_WORKFLOW_USAGE_RECORDS = 300
|
||||
let pendingWorkflowUsageStorageUpdate: Promise<void> = Promise.resolve()
|
||||
|
||||
export const workflowUsageStorage = storage.defineItem<WorkflowUsageStore>(
|
||||
'local:workflowUsagePatterns',
|
||||
{
|
||||
fallback: { version: 1, records: [] },
|
||||
version: 1,
|
||||
},
|
||||
)
|
||||
|
||||
export function createWorkflowUsageRecord(input: {
|
||||
id: string
|
||||
source: WorkflowUsageSource
|
||||
toolNames: string[]
|
||||
recordedAt?: number
|
||||
}): WorkflowUsageRecord | null {
|
||||
const toolNames = normalizeToolSequence(input.toolNames)
|
||||
if (toolNames.length === 0) return null
|
||||
|
||||
return {
|
||||
id: input.id,
|
||||
source: input.source,
|
||||
recordedAt: input.recordedAt ?? Date.now(),
|
||||
toolNames,
|
||||
}
|
||||
}
|
||||
|
||||
export function createWorkflowUsageRecordFromExecutionTask(
|
||||
task: ExecutionTaskRecord,
|
||||
): WorkflowUsageRecord | null {
|
||||
return createWorkflowUsageRecord({
|
||||
id: `execution-task:${task.id}`,
|
||||
source: 'sidepanel-chat',
|
||||
recordedAt: Date.parse(task.completedAt ?? task.startedAt),
|
||||
toolNames: task.steps.map((step) => step.toolName),
|
||||
})
|
||||
}
|
||||
|
||||
export async function recordWorkflowUsage(
|
||||
record: WorkflowUsageRecord | null,
|
||||
): Promise<void> {
|
||||
if (!record) return
|
||||
|
||||
await enqueueWorkflowUsageStorageUpdate(async () => {
|
||||
const current = (await workflowUsageStorage.getValue()) ?? {
|
||||
version: 1,
|
||||
records: [],
|
||||
}
|
||||
await workflowUsageStorage.setValue(
|
||||
mergeWorkflowUsageRecord(current, record),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
function enqueueWorkflowUsageStorageUpdate(
|
||||
update: () => Promise<void>,
|
||||
): Promise<void> {
|
||||
const runUpdate = pendingWorkflowUsageStorageUpdate
|
||||
.catch(() => {
|
||||
// Keep later writes moving even if an earlier storage call failed.
|
||||
})
|
||||
.then(update)
|
||||
|
||||
pendingWorkflowUsageStorageUpdate = runUpdate.catch(() => {
|
||||
// Store the rejection for the caller while leaving the queue usable.
|
||||
})
|
||||
|
||||
return runUpdate
|
||||
}
|
||||
|
||||
function mergeWorkflowUsageRecord(
|
||||
current: WorkflowUsageStore | null | undefined,
|
||||
record: WorkflowUsageRecord,
|
||||
): WorkflowUsageStore {
|
||||
const store = current ?? {
|
||||
version: 1,
|
||||
records: [],
|
||||
}
|
||||
const recordsById = new Map(
|
||||
store.records.map((existing) => [existing.id, existing]),
|
||||
)
|
||||
recordsById.set(record.id, record)
|
||||
|
||||
const records = Array.from(recordsById.values())
|
||||
.sort((left, right) => left.recordedAt - right.recordedAt)
|
||||
.slice(-MAX_WORKFLOW_USAGE_RECORDS)
|
||||
|
||||
return { version: 1, records }
|
||||
}
|
||||
|
||||
export async function getWorkflowUsageRecords(): Promise<
|
||||
WorkflowUsageRecord[]
|
||||
> {
|
||||
await pendingWorkflowUsageStorageUpdate.catch(() => {
|
||||
// Preserve existing read behavior after a failed write.
|
||||
})
|
||||
const current = await workflowUsageStorage.getValue()
|
||||
return current?.records ?? []
|
||||
}
|
||||
|
||||
export async function clearWorkflowUsageRecords(): Promise<void> {
|
||||
await enqueueWorkflowUsageStorageUpdate(async () => {
|
||||
await workflowUsageStorage.setValue({ version: 1, records: [] })
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
export type WorkflowUsageSource = 'sidepanel-chat' | 'agent-harness-chat'
|
||||
|
||||
export interface WorkflowUsageRecord {
|
||||
id: string
|
||||
source: WorkflowUsageSource
|
||||
recordedAt: number
|
||||
toolNames: string[]
|
||||
}
|
||||
|
||||
export interface WorkflowUsageStore {
|
||||
version: 1
|
||||
records: WorkflowUsageRecord[]
|
||||
}
|
||||
|
||||
export interface WorkflowSkillSuggestion {
|
||||
id: string
|
||||
title: string
|
||||
runCount: number
|
||||
pattern: string[]
|
||||
lastUsedAt: number
|
||||
benefit: string
|
||||
}
|
||||
|
||||
export interface WorkflowUsageAnalysis {
|
||||
totalRuns: number
|
||||
eligibleRuns: number
|
||||
suggestions: WorkflowSkillSuggestion[]
|
||||
}
|
||||
@@ -503,7 +503,7 @@ async function scenarioConfig(): Promise<void> {
|
||||
await tearDown(s)
|
||||
return
|
||||
}
|
||||
const newValue = target.options![0].value
|
||||
const newValue = target.options?.[0].value
|
||||
console.log(`[config] setting configId=${target.id} value=${newValue}`)
|
||||
try {
|
||||
// @ts-expect-error - input shape varies
|
||||
|
||||
@@ -9,6 +9,7 @@ import { LLM_PROVIDERS } from '@browseros/shared/schemas/llm'
|
||||
import { createOpenRouter } from '@openrouter/ai-sdk-provider'
|
||||
import type { LanguageModel } from 'ai'
|
||||
import { createBrowserOSFetch } from '../lib/browseros-fetch'
|
||||
import { createGeminiComputerUseFetch } from '../lib/clients/llm/gemini-computer-use-fetch'
|
||||
import {
|
||||
createMockBrowserOSLanguageModel,
|
||||
shouldUseMockBrowserOSLLM,
|
||||
@@ -41,7 +42,12 @@ function createGoogleFactory(
|
||||
config: ResolvedAgentConfig,
|
||||
): (modelId: string) => unknown {
|
||||
if (!config.apiKey) throw new Error('Google provider requires apiKey')
|
||||
return createGoogleGenerativeAI({ apiKey: config.apiKey })
|
||||
const fetch = createGeminiComputerUseFetch(config.model)
|
||||
return createGoogleGenerativeAI({
|
||||
apiKey: config.apiKey,
|
||||
...(config.baseUrl && { baseURL: config.baseUrl }),
|
||||
...(fetch && { fetch }),
|
||||
})
|
||||
}
|
||||
|
||||
function createOpenRouterFactory(
|
||||
|
||||
@@ -35,15 +35,18 @@ import {
|
||||
type AgentDefinitionWithActivity,
|
||||
AgentHarnessService,
|
||||
type GatewayStatusSnapshot,
|
||||
HermesProviderConfigInvalidError,
|
||||
InvalidAgentUpdateError,
|
||||
MessageQueueFullError,
|
||||
type OpenClawProvisioner,
|
||||
OpenClawProvisionerUnavailableError,
|
||||
type ProducedFileEntry,
|
||||
type ProducedFilesRailGroup,
|
||||
type QueuedMessage,
|
||||
TurnAlreadyActiveError,
|
||||
UnknownAgentError,
|
||||
} from '../services/agents/agent-harness-service'
|
||||
import type { OpenClawGatewayChatClient } from '../services/openclaw/openclaw-gateway-chat-client'
|
||||
import type { FilePreview } from '../services/openclaw/file-preview'
|
||||
import type { Env } from '../types'
|
||||
import { resolveBrowserContextPageIds } from '../utils/resolve-browser-context-page-ids'
|
||||
|
||||
@@ -95,6 +98,23 @@ type AgentRouteService = {
|
||||
messageId: string
|
||||
}): Promise<boolean>
|
||||
listQueuedMessages(agentId: string): Promise<QueuedMessage[]>
|
||||
|
||||
// Files API — Phase 3 of TKT-762.
|
||||
listAgentFiles(
|
||||
agentId: string,
|
||||
options?: { limit?: number },
|
||||
): Promise<ProducedFilesRailGroup[]>
|
||||
listAgentFilesForTurn(
|
||||
agentId: string,
|
||||
turnId: string,
|
||||
): Promise<ProducedFileEntry[]>
|
||||
previewProducedFile(fileId: string): Promise<FilePreview | null>
|
||||
resolveProducedFileForDownload(fileId: string): Promise<{
|
||||
absolutePath: string
|
||||
fileName: string
|
||||
mimeType: string
|
||||
size: number
|
||||
} | null>
|
||||
}
|
||||
|
||||
type AgentRouteDeps = {
|
||||
@@ -109,18 +129,19 @@ type AgentRouteDeps = {
|
||||
openclawGateway?: OpenclawGatewayAccessor
|
||||
/**
|
||||
* Optional. Enables the image-attachment carve-out for OpenClaw
|
||||
* agents — image-bearing turns route through the gateway HTTP
|
||||
* `/v1/chat/completions` instead of the ACP bridge (which drops
|
||||
* image content blocks).
|
||||
*/
|
||||
openclawGatewayChat?: OpenClawGatewayChatClient
|
||||
/**
|
||||
* Required to dual-create/delete `openclaw` adapter agents on the
|
||||
* gateway side. Without this, openclaw create requests fail with 503.
|
||||
*/
|
||||
openclawProvisioner?: OpenClawProvisioner
|
||||
/** Optional override; defaults to a fresh in-memory checker. */
|
||||
adapterHealth?: AdapterHealthChecker
|
||||
/**
|
||||
* Optional listener attached to the constructed harness. Receives
|
||||
* turn lifecycle events for every running agent. Wired by the server
|
||||
* to feed OpenClaw's ClawSession dashboard from the same stream the
|
||||
* chat panel sees, so no second WS observer is needed.
|
||||
*/
|
||||
onTurnLifecycle?: import('../services/agents/agent-harness-service').TurnLifecycleListener
|
||||
}
|
||||
|
||||
type SidepanelAgentChatRequest = {
|
||||
@@ -139,267 +160,381 @@ export function createAgentRoutes(deps: AgentRouteDeps = {}) {
|
||||
new AgentHarnessService({
|
||||
browserosServerPort: deps.browserosServerPort,
|
||||
openclawGateway: deps.openclawGateway,
|
||||
openclawGatewayChat: deps.openclawGatewayChat,
|
||||
openclawProvisioner: deps.openclawProvisioner,
|
||||
})
|
||||
if (deps.onTurnLifecycle && service instanceof AgentHarnessService) {
|
||||
service.onTurnLifecycle(deps.onTurnLifecycle)
|
||||
}
|
||||
// One checker per route mount. Cached probes refresh every 5min;
|
||||
// tests can swap in an alternate via deps if needed.
|
||||
const adapterHealth = deps.adapterHealth ?? new AdapterHealthChecker()
|
||||
|
||||
return new Hono<Env>()
|
||||
.get('/adapters', async (c) => {
|
||||
const adapters = await Promise.all(
|
||||
AGENT_ADAPTER_CATALOG.map(async (descriptor) => ({
|
||||
...descriptor,
|
||||
health: await adapterHealth.getHealth(descriptor.id),
|
||||
})),
|
||||
)
|
||||
return c.json({ adapters })
|
||||
})
|
||||
.get('/', async (c) => {
|
||||
// Single round-trip the agents page consumes: enriched agents
|
||||
// (status + lastUsedAt) plus the gateway lifecycle snapshot the
|
||||
// GatewayStatusBar / GatewayStateCards / ControlPlaneAlert used
|
||||
// to fetch from `/claw/status`. Lets the page poll one endpoint.
|
||||
const [agents, gateway] = await Promise.all([
|
||||
service.listAgentsWithActivity(),
|
||||
service.getGatewayStatus(),
|
||||
])
|
||||
return c.json({ agents, gateway })
|
||||
})
|
||||
.post('/', async (c) => {
|
||||
const parsed = await parseCreateAgentBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
return c.json({ agent: await service.createAgent(parsed) })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/sidepanel/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseSidepanelAgentChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
try {
|
||||
const agent = await service.getAgent(agentId)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
|
||||
let browserContext = parsed.browserContext
|
||||
if (deps.browser) {
|
||||
browserContext = await resolveBrowserContextPageIds(
|
||||
deps.browser,
|
||||
browserContext,
|
||||
)
|
||||
}
|
||||
|
||||
const userContent = formatUserMessage(
|
||||
parsed.message,
|
||||
browserContext,
|
||||
parsed.selectedText,
|
||||
parsed.selectedTextSource,
|
||||
return (
|
||||
new Hono<Env>()
|
||||
.get('/adapters', async (c) => {
|
||||
const adapters = await Promise.all(
|
||||
AGENT_ADAPTER_CATALOG.map(async (descriptor) => ({
|
||||
...descriptor,
|
||||
health: await adapterHealth.getHealth(descriptor.id),
|
||||
})),
|
||||
)
|
||||
const message = parsed.userSystemPrompt?.trim()
|
||||
? `${parsed.userSystemPrompt.trim()}\n\n${userContent}`
|
||||
: userContent
|
||||
return c.json({ adapters })
|
||||
})
|
||||
.get('/', async (c) => {
|
||||
// Single round-trip the agents page consumes: enriched agents
|
||||
// (status + lastUsedAt) plus the gateway lifecycle snapshot the
|
||||
// GatewayStatusBar / GatewayStateCards / ControlPlaneAlert used
|
||||
// to fetch from `/claw/status`. Lets the page poll one endpoint.
|
||||
const [agents, gateway] = await Promise.all([
|
||||
service.listAgentsWithActivity(),
|
||||
service.getGatewayStatus(),
|
||||
])
|
||||
return c.json({ agents, gateway })
|
||||
})
|
||||
.post('/', async (c) => {
|
||||
const parsed = await parseCreateAgentBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
return c.json({ agent: await service.createAgent(parsed) })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/sidepanel/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseSidepanelAgentChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
try {
|
||||
const agent = await service.getAgent(agentId)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
|
||||
let browserContext = parsed.browserContext
|
||||
if (deps.browser) {
|
||||
browserContext = await resolveBrowserContextPageIds(
|
||||
deps.browser,
|
||||
browserContext,
|
||||
)
|
||||
}
|
||||
|
||||
const userContent = formatUserMessage(
|
||||
parsed.message,
|
||||
browserContext,
|
||||
parsed.selectedText,
|
||||
parsed.selectedTextSource,
|
||||
)
|
||||
const message = parsed.userSystemPrompt?.trim()
|
||||
? `${parsed.userSystemPrompt.trim()}\n\n${userContent}`
|
||||
: userContent
|
||||
|
||||
let started: { turnId: string; frames: ReadableStream<TurnFrame> }
|
||||
try {
|
||||
started = await service.startTurn({
|
||||
agentId: agent.id,
|
||||
message,
|
||||
cwd: parsed.userWorkingDir,
|
||||
})
|
||||
} catch (err) {
|
||||
if (err instanceof TurnAlreadyActiveError) {
|
||||
return c.json(
|
||||
{
|
||||
error: 'Turn already active',
|
||||
turnId: err.turnId,
|
||||
attachUrl: `/agents/${agent.id}/chat/stream?turnId=${err.turnId}`,
|
||||
},
|
||||
409,
|
||||
)
|
||||
}
|
||||
throw err
|
||||
}
|
||||
|
||||
let didRequestCancel = false
|
||||
const cancelStartedTurn = () => {
|
||||
if (didRequestCancel) return
|
||||
didRequestCancel = true
|
||||
service.cancelTurn({
|
||||
agentId: agent.id,
|
||||
turnId: started.turnId,
|
||||
reason: 'sidepanel stream cancelled',
|
||||
})
|
||||
}
|
||||
if (c.req.raw.signal.aborted) {
|
||||
cancelStartedTurn()
|
||||
} else {
|
||||
c.req.raw.signal.addEventListener('abort', cancelStartedTurn, {
|
||||
once: true,
|
||||
})
|
||||
}
|
||||
|
||||
const events = turnFramesToAgentEvents(started.frames, {
|
||||
onCancel: cancelStartedTurn,
|
||||
})
|
||||
|
||||
return createAcpUIMessageStreamResponse(events, {
|
||||
headers: {
|
||||
'X-Session-Id': 'main',
|
||||
'X-Turn-Id': started.turnId,
|
||||
},
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId', async (c) => {
|
||||
try {
|
||||
const agent = await service.getAgent(c.req.param('agentId'))
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId', async (c) => {
|
||||
try {
|
||||
return c.json({
|
||||
success: await service.deleteAgent(c.req.param('agentId')),
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.patch('/:agentId', async (c) => {
|
||||
const parsed = await parseAgentPatchBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const agent = await service.updateAgent(
|
||||
c.req.param('agentId'),
|
||||
parsed.patch,
|
||||
)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/sessions/main/history', async (c) => {
|
||||
try {
|
||||
return c.json(await service.getHistory(c.req.param('agentId')))
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
let started: { turnId: string; frames: ReadableStream<TurnFrame> }
|
||||
try {
|
||||
started = await service.startTurn({
|
||||
agentId: agent.id,
|
||||
message,
|
||||
cwd: parsed.userWorkingDir,
|
||||
agentId,
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
cwd: parsed.cwd,
|
||||
})
|
||||
} catch (err) {
|
||||
if (err instanceof TurnAlreadyActiveError) {
|
||||
// Caller can attach via GET /chat/stream?turnId=… instead.
|
||||
return c.json(
|
||||
{
|
||||
error: 'Turn already active',
|
||||
turnId: err.turnId,
|
||||
attachUrl: `/agents/${agent.id}/chat/stream?turnId=${err.turnId}`,
|
||||
attachUrl: `/agents/${agentId}/chat/stream?turnId=${err.turnId}`,
|
||||
},
|
||||
409,
|
||||
)
|
||||
}
|
||||
throw err
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
|
||||
let didRequestCancel = false
|
||||
const cancelStartedTurn = () => {
|
||||
if (didRequestCancel) return
|
||||
didRequestCancel = true
|
||||
service.cancelTurn({
|
||||
agentId: agent.id,
|
||||
turnId: started.turnId,
|
||||
reason: 'sidepanel stream cancelled',
|
||||
})
|
||||
}
|
||||
if (c.req.raw.signal.aborted) {
|
||||
cancelStartedTurn()
|
||||
} else {
|
||||
c.req.raw.signal.addEventListener('abort', cancelStartedTurn, {
|
||||
once: true,
|
||||
})
|
||||
}
|
||||
|
||||
const events = turnFramesToAgentEvents(started.frames, {
|
||||
onCancel: cancelStartedTurn,
|
||||
return streamTurnFrames(c, started.frames, {
|
||||
turnId: started.turnId,
|
||||
})
|
||||
|
||||
return createAcpUIMessageStreamResponse(events, {
|
||||
headers: {
|
||||
'X-Session-Id': 'main',
|
||||
'X-Turn-Id': started.turnId,
|
||||
},
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId', async (c) => {
|
||||
try {
|
||||
const agent = await service.getAgent(c.req.param('agentId'))
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId', async (c) => {
|
||||
try {
|
||||
return c.json({
|
||||
success: await service.deleteAgent(c.req.param('agentId')),
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.patch('/:agentId', async (c) => {
|
||||
const parsed = await parseAgentPatchBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const agent = await service.updateAgent(
|
||||
c.req.param('agentId'),
|
||||
parsed.patch,
|
||||
)
|
||||
if (!agent) return c.json({ error: 'Unknown agent' }, 404)
|
||||
return c.json({ agent })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/sessions/main/history', async (c) => {
|
||||
try {
|
||||
return c.json(await service.getHistory(c.req.param('agentId')))
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/chat', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const parsed = await parseChatBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
|
||||
let started: { turnId: string; frames: ReadableStream<TurnFrame> }
|
||||
try {
|
||||
started = await service.startTurn({
|
||||
agentId,
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
cwd: parsed.cwd,
|
||||
})
|
||||
} catch (err) {
|
||||
if (err instanceof TurnAlreadyActiveError) {
|
||||
// Caller can attach via GET /chat/stream?turnId=… instead.
|
||||
return c.json(
|
||||
{
|
||||
error: 'Turn already active',
|
||||
turnId: err.turnId,
|
||||
attachUrl: `/agents/${agentId}/chat/stream?turnId=${err.turnId}`,
|
||||
},
|
||||
409,
|
||||
)
|
||||
}
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
|
||||
return streamTurnFrames(c, started.frames, {
|
||||
turnId: started.turnId,
|
||||
})
|
||||
})
|
||||
.get('/:agentId/chat/active', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const info = service.getActiveTurn(agentId, 'main')
|
||||
return c.json({ active: info })
|
||||
})
|
||||
.get('/:agentId/chat/stream', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const url = new URL(c.req.url)
|
||||
const queryTurnId = url.searchParams.get('turnId')?.trim() || undefined
|
||||
const turnId =
|
||||
queryTurnId ?? service.getActiveTurn(agentId, 'main')?.turnId
|
||||
if (!turnId) {
|
||||
return c.json({ error: 'No active turn for this agent' }, 404)
|
||||
}
|
||||
const lastEventId =
|
||||
c.req.header('Last-Event-ID') ??
|
||||
url.searchParams.get('lastSeq') ??
|
||||
undefined
|
||||
const lastSeq = parseLastSeq(lastEventId)
|
||||
const frames = service.attachTurn({ turnId, lastSeq })
|
||||
if (!frames) {
|
||||
return c.json({ error: 'Unknown turn' }, 404)
|
||||
}
|
||||
return streamTurnFrames(c, frames, { turnId })
|
||||
})
|
||||
.post('/:agentId/chat/cancel', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const body = await readJsonBody(c)
|
||||
const turnId =
|
||||
'value' in body && typeof body.value.turnId === 'string'
|
||||
? body.value.turnId.trim() || undefined
|
||||
: undefined
|
||||
const reason =
|
||||
'value' in body && typeof body.value.reason === 'string'
|
||||
? body.value.reason
|
||||
: undefined
|
||||
const cancelled = service.cancelTurn({ agentId, turnId, reason })
|
||||
return c.json({ cancelled })
|
||||
})
|
||||
.get('/:agentId/queue', async (c) => {
|
||||
try {
|
||||
const queue = await service.listQueuedMessages(c.req.param('agentId'))
|
||||
return c.json({ queue })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/queue', async (c) => {
|
||||
const parsed = await parseEnqueueBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const queued = await service.enqueueMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
})
|
||||
return c.json({ queued })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId/queue/:messageId', async (c) => {
|
||||
try {
|
||||
const removed = await service.removeQueuedMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
messageId: c.req.param('messageId'),
|
||||
})
|
||||
if (!removed) return c.json({ error: 'Queued message not found' }, 404)
|
||||
return c.json({ removed })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/chat/active', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const info = service.getActiveTurn(agentId, 'main')
|
||||
return c.json({ active: info })
|
||||
})
|
||||
.get('/:agentId/chat/stream', (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const url = new URL(c.req.url)
|
||||
const queryTurnId = url.searchParams.get('turnId')?.trim() || undefined
|
||||
const turnId =
|
||||
queryTurnId ?? service.getActiveTurn(agentId, 'main')?.turnId
|
||||
if (!turnId) {
|
||||
return c.json({ error: 'No active turn for this agent' }, 404)
|
||||
}
|
||||
const lastEventId =
|
||||
c.req.header('Last-Event-ID') ??
|
||||
url.searchParams.get('lastSeq') ??
|
||||
undefined
|
||||
const lastSeq = parseLastSeq(lastEventId)
|
||||
const frames = service.attachTurn({ turnId, lastSeq })
|
||||
if (!frames) {
|
||||
return c.json({ error: 'Unknown turn' }, 404)
|
||||
}
|
||||
return streamTurnFrames(c, frames, { turnId })
|
||||
})
|
||||
.post('/:agentId/chat/cancel', async (c) => {
|
||||
const agentId = c.req.param('agentId')
|
||||
const body = await readJsonBody(c)
|
||||
const turnId =
|
||||
'value' in body && typeof body.value.turnId === 'string'
|
||||
? body.value.turnId.trim() || undefined
|
||||
: undefined
|
||||
const reason =
|
||||
'value' in body && typeof body.value.reason === 'string'
|
||||
? body.value.reason
|
||||
: undefined
|
||||
const cancelled = service.cancelTurn({ agentId, turnId, reason })
|
||||
return c.json({ cancelled })
|
||||
})
|
||||
.get('/:agentId/queue', async (c) => {
|
||||
try {
|
||||
const queue = await service.listQueuedMessages(c.req.param('agentId'))
|
||||
return c.json({ queue })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.post('/:agentId/queue', async (c) => {
|
||||
const parsed = await parseEnqueueBody(c)
|
||||
if ('error' in parsed) return c.json({ error: parsed.error }, 400)
|
||||
try {
|
||||
const queued = await service.enqueueMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
message: parsed.message,
|
||||
attachments: parsed.attachments,
|
||||
})
|
||||
return c.json({ queued })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.delete('/:agentId/queue/:messageId', async (c) => {
|
||||
try {
|
||||
const removed = await service.removeQueuedMessage({
|
||||
agentId: c.req.param('agentId'),
|
||||
messageId: c.req.param('messageId'),
|
||||
})
|
||||
if (!removed)
|
||||
return c.json({ error: 'Queued message not found' }, 404)
|
||||
return c.json({ removed })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
|
||||
// ── Files (TKT-762) ────────────────────────────────────────────
|
||||
//
|
||||
// V1 surfaces files OpenClaw agents produce inside their workspace
|
||||
// dir (`~/.browseros/vm/openclaw/.openclaw/workspace[-<name>]/`)
|
||||
// as outputs, attributed back to the chat turn that produced them
|
||||
// by the per-turn workspace diff in
|
||||
// `agent-harness-service.runDetachedTurn`. Adapter-gated to
|
||||
// openclaw on the service side; for claude / codex these endpoints
|
||||
// simply return empty lists.
|
||||
//
|
||||
// The file-id-scoped endpoints (`/files/:fileId/{preview,download}`)
|
||||
// accept an opaque `fileId` and resolve the on-disk path
|
||||
// server-side, so the client never sees a raw path and traversal
|
||||
// is impossible by construction.
|
||||
|
||||
.get('/:agentId/files', async (c) => {
|
||||
try {
|
||||
const groups = await service.listAgentFiles(
|
||||
c.req.param('agentId'),
|
||||
parseAgentFilesLimit(c.req.query('limit')),
|
||||
)
|
||||
return c.json({ groups })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/:agentId/files/turn/:turnId', async (c) => {
|
||||
try {
|
||||
const files = await service.listAgentFilesForTurn(
|
||||
c.req.param('agentId'),
|
||||
c.req.param('turnId'),
|
||||
)
|
||||
return c.json({ files })
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/files/:fileId/preview', async (c) => {
|
||||
try {
|
||||
const preview = await service.previewProducedFile(
|
||||
c.req.param('fileId'),
|
||||
)
|
||||
if (!preview || preview.kind === 'missing') {
|
||||
return c.json({ error: 'File not found' }, 404)
|
||||
}
|
||||
return c.json(preview)
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
.get('/files/:fileId/download', async (c) => {
|
||||
try {
|
||||
const resolved = await service.resolveProducedFileForDownload(
|
||||
c.req.param('fileId'),
|
||||
)
|
||||
if (!resolved) return c.json({ error: 'File not found' }, 404)
|
||||
|
||||
// Stream raw bytes via Bun's lazy file handle. Sets
|
||||
// Content-Disposition so browsers save instead of preview.
|
||||
const file = Bun.file(resolved.absolutePath)
|
||||
return new Response(file.stream(), {
|
||||
headers: {
|
||||
'Content-Type': resolved.mimeType,
|
||||
'Content-Length': String(resolved.size),
|
||||
'Content-Disposition': `attachment; ${encodeRfc6266Filename(resolved.fileName)}`,
|
||||
'Cache-Control': 'no-store',
|
||||
},
|
||||
})
|
||||
} catch (err) {
|
||||
return handleAgentRouteError(c, err)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
/** Hard cap on `?limit=` for /agents/:id/files — guards against
|
||||
* a caller-supplied huge value forcing a per-agent table scan. */
|
||||
const MAX_FILES_LIMIT = 500
|
||||
|
||||
/**
|
||||
* Parse + clamp the `limit` query for /agents/:id/files. Returns
|
||||
* `undefined` when the param is absent or unparseable so the
|
||||
* service falls back to its own default.
|
||||
*/
|
||||
function parseAgentFilesLimit(
|
||||
raw: string | undefined,
|
||||
): { limit: number } | undefined {
|
||||
if (!raw) return undefined
|
||||
const parsed = Number.parseInt(raw, 10)
|
||||
if (!Number.isFinite(parsed)) return undefined
|
||||
return { limit: Math.min(Math.max(1, parsed), MAX_FILES_LIMIT) }
|
||||
}
|
||||
|
||||
/**
|
||||
* RFC 6266 / RFC 5987 filename attributes for `Content-Disposition`.
|
||||
* Returns the `filename="..."` attribute (always) plus a
|
||||
* percent-encoded `filename*=UTF-8''…` attribute when the name
|
||||
* contains non-ASCII characters, so browsers download with the
|
||||
* original name even on stricter HTTP clients.
|
||||
*/
|
||||
function encodeRfc6266Filename(filename: string): string {
|
||||
// Strip CRLFs and quotes (header injection guard).
|
||||
const safe = filename.replace(/["\r\n]/g, '_')
|
||||
// Detect non-ASCII; emit the RFC 5987 fallback attribute when
|
||||
// present. `encodeURIComponent` is the standard browser-safe
|
||||
// percent-encoder for this purpose.
|
||||
const hasNonAscii = /[^ -~]/.test(safe)
|
||||
if (!hasNonAscii) return `filename="${safe}"`
|
||||
return `filename="${safe}"; filename*=UTF-8''${encodeURIComponent(safe)}`
|
||||
}
|
||||
|
||||
function turnFramesToAgentEvents(
|
||||
@@ -540,11 +675,14 @@ async function parseCreateAgentBody(c: Context<Env>): Promise<
|
||||
? record.reasoningEffort.trim()
|
||||
: undefined
|
||||
|
||||
// OpenClaw agents resolve their model from the gateway-side provider
|
||||
// config rather than from the harness catalog. Skip catalog model
|
||||
// validation for that adapter; everything else still uses the catalog.
|
||||
// OpenClaw and Hermes resolve their model from per-agent provider
|
||||
// config (gateway / config.yaml) rather than from the harness catalog.
|
||||
// Skip catalog model validation for those adapters — both have an
|
||||
// empty `models: []` in the catalog by design — everything else still
|
||||
// uses the catalog for validation.
|
||||
if (
|
||||
record.adapter !== 'openclaw' &&
|
||||
record.adapter !== 'hermes' &&
|
||||
!isSupportedAgentModel(record.adapter, modelId)
|
||||
) {
|
||||
return { error: 'Invalid modelId' }
|
||||
@@ -781,6 +919,9 @@ function handleAgentRouteError(c: Context<Env>, err: unknown) {
|
||||
if (err instanceof InvalidAgentUpdateError) {
|
||||
return c.json({ error: err.message }, 400)
|
||||
}
|
||||
if (err instanceof HermesProviderConfigInvalidError) {
|
||||
return c.json({ error: err.message }, 400)
|
||||
}
|
||||
if (err instanceof MessageQueueFullError) {
|
||||
return c.json({ error: err.message }, 429)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { mkdir } from 'node:fs/promises'
|
||||
import { mkdir, rm } from 'node:fs/promises'
|
||||
import { zValidator } from '@hono/zod-validator'
|
||||
import { Hono } from 'hono'
|
||||
import { z } from 'zod'
|
||||
@@ -26,4 +26,9 @@ export function createMemoryRoutes() {
|
||||
await Bun.write(getCoreMemoryPath(), content)
|
||||
return c.json({ success: true })
|
||||
})
|
||||
.delete('/', async (c) => {
|
||||
await rm(getMemoryDir(), { recursive: true, force: true })
|
||||
await mkdir(getMemoryDir(), { recursive: true })
|
||||
return c.json({ success: true })
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { zValidator } from '@hono/zod-validator'
|
||||
import { Hono } from 'hono'
|
||||
import { z } from 'zod'
|
||||
import { readSoul, writeSoul } from '../../lib/soul'
|
||||
import { readSoul, resetSoulTemplate, writeSoul } from '../../lib/soul'
|
||||
|
||||
const WriteSoulSchema = z.object({
|
||||
content: z.string(),
|
||||
@@ -18,4 +18,8 @@ export function createSoulRoutes() {
|
||||
const result = await writeSoul(content)
|
||||
return c.json(result)
|
||||
})
|
||||
.delete('/', async (c) => {
|
||||
const result = await resetSoulTemplate()
|
||||
return c.json(result)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ import {
|
||||
connectKlavisInBackground,
|
||||
type KlavisProxyRef,
|
||||
} from './services/klavis/strata-proxy'
|
||||
import { OpenClawGatewayChatClient } from './services/openclaw/openclaw-gateway-chat-client'
|
||||
import { convertOpenClawHistoryToAgentHistory } from './services/openclaw/history-mapper'
|
||||
import { getOpenClawService } from './services/openclaw/openclaw-service'
|
||||
import type { Env, HttpServerConfig } from './types'
|
||||
import { defaultCorsConfig } from './utils/cors'
|
||||
@@ -137,16 +137,11 @@ export async function createHttpServer(config: HttpServerConfig) {
|
||||
browserosServerPort: port,
|
||||
browser,
|
||||
openclawGateway: {
|
||||
getGatewayToken: () => getOpenClawService().getGatewayToken(),
|
||||
getContainerName: () => OPENCLAW_GATEWAY_CONTAINER_NAME,
|
||||
getLimaHomeDir: () => getLimaHomeDir(),
|
||||
getLimactlPath: () => resolveBundledLimactl(resourcesDir),
|
||||
getVmName: () => VM_NAME,
|
||||
},
|
||||
openclawGatewayChat: new OpenClawGatewayChatClient(
|
||||
() => getOpenClawService().getPort(),
|
||||
async () => getOpenClawService().getGatewayToken(),
|
||||
),
|
||||
openclawProvisioner: {
|
||||
createAgent: (input) => getOpenClawService().createAgent(input),
|
||||
removeAgent: (agentId) => getOpenClawService().removeAgent(agentId),
|
||||
@@ -159,6 +154,23 @@ export async function createHttpServer(config: HttpServerConfig) {
|
||||
}))
|
||||
},
|
||||
getStatus: () => getOpenClawService().getStatus(),
|
||||
getAgentHistory: async (agentId) => {
|
||||
// Aggregated across the agent's main + every sub-session
|
||||
// (cron / hook / channel) so autonomous turns surface in
|
||||
// the chat panel alongside user-initiated ones.
|
||||
const raw = await getOpenClawService().getSessionHistory(
|
||||
`agent:${agentId}:main`,
|
||||
)
|
||||
return convertOpenClawHistoryToAgentHistory(agentId, raw)
|
||||
},
|
||||
},
|
||||
onTurnLifecycle: (agent, event) => {
|
||||
if (agent.adapter !== 'openclaw') return
|
||||
getOpenClawService().recordAgentTurnEvent(
|
||||
agent.id,
|
||||
agent.sessionKey,
|
||||
event,
|
||||
)
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
@@ -24,6 +24,8 @@ import {
|
||||
type QueuedMessage,
|
||||
type QueuedMessageAttachment,
|
||||
} from '../../../lib/agents/message-queue'
|
||||
import { writeHermesPerAgentProvider } from '../hermes/hermes-paths'
|
||||
import { getHermesProviderMapping } from '../hermes/hermes-provider-map'
|
||||
|
||||
export {
|
||||
MessageQueueFullError,
|
||||
@@ -31,14 +33,26 @@ export {
|
||||
type QueuedMessageAttachment,
|
||||
} from '../../../lib/agents/message-queue'
|
||||
|
||||
import { basename } from 'node:path'
|
||||
import type {
|
||||
AgentHistoryPage,
|
||||
AgentRowSnapshot,
|
||||
AgentRuntime,
|
||||
AgentStreamEvent,
|
||||
} from '../../../lib/agents/types'
|
||||
import { getOpenClawDir } from '../../../lib/browseros-dir'
|
||||
import { logger } from '../../../lib/logger'
|
||||
import type { OpenClawGatewayChatClient } from '../openclaw/openclaw-gateway-chat-client'
|
||||
import {
|
||||
buildFilePreview,
|
||||
detectMimeType,
|
||||
type FilePreview,
|
||||
} from '../openclaw/file-preview'
|
||||
import { getHostWorkspaceDir } from '../openclaw/openclaw-env'
|
||||
import {
|
||||
type FileSnapshot,
|
||||
type ProducedFileRow,
|
||||
ProducedFilesStore,
|
||||
} from '../openclaw/produced-files-store'
|
||||
|
||||
export type AgentLiveness = 'working' | 'idle' | 'asleep' | 'error'
|
||||
|
||||
@@ -120,6 +134,15 @@ export interface OpenClawProvisioner {
|
||||
* gateway is not configured at all).
|
||||
*/
|
||||
getStatus?(): Promise<GatewayStatusSnapshot | null>
|
||||
/**
|
||||
* Optional. When wired, the harness uses this for `getHistory` on
|
||||
* openclaw-adapter agents so the chat panel sees autonomous
|
||||
* (cron / hook / channel) turns alongside user-typed turns. Without
|
||||
* this, history reads come from AcpxRuntime's local session record
|
||||
* which only contains user-initiated turns — autonomous activity
|
||||
* fires correctly but stays invisible to the panel.
|
||||
*/
|
||||
getAgentHistory?(agentId: string): Promise<AgentHistoryPage>
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -152,12 +175,47 @@ export interface GatewayStatusSnapshot {
|
||||
| null
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-turn event the harness emits to subscribers. Lets services that
|
||||
* want to track liveness for a specific adapter (e.g. OpenClaw's
|
||||
* ClawSession dashboard) react to the same stream the chat panel sees,
|
||||
* without each adapter spawning its own gateway-side observer.
|
||||
*/
|
||||
export type TurnLifecycleEvent =
|
||||
| { type: 'turn_started' }
|
||||
| { type: 'turn_event'; event: AgentStreamEvent }
|
||||
| { type: 'turn_ended'; error?: string }
|
||||
|
||||
export type TurnLifecycleListener = (
|
||||
agent: {
|
||||
id: string
|
||||
adapter: AgentDefinition['adapter']
|
||||
sessionKey: string
|
||||
},
|
||||
event: TurnLifecycleEvent,
|
||||
) => void
|
||||
|
||||
export class AgentHarnessService {
|
||||
private readonly agentStore: AgentStore
|
||||
private readonly runtime: AgentRuntime
|
||||
private readonly openclawProvisioner: OpenClawProvisioner | null
|
||||
private readonly turnRegistry: TurnRegistry
|
||||
private readonly messageQueue: FileMessageQueue
|
||||
private readonly turnLifecycleListeners = new Set<TurnLifecycleListener>()
|
||||
/**
|
||||
* Optional override for the BrowserOS dir used by Hermes per-agent
|
||||
* provider config writes. Defaults to the global `getBrowserosDir()`
|
||||
* lookup at write time when undefined; tests can inject a tmp dir.
|
||||
*/
|
||||
private readonly browserosDir: string | undefined
|
||||
/**
|
||||
* Lazy-initialised so tests that swap in a fake `agentStore` don't
|
||||
* eagerly hit `getDb()` (which throws when the test harness hasn't
|
||||
* called `initializeDb`). Tests that exercise file attribution can
|
||||
* inject an explicit store via `deps.producedFilesStore`.
|
||||
*/
|
||||
private explicitProducedFilesStore: ProducedFilesStore | null = null
|
||||
private cachedProducedFilesStore: ProducedFilesStore | null = null
|
||||
private inFlightReconcile: Promise<void> | null = null
|
||||
// In-memory liveness tracker. Lost on server restart (acceptable —
|
||||
// `lastUsedAt` survives via the acpx session record's `lastUsedAt`,
|
||||
@@ -173,11 +231,12 @@ export class AgentHarnessService {
|
||||
agentStore?: AgentStore
|
||||
runtime?: AgentRuntime
|
||||
browserosServerPort?: number
|
||||
browserosDir?: string
|
||||
openclawGateway?: OpenclawGatewayAccessor
|
||||
openclawGatewayChat?: OpenClawGatewayChatClient
|
||||
openclawProvisioner?: OpenClawProvisioner
|
||||
turnRegistry?: TurnRegistry
|
||||
messageQueue?: FileMessageQueue
|
||||
producedFilesStore?: ProducedFilesStore
|
||||
} = {},
|
||||
) {
|
||||
this.agentStore = deps.agentStore ?? new DbAgentStore()
|
||||
@@ -186,11 +245,14 @@ export class AgentHarnessService {
|
||||
new AcpxRuntime({
|
||||
browserosServerPort: deps.browserosServerPort,
|
||||
openclawGateway: deps.openclawGateway,
|
||||
openclawGatewayChat: deps.openclawGatewayChat,
|
||||
})
|
||||
this.openclawProvisioner = deps.openclawProvisioner ?? null
|
||||
this.turnRegistry = deps.turnRegistry ?? new TurnRegistry()
|
||||
this.messageQueue = deps.messageQueue ?? new FileMessageQueue()
|
||||
this.browserosDir = deps.browserosDir
|
||||
if (deps.producedFilesStore) {
|
||||
this.explicitProducedFilesStore = deps.producedFilesStore
|
||||
}
|
||||
// Drain any agents whose queue file survived a restart. The check
|
||||
// for `getActiveFor` inside `maybeStartNextFromQueue` guards
|
||||
// against double-firing if the in-memory turn registry happens to
|
||||
@@ -314,6 +376,39 @@ export class AgentHarnessService {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Subscribe to turn lifecycle events for every running agent. Returns
|
||||
* an unsubscribe function. Listeners are best-effort: a throwing
|
||||
* listener does not break the turn.
|
||||
*/
|
||||
onTurnLifecycle(listener: TurnLifecycleListener): () => void {
|
||||
this.turnLifecycleListeners.add(listener)
|
||||
return () => this.turnLifecycleListeners.delete(listener)
|
||||
}
|
||||
|
||||
private emitTurnLifecycle(
|
||||
agent: AgentDefinition,
|
||||
event: TurnLifecycleEvent,
|
||||
): void {
|
||||
if (this.turnLifecycleListeners.size === 0) return
|
||||
const summary = {
|
||||
id: agent.id,
|
||||
adapter: agent.adapter,
|
||||
sessionKey: agent.sessionKey,
|
||||
}
|
||||
for (const listener of this.turnLifecycleListeners) {
|
||||
try {
|
||||
listener(summary, event)
|
||||
} catch (err) {
|
||||
logger.warn('Turn lifecycle listener threw', {
|
||||
agentId: agent.id,
|
||||
eventType: event.type,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Mark `agentId` as actively running a turn. */
|
||||
notifyTurnStarted(agentId: string): void {
|
||||
this.activity.set(agentId, { status: 'working', lastEventAt: Date.now() })
|
||||
@@ -448,8 +543,24 @@ export class AgentHarnessService {
|
||||
}
|
||||
|
||||
async createAgent(input: CreateAgentInput): Promise<AgentDefinition> {
|
||||
if (input.adapter === 'hermes') {
|
||||
// Validate before touching the store so we don't leave an orphan
|
||||
// record on the unhappy path.
|
||||
assertHermesProviderInputValid(input)
|
||||
}
|
||||
|
||||
const agent = await this.agentStore.create(input)
|
||||
|
||||
if (agent.adapter === 'hermes') {
|
||||
try {
|
||||
await this.writeHermesPerAgentProvider(agent.id, input)
|
||||
} catch (err) {
|
||||
await this.agentStore.delete(agent.id).catch(() => {})
|
||||
throw err
|
||||
}
|
||||
return agent
|
||||
}
|
||||
|
||||
if (agent.adapter !== 'openclaw') {
|
||||
return agent
|
||||
}
|
||||
@@ -490,6 +601,35 @@ export class AgentHarnessService {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Write Hermes' per-agent config.yaml + .env into the on-host home
|
||||
* dir. Caller must have already run assertHermesProviderInputValid;
|
||||
* any throw here is a real I/O failure and must roll back the agent
|
||||
* record.
|
||||
*/
|
||||
private async writeHermesPerAgentProvider(
|
||||
agentId: string,
|
||||
input: CreateAgentInput,
|
||||
): Promise<void> {
|
||||
// Non-null assertions are safe: assertHermesProviderInputValid ran
|
||||
// first and rejects when any required field is missing.
|
||||
const mapping = getHermesProviderMapping(input.providerType as string)
|
||||
if (!mapping) {
|
||||
throw new HermesProviderConfigInvalidError(
|
||||
`Provider type "${input.providerType}" is not supported by Hermes`,
|
||||
)
|
||||
}
|
||||
await writeHermesPerAgentProvider({
|
||||
browserosDir: this.browserosDir,
|
||||
agentId,
|
||||
providerId: mapping.hermesProvider,
|
||||
envVarName: mapping.envVarName,
|
||||
apiKey: (input.apiKey as string).trim(),
|
||||
modelId: (input.modelId as string).trim(),
|
||||
baseUrl: input.baseUrl?.trim() || mapping.defaultBaseUrl,
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Pulls every gateway-side OpenClaw agent into the harness store as a
|
||||
* harness record (idempotent, safe to call repeatedly). This lets
|
||||
@@ -599,9 +739,112 @@ export class AgentHarnessService {
|
||||
|
||||
async getHistory(agentId: string): Promise<AgentHistoryPage> {
|
||||
const agent = await this.requireAgent(agentId)
|
||||
// OpenClaw agents persist conversation in the gateway, not in the
|
||||
// AcpxRuntime's local session record. Reading the local record
|
||||
// would miss autonomous (cron / hook / channel) turns. Route
|
||||
// through the provisioner so the panel sees the full history.
|
||||
if (
|
||||
agent.adapter === 'openclaw' &&
|
||||
this.openclawProvisioner?.getAgentHistory
|
||||
) {
|
||||
return this.openclawProvisioner.getAgentHistory(agentId)
|
||||
}
|
||||
return this.runtime.getHistory({ agent, sessionId: 'main' })
|
||||
}
|
||||
|
||||
// ── Produced files (Files rail / inline artifact card) ───────────
|
||||
|
||||
/**
|
||||
* Outputs-rail data for one agent. Returns groups of files keyed
|
||||
* by the assistant turn that produced them, newest first. Empty
|
||||
* array when the agent hasn't produced anything yet, or when the
|
||||
* adapter doesn't track outputs (claude / codex — see Phase 2
|
||||
* commit).
|
||||
*/
|
||||
async listAgentFiles(
|
||||
agentId: string,
|
||||
options: { limit?: number } = {},
|
||||
): Promise<ProducedFilesRailGroup[]> {
|
||||
const agent = await this.requireAgent(agentId)
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return []
|
||||
const rows = await store.listByAgent(agent.id, options)
|
||||
return store
|
||||
.groupByTurn(rows)
|
||||
.map(({ turnId, turnPrompt, createdAt, files }) => ({
|
||||
turnId,
|
||||
turnPrompt,
|
||||
createdAt,
|
||||
files: files.map(toProducedFileEntry),
|
||||
}))
|
||||
}
|
||||
|
||||
/**
|
||||
* Inline-card data for one assistant turn. Used by the SSE
|
||||
* `produced_files` event consumer to refresh metadata after the
|
||||
* turn completes; also handy for direct fetches by clients that
|
||||
* missed the live event.
|
||||
*/
|
||||
async listAgentFilesForTurn(
|
||||
agentId: string,
|
||||
turnId: string,
|
||||
): Promise<ProducedFileEntry[]> {
|
||||
await this.requireAgent(agentId)
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return []
|
||||
const rows = await store.listByTurn(turnId)
|
||||
return rows.map(toProducedFileEntry)
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a preview payload for a single file. Returns null when the
|
||||
* file id is unknown OR the on-disk path no longer exists. The
|
||||
* route layer maps null → 404.
|
||||
*/
|
||||
async previewProducedFile(fileId: string): Promise<FilePreview | null> {
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return null
|
||||
const row = await store.findById(fileId)
|
||||
if (!row) return null
|
||||
const agent = await this.agentStore.get(row.agentDefinitionId)
|
||||
if (!agent || agent.adapter !== 'openclaw') return null
|
||||
const workspaceDir = getHostWorkspaceDir(getOpenClawDir(), agent.name)
|
||||
const resolved = await store.resolveFilePath({ fileId, workspaceDir })
|
||||
if (!resolved) return null
|
||||
return buildFilePreview(resolved.absolutePath)
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a file id to an absolute on-disk path + metadata for the
|
||||
* download route to stream. Null when the file id is unknown or
|
||||
* the path escaped the workspace root (containment check happens
|
||||
* inside `producedFilesStore.resolveFilePath`).
|
||||
*/
|
||||
async resolveProducedFileForDownload(fileId: string): Promise<{
|
||||
absolutePath: string
|
||||
fileName: string
|
||||
mimeType: string
|
||||
size: number
|
||||
} | null> {
|
||||
const store = this.tryGetProducedFilesStore()
|
||||
if (!store) return null
|
||||
const row = await store.findById(fileId)
|
||||
if (!row) return null
|
||||
const agent = await this.agentStore.get(row.agentDefinitionId)
|
||||
if (!agent || agent.adapter !== 'openclaw') return null
|
||||
const workspaceDir = getHostWorkspaceDir(getOpenClawDir(), agent.name)
|
||||
const resolved = await store.resolveFilePath({ fileId, workspaceDir })
|
||||
if (!resolved) return null
|
||||
const mimeType = await detectMimeType(resolved.absolutePath)
|
||||
const fileName = basename(row.path)
|
||||
return {
|
||||
absolutePath: resolved.absolutePath,
|
||||
fileName,
|
||||
mimeType,
|
||||
size: row.size,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Kick off a new agent turn that survives the caller's HTTP lifetime.
|
||||
* Events are pushed into a per-turn buffer; the returned `frames`
|
||||
@@ -627,6 +870,7 @@ export class AgentHarnessService {
|
||||
prompt: input.message,
|
||||
})
|
||||
this.notifyTurnStarted(agent.id)
|
||||
this.emitTurnLifecycle(agent, { type: 'turn_started' })
|
||||
|
||||
// Kick off the runtime call in the background. The per-turn
|
||||
// AbortController — NOT the HTTP request signal — is what cancels
|
||||
@@ -728,6 +972,26 @@ export class AgentHarnessService {
|
||||
const turn = this.turnRegistry.get(turnId)
|
||||
if (!turn) return
|
||||
let lastErrorMessage: string | undefined
|
||||
|
||||
// Bracket openclaw turns with a workspace snapshot so any file the
|
||||
// agent produces during the turn is attributable back to it (rail
|
||||
// + inline artifact UX). Adapter-gated for v1 — Claude / Codex
|
||||
// write to the user's host filesystem and don't need this; their
|
||||
// outputs are already visible via the user's own tools.
|
||||
const isOpenclaw = agent.adapter === 'openclaw'
|
||||
const workspaceDir = isOpenclaw ? this.resolveSafeWorkspaceDir(agent) : null
|
||||
const producedFilesStore = workspaceDir
|
||||
? this.tryGetProducedFilesStore()
|
||||
: null
|
||||
const workspaceSnapshot =
|
||||
workspaceDir && producedFilesStore
|
||||
? await this.snapshotWorkspaceForTurn(
|
||||
agent,
|
||||
workspaceDir,
|
||||
producedFilesStore,
|
||||
)
|
||||
: null
|
||||
|
||||
try {
|
||||
const upstream = await this.runtime.send({
|
||||
agent,
|
||||
@@ -746,6 +1010,7 @@ export class AgentHarnessService {
|
||||
if (done) break
|
||||
if (value.type === 'error') lastErrorMessage = value.message
|
||||
this.turnRegistry.pushEvent(turnId, value)
|
||||
this.emitTurnLifecycle(agent, { type: 'turn_event', event: value })
|
||||
}
|
||||
} finally {
|
||||
try {
|
||||
@@ -782,10 +1047,141 @@ export class AgentHarnessService {
|
||||
})
|
||||
}
|
||||
} finally {
|
||||
// Attribute any files the agent produced during this turn. We
|
||||
// run on success, error, AND inside `finally` so an upstream
|
||||
// failure mid-turn that still managed to write files doesn't
|
||||
// lose them. We skip only when the user explicitly cancelled —
|
||||
// in that case the side effects shouldn't be surfaced as
|
||||
// "outputs you asked for."
|
||||
if (
|
||||
workspaceDir &&
|
||||
workspaceSnapshot !== null &&
|
||||
producedFilesStore &&
|
||||
!turn.abortController.signal.aborted
|
||||
) {
|
||||
await this.attributeTurnFiles({
|
||||
producedFilesStore,
|
||||
workspaceDir,
|
||||
before: workspaceSnapshot,
|
||||
agent,
|
||||
turnId,
|
||||
turnPrompt: input.message,
|
||||
})
|
||||
}
|
||||
this.notifyTurnEnded(agent.id, {
|
||||
ok: lastErrorMessage === undefined,
|
||||
error: lastErrorMessage,
|
||||
})
|
||||
this.emitTurnLifecycle(agent, {
|
||||
type: 'turn_ended',
|
||||
error: lastErrorMessage,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute the host-side workspace dir for an openclaw agent,
|
||||
* returning `null` when the agent's display name fails the
|
||||
* path-traversal guard. Logs a warning so the safety-disabled
|
||||
* case is observable in production.
|
||||
*/
|
||||
private resolveSafeWorkspaceDir(agent: AgentDefinition): string | null {
|
||||
try {
|
||||
return getHostWorkspaceDir(getOpenClawDir(), agent.name)
|
||||
} catch (err) {
|
||||
logger.warn('Skipping openclaw file attribution: unsafe agent name', {
|
||||
agentId: agent.id,
|
||||
agentName: agent.name,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pre-turn workspace snapshot. Returns `null` on any failure so
|
||||
* the rest of the turn flow continues without file attribution.
|
||||
*/
|
||||
private async snapshotWorkspaceForTurn(
|
||||
agent: AgentDefinition,
|
||||
workspaceDir: string,
|
||||
producedFilesStore: ProducedFilesStore,
|
||||
): Promise<FileSnapshot | null> {
|
||||
try {
|
||||
return await producedFilesStore.snapshotWorkspace(workspaceDir)
|
||||
} catch (err) {
|
||||
logger.warn(
|
||||
'Failed to snapshot openclaw workspace; file attribution disabled for this turn',
|
||||
{
|
||||
agentId: agent.id,
|
||||
workspaceDir,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
},
|
||||
)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Lazily resolve the produced-files store. Returns `null` if the
|
||||
* SQLite handle isn't initialised yet — keeps the harness usable in
|
||||
* tests + during early server boot, where chat turns are unlikely
|
||||
* but allowed.
|
||||
*/
|
||||
private tryGetProducedFilesStore(): ProducedFilesStore | null {
|
||||
if (this.explicitProducedFilesStore) return this.explicitProducedFilesStore
|
||||
if (this.cachedProducedFilesStore) return this.cachedProducedFilesStore
|
||||
try {
|
||||
this.cachedProducedFilesStore = new ProducedFilesStore()
|
||||
return this.cachedProducedFilesStore
|
||||
} catch (err) {
|
||||
logger.warn(
|
||||
'Produced-files store unavailable; turn-level file attribution disabled',
|
||||
{ error: err instanceof Error ? err.message : String(err) },
|
||||
)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Diff the workspace, persist new/modified files, and emit a
|
||||
* `produced_files` event so subscribers can render the inline
|
||||
* artifact card. Tolerant of all errors — a failure here must
|
||||
* never block the rest of the turn-end bookkeeping.
|
||||
*/
|
||||
private async attributeTurnFiles(input: {
|
||||
producedFilesStore: ProducedFilesStore
|
||||
workspaceDir: string
|
||||
before: FileSnapshot
|
||||
agent: AgentDefinition
|
||||
turnId: string
|
||||
turnPrompt: string
|
||||
}): Promise<void> {
|
||||
try {
|
||||
const rows = await input.producedFilesStore.finalizeTurn({
|
||||
agentDefinitionId: input.agent.id,
|
||||
sessionKey: input.agent.sessionKey,
|
||||
turnId: input.turnId,
|
||||
turnPrompt: input.turnPrompt,
|
||||
workspaceDir: input.workspaceDir,
|
||||
before: input.before,
|
||||
})
|
||||
if (rows.length === 0) return
|
||||
this.turnRegistry.pushEvent(input.turnId, {
|
||||
type: 'produced_files',
|
||||
files: rows.map((row) => ({
|
||||
id: row.id,
|
||||
path: row.path,
|
||||
size: row.size,
|
||||
mtimeMs: row.mtimeMs,
|
||||
})),
|
||||
})
|
||||
} catch (err) {
|
||||
logger.warn('Failed to attribute produced files for turn', {
|
||||
agentId: input.agent.id,
|
||||
turnId: input.turnId,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -846,6 +1242,48 @@ export class InvalidAgentUpdateError extends Error {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Thrown when a Hermes adapter agent is created without a complete
|
||||
* provider config (provider type, API key, model id; base URL when the
|
||||
* provider mapping requires it). Surfaces as a 400 in the route layer.
|
||||
*/
|
||||
export class HermesProviderConfigInvalidError extends Error {
|
||||
constructor(message: string) {
|
||||
super(message)
|
||||
this.name = 'HermesProviderConfigInvalidError'
|
||||
}
|
||||
}
|
||||
|
||||
function assertHermesProviderInputValid(input: CreateAgentInput): void {
|
||||
const providerType = input.providerType?.trim()
|
||||
if (!providerType) {
|
||||
throw new HermesProviderConfigInvalidError(
|
||||
'Hermes agent requires providerType (pick a provider configured in BrowserOS AI Settings)',
|
||||
)
|
||||
}
|
||||
const mapping = getHermesProviderMapping(providerType)
|
||||
if (!mapping) {
|
||||
throw new HermesProviderConfigInvalidError(
|
||||
`Provider type "${providerType}" is not supported by Hermes`,
|
||||
)
|
||||
}
|
||||
if (!input.apiKey?.trim()) {
|
||||
throw new HermesProviderConfigInvalidError(
|
||||
'Hermes agent requires apiKey from the selected provider',
|
||||
)
|
||||
}
|
||||
if (!input.modelId?.trim()) {
|
||||
throw new HermesProviderConfigInvalidError(
|
||||
'Hermes agent requires modelId from the selected provider',
|
||||
)
|
||||
}
|
||||
if (mapping.requiresBaseUrl && !input.baseUrl?.trim()) {
|
||||
throw new HermesProviderConfigInvalidError(
|
||||
`Provider type "${providerType}" requires baseUrl`,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Thrown when `startTurn` is called for an agent that already has an
|
||||
* in-flight turn. The route layer maps this to 409 + the existing
|
||||
@@ -860,3 +1298,38 @@ export class TurnAlreadyActiveError extends Error {
|
||||
this.name = 'TurnAlreadyActiveError'
|
||||
}
|
||||
}
|
||||
|
||||
// ── Files API DTO ────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Wire shape for one produced-file entry returned by the rail and
|
||||
* inline-card endpoints. Trimmed from the on-disk row — clients
|
||||
* never see `agentDefinitionId` or `sessionKey`.
|
||||
*/
|
||||
export interface ProducedFileEntry {
|
||||
id: string
|
||||
path: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
createdAt: number
|
||||
detectedBy: 'diff' | 'tool'
|
||||
}
|
||||
|
||||
export interface ProducedFilesRailGroup {
|
||||
turnId: string
|
||||
/** First non-blank line of the user prompt that initiated this turn. */
|
||||
turnPrompt: string
|
||||
createdAt: number
|
||||
files: ProducedFileEntry[]
|
||||
}
|
||||
|
||||
function toProducedFileEntry(row: ProducedFileRow): ProducedFileEntry {
|
||||
return {
|
||||
id: row.id,
|
||||
path: row.path,
|
||||
size: row.size,
|
||||
mtimeMs: row.mtimeMs,
|
||||
createdAt: row.createdAt,
|
||||
detectedBy: row.detectedBy,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,99 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Host-side path helpers for the Hermes container.
|
||||
*
|
||||
* Hermes per-agent state lives under the BrowserOS-managed VM state
|
||||
* directory (so it's reachable inside the Lima VM via the existing
|
||||
* vm/ → /mnt/browseros/vm bind mount). The Hermes container then bind-
|
||||
* mounts the guest-side path (/mnt/browseros/vm/hermes/harness) into
|
||||
* /data/agents/harness, so `HERMES_HOME` ends up pointing at a path
|
||||
* the container can actually open.
|
||||
*/
|
||||
|
||||
import { mkdir, writeFile } from 'node:fs/promises'
|
||||
import { join } from 'node:path'
|
||||
import { getVmStateDir } from '../../../lib/browseros-dir'
|
||||
|
||||
/** Top-level Hermes state directory: `<browserosDir>/vm/hermes`. */
|
||||
export function getHermesHostStateDir(browserosDir?: string): string {
|
||||
return join(
|
||||
browserosDir ? join(browserosDir, 'vm') : getVmStateDir(),
|
||||
'hermes',
|
||||
)
|
||||
}
|
||||
|
||||
/** Per-agent harness root: `<browserosDir>/vm/hermes/harness`. */
|
||||
export function getHermesHarnessHostDir(browserosDir?: string): string {
|
||||
return join(getHermesHostStateDir(browserosDir), 'harness')
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-agent home directory on the host. The Hermes container reads
|
||||
* `config.yaml` + `.env` from here via the harness bind mount; both
|
||||
* files are written at agent-create time by AgentHarnessService and
|
||||
* stay constant across turns.
|
||||
*/
|
||||
export function getHermesAgentHomeHostDir(input: {
|
||||
browserosDir?: string
|
||||
agentId: string
|
||||
}): string {
|
||||
return join(
|
||||
getHermesHarnessHostDir(input.browserosDir),
|
||||
input.agentId,
|
||||
'home',
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a Hermes per-agent provider config into the on-host home dir.
|
||||
* The dir lives under <browserosDir>/vm/hermes/harness/<agentId>/home/
|
||||
* which is bind-mounted into the container at /data/agents/harness/<id>/home/.
|
||||
*
|
||||
* Idempotent: writes always overwrite (last-write-wins). The provider
|
||||
* id, env var name, and credentials must be supplied by the caller —
|
||||
* Hermes agents always carry their own config; there is no
|
||||
* `~/.hermes/` fallback.
|
||||
*/
|
||||
export async function writeHermesPerAgentProvider(input: {
|
||||
browserosDir?: string
|
||||
agentId: string
|
||||
providerId: string
|
||||
envVarName: string
|
||||
apiKey: string
|
||||
modelId: string
|
||||
baseUrl?: string
|
||||
}): Promise<void> {
|
||||
const home = getHermesAgentHomeHostDir({
|
||||
browserosDir: input.browserosDir,
|
||||
agentId: input.agentId,
|
||||
})
|
||||
await mkdir(home, { recursive: true })
|
||||
|
||||
// Hermes' `provider: custom` requires a `base_url` — without one the
|
||||
// model loader rejects with `unknown provider 'custom'`. Callers that
|
||||
// use a named Hermes provider (e.g. anthropic, openrouter) can omit
|
||||
// baseUrl and Hermes resolves the URL itself.
|
||||
if (input.providerId === 'custom' && !input.baseUrl) {
|
||||
throw new Error(
|
||||
'Hermes provider "custom" requires base_url; set HermesProviderMapping.defaultBaseUrl or supply input.baseUrl',
|
||||
)
|
||||
}
|
||||
const yamlLines = [
|
||||
'model:',
|
||||
` default: ${JSON.stringify(input.modelId)}`,
|
||||
` provider: ${JSON.stringify(input.providerId)}`,
|
||||
]
|
||||
if (input.baseUrl) {
|
||||
yamlLines.push(` base_url: ${JSON.stringify(input.baseUrl)}`)
|
||||
}
|
||||
yamlLines.push('')
|
||||
await writeFile(join(home, 'config.yaml'), yamlLines.join('\n'), {
|
||||
mode: 0o600,
|
||||
})
|
||||
|
||||
const envLines: string[] = [`${input.envVarName}=${input.apiKey}`, '']
|
||||
await writeFile(join(home, '.env'), envLines.join('\n'), { mode: 0o600 })
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Translation table from BrowserOS LLM provider types (the values that
|
||||
* live in `LlmProviderConfig.type` on the extension side) to Hermes
|
||||
* runtime configuration. Hermes itself only knows a small fixed set of
|
||||
* provider keys; BrowserOS exposes a richer registry, so we explicitly
|
||||
* gate which BrowserOS provider types Hermes can consume.
|
||||
*
|
||||
* The set of allowed BrowserOS provider types is shared with the
|
||||
* frontend via `HERMES_SUPPORTED_BROWSEROS_PROVIDER_TYPES`. Adding a
|
||||
* new type there without an entry here will fail the type check below
|
||||
* (every supported type must have a mapping).
|
||||
*
|
||||
* Anything not listed is rejected at agent-create time with a clear
|
||||
* error — there is no `~/.hermes/` fallback.
|
||||
*/
|
||||
import {
|
||||
HERMES_SUPPORTED_BROWSEROS_PROVIDER_TYPES,
|
||||
type HermesSupportedBrowserosProviderType,
|
||||
} from '@browseros/shared/constants/hermes'
|
||||
|
||||
/** Hermes runtime settings derived from one BrowserOS provider type. */
export interface HermesProviderMapping {
  /** Hermes' own provider key written into `model.provider` in config.yaml. */
  hermesProvider: string
  /** Env var Hermes reads the API key from (written into per-agent `.env`). */
  envVarName: string
  /** True when the harness must require an explicit baseUrl from input. */
  requiresBaseUrl: boolean
  /**
   * Used when `hermesProvider === 'custom'` and the input has no
   * baseUrl — Hermes treats `provider: custom` as "call this URL
   * directly", so `base_url` must always end up in config.yaml.
   */
  defaultBaseUrl?: string
}
|
||||
|
||||
/**
 * One entry per supported BrowserOS provider type. Keying the Record by
 * `HermesSupportedBrowserosProviderType` makes a missing entry a compile
 * error whenever the shared supported-types list grows.
 */
const HERMES_PROVIDER_MAP: Record<
  HermesSupportedBrowserosProviderType,
  HermesProviderMapping
> = {
  // Named Hermes provider — Hermes resolves the endpoint URL itself.
  anthropic: {
    hermesProvider: 'anthropic',
    envVarName: 'ANTHROPIC_API_KEY',
    requiresBaseUrl: false,
  },
  // Hermes (v2026.4.x) has no provider key named `"openai"`. Per the
  // upstream docs, `provider: custom` + `base_url` is the canonical
  // shape for any OpenAI-compatible endpoint with an API key — Hermes
  // skips provider lookup and calls the URL directly. Used for both
  // pure OpenAI (default base URL) and openai-compatible (caller URL).
  openai: {
    hermesProvider: 'custom',
    envVarName: 'OPENAI_API_KEY',
    requiresBaseUrl: false,
    defaultBaseUrl: 'https://api.openai.com/v1',
  },
  // Named Hermes provider, same as anthropic above.
  openrouter: {
    hermesProvider: 'openrouter',
    envVarName: 'OPENROUTER_API_KEY',
    requiresBaseUrl: false,
  },
  // Caller must supply the endpoint URL — there is no sensible default.
  'openai-compatible': {
    hermesProvider: 'custom',
    envVarName: 'OPENAI_API_KEY',
    requiresBaseUrl: true,
  },
}
|
||||
|
||||
export function isHermesSupportedProviderType(
|
||||
providerType: string,
|
||||
): providerType is HermesSupportedBrowserosProviderType {
|
||||
return (
|
||||
HERMES_SUPPORTED_BROWSEROS_PROVIDER_TYPES as readonly string[]
|
||||
).includes(providerType)
|
||||
}
|
||||
|
||||
export function getHermesProviderMapping(
|
||||
providerType: string,
|
||||
): HermesProviderMapping | undefined {
|
||||
if (!isHermesSupportedProviderType(providerType)) return undefined
|
||||
return HERMES_PROVIDER_MAP[providerType]
|
||||
}
|
||||
@@ -52,7 +52,6 @@ export type GatewayContainerSpec = {
|
||||
hostPort: number
|
||||
hostHome: string
|
||||
envFilePath: string
|
||||
gatewayToken?: string
|
||||
timezone: string
|
||||
}
|
||||
|
||||
@@ -414,9 +413,7 @@ export class ContainerRuntime {
|
||||
TZ: input.timezone,
|
||||
PATH: GATEWAY_PATH,
|
||||
NPM_CONFIG_PREFIX: GATEWAY_NPM_PREFIX,
|
||||
...(input.gatewayToken
|
||||
? { OPENCLAW_GATEWAY_TOKEN: input.gatewayToken }
|
||||
: {}),
|
||||
OPENCLAW_GATEWAY_PRIVATE_INGRESS_NO_AUTH: '1',
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,335 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Helpers used by the `/claw/files/:id/preview` and
|
||||
* `/claw/files/:id/download` routes:
|
||||
*
|
||||
* - MIME-type detection (extension first, magic-byte fallback for
|
||||
* ambiguous extensions).
|
||||
* - Bounded text-snippet reader for inline previews.
|
||||
* - Image bytes reader for the rail's thumbnails.
|
||||
*
|
||||
* No streaming code lives here — the download route streams via Hono
|
||||
* directly. This module only handles the small in-memory reads the
|
||||
* preview UX needs.
|
||||
*/
|
||||
|
||||
import { open, stat } from 'node:fs/promises'
|
||||
import { extname } from 'node:path'
|
||||
|
||||
/** Hard cap on the inline text snippet returned by the preview API. */
|
||||
export const TEXT_PREVIEW_MAX_BYTES = 1 * 1024 * 1024 // 1 MB

/** Hard cap on inline image bytes returned as a base64 data URL. */
export const IMAGE_PREVIEW_MAX_BYTES = 4 * 1024 * 1024 // 4 MB
|
||||
|
||||
/**
 * Extension → MIME lookup consulted before magic-byte sniffing.
 * Keys are lower-cased `path.extname` results (leading dot included);
 * `detectMimeType` lower-cases before lookup, so entries must stay
 * lowercase here.
 */
const MIME_BY_EXTENSION: Record<string, string> = {
  '.txt': 'text/plain',
  '.md': 'text/markdown',
  '.markdown': 'text/markdown',
  '.json': 'application/json',
  '.jsonl': 'application/x-ndjson',
  '.csv': 'text/csv',
  '.tsv': 'text/tab-separated-values',
  '.xml': 'application/xml',
  '.yaml': 'application/yaml',
  '.yml': 'application/yaml',
  '.toml': 'application/toml',
  '.ini': 'text/plain',
  '.log': 'text/plain',
  '.html': 'text/html',
  '.htm': 'text/html',
  '.css': 'text/css',
  '.js': 'text/javascript',
  '.mjs': 'text/javascript',
  '.cjs': 'text/javascript',
  '.ts': 'text/typescript',
  '.tsx': 'text/typescript',
  '.jsx': 'text/javascript',
  '.py': 'text/x-python',
  '.rb': 'text/x-ruby',
  '.go': 'text/x-go',
  '.rs': 'text/x-rust',
  '.java': 'text/x-java',
  '.kt': 'text/x-kotlin',
  '.swift': 'text/x-swift',
  '.c': 'text/x-c',
  '.h': 'text/x-c',
  '.cpp': 'text/x-c++',
  '.hpp': 'text/x-c++',
  '.sh': 'application/x-sh',
  '.zsh': 'application/x-sh',
  '.bash': 'application/x-sh',
  '.sql': 'application/sql',
  '.png': 'image/png',
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.gif': 'image/gif',
  '.webp': 'image/webp',
  '.bmp': 'image/bmp',
  '.svg': 'image/svg+xml',
  '.ico': 'image/x-icon',
  '.heic': 'image/heic',
  '.heif': 'image/heif',
  '.pdf': 'application/pdf',
  '.zip': 'application/zip',
  '.tar': 'application/x-tar',
  '.gz': 'application/gzip',
  '.tgz': 'application/gzip',
  '.bz2': 'application/x-bzip2',
  '.7z': 'application/x-7z-compressed',
  '.mp3': 'audio/mpeg',
  '.wav': 'audio/wav',
  '.ogg': 'audio/ogg',
  '.mp4': 'video/mp4',
  '.webm': 'video/webm',
  '.mov': 'video/quicktime',
  '.docx':
    'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
  '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
  '.pptx':
    'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}
|
||||
|
||||
/**
|
||||
* Magic-byte signatures for cases where the extension is missing or
|
||||
* misleading. Only covers the formats whose preview path differs from
|
||||
* the default binary path (text vs image vs PDF vs other).
|
||||
*/
|
||||
const MAGIC_BYTE_SIGNATURES: Array<{
|
||||
mime: string
|
||||
matches: (head: Uint8Array) => boolean
|
||||
}> = [
|
||||
{
|
||||
mime: 'image/png',
|
||||
matches: (h) =>
|
||||
h[0] === 0x89 &&
|
||||
h[1] === 0x50 &&
|
||||
h[2] === 0x4e &&
|
||||
h[3] === 0x47 &&
|
||||
h[4] === 0x0d &&
|
||||
h[5] === 0x0a,
|
||||
},
|
||||
{
|
||||
mime: 'image/jpeg',
|
||||
matches: (h) => h[0] === 0xff && h[1] === 0xd8 && h[2] === 0xff,
|
||||
},
|
||||
{
|
||||
mime: 'image/gif',
|
||||
matches: (h) =>
|
||||
h[0] === 0x47 && h[1] === 0x49 && h[2] === 0x46 && h[3] === 0x38,
|
||||
},
|
||||
{
|
||||
mime: 'image/webp',
|
||||
matches: (h) =>
|
||||
h[0] === 0x52 &&
|
||||
h[1] === 0x49 &&
|
||||
h[2] === 0x46 &&
|
||||
h[3] === 0x46 &&
|
||||
h[8] === 0x57 &&
|
||||
h[9] === 0x45 &&
|
||||
h[10] === 0x42 &&
|
||||
h[11] === 0x50,
|
||||
},
|
||||
{
|
||||
mime: 'application/pdf',
|
||||
matches: (h) =>
|
||||
h[0] === 0x25 && h[1] === 0x50 && h[2] === 0x44 && h[3] === 0x46,
|
||||
},
|
||||
]
|
||||
|
||||
const MAGIC_BYTE_PROBE_LEN = 12
|
||||
|
||||
/**
|
||||
* Best-effort MIME detection. Tries the extension map first, then
|
||||
* falls back to magic-byte sniffing for the formats whose preview
|
||||
* path differs from the default binary handling. Returns
|
||||
* `application/octet-stream` when we can't tell.
|
||||
*/
|
||||
export async function detectMimeType(absolutePath: string): Promise<string> {
|
||||
const fromExtension = MIME_BY_EXTENSION[extname(absolutePath).toLowerCase()]
|
||||
if (fromExtension) return fromExtension
|
||||
|
||||
let head: Uint8Array
|
||||
try {
|
||||
const handle = await open(absolutePath, 'r')
|
||||
try {
|
||||
const buffer = new Uint8Array(MAGIC_BYTE_PROBE_LEN)
|
||||
const { bytesRead } = await handle.read(
|
||||
buffer,
|
||||
0,
|
||||
MAGIC_BYTE_PROBE_LEN,
|
||||
0,
|
||||
)
|
||||
head = buffer.subarray(0, bytesRead)
|
||||
} finally {
|
||||
await handle.close()
|
||||
}
|
||||
} catch {
|
||||
return 'application/octet-stream'
|
||||
}
|
||||
|
||||
for (const sig of MAGIC_BYTE_SIGNATURES) {
|
||||
if (sig.matches(head)) return sig.mime
|
||||
}
|
||||
|
||||
if (looksLikeText(head)) return 'text/plain'
|
||||
return 'application/octet-stream'
|
||||
}
|
||||
|
||||
/** Discriminant for the `FilePreview` union below. */
export type PreviewKind = 'text' | 'image' | 'pdf' | 'binary' | 'missing'

/** Metadata shared by every preview variant that found a file on disk. */
export interface BasePreview {
  kind: PreviewKind
  mimeType: string
  /** On-disk size in bytes (from `stat`). */
  size: number
  /** Last-modified time in epoch milliseconds (from `stat`). */
  mtimeMs: number
}

export interface TextPreview extends BasePreview {
  kind: 'text'
  /** Decoded UTF-8 head of the file (lossy — invalid bytes become U+FFFD). */
  snippet: string
  /** True when the on-disk file is larger than `TEXT_PREVIEW_MAX_BYTES`. */
  truncated: boolean
}

export interface ImagePreview extends BasePreview {
  kind: 'image'
  /** Base64 data URL (incl. `data:` prefix) suitable for `<img src>`. */
  dataUrl: string
}

/** Metadata only — the PDF itself is fetched via the download route. */
export interface PdfPreview extends BasePreview {
  kind: 'pdf'
}

/** Metadata only — no inline rendering; the UI offers a download. */
export interface BinaryPreview extends BasePreview {
  kind: 'binary'
}

/** `stat` failed, so no metadata is available at all. */
export interface MissingPreview {
  kind: 'missing'
}

/** Discriminated union returned by `buildFilePreview`; switch on `kind`. */
export type FilePreview =
  | TextPreview
  | ImagePreview
  | PdfPreview
  | BinaryPreview
  | MissingPreview
|
||||
|
||||
/**
|
||||
* Build a preview payload for the inline-card / rail preview Sheet.
|
||||
* Reads at most `TEXT_PREVIEW_MAX_BYTES` (text) or
|
||||
* `IMAGE_PREVIEW_MAX_BYTES` (image) into memory; everything else
|
||||
* returns a metadata-only `binary` preview and the UI offers a
|
||||
* download instead.
|
||||
*/
|
||||
export async function buildFilePreview(
|
||||
absolutePath: string,
|
||||
): Promise<FilePreview> {
|
||||
let stats: Awaited<ReturnType<typeof stat>>
|
||||
try {
|
||||
stats = await stat(absolutePath)
|
||||
} catch {
|
||||
return { kind: 'missing' }
|
||||
}
|
||||
|
||||
const mimeType = await detectMimeType(absolutePath)
|
||||
const base = {
|
||||
mimeType,
|
||||
size: stats.size,
|
||||
mtimeMs: stats.mtimeMs,
|
||||
} as const
|
||||
|
||||
if (mimeType === 'application/pdf') {
|
||||
return { kind: 'pdf', ...base }
|
||||
}
|
||||
|
||||
if (isTextMime(mimeType)) {
|
||||
return readTextPreview(absolutePath, base)
|
||||
}
|
||||
|
||||
if (isImageMime(mimeType)) {
|
||||
return readImagePreview(absolutePath, base)
|
||||
}
|
||||
|
||||
return { kind: 'binary', ...base }
|
||||
}
|
||||
|
||||
async function readTextPreview(
|
||||
absolutePath: string,
|
||||
base: { mimeType: string; size: number; mtimeMs: number },
|
||||
): Promise<TextPreview> {
|
||||
const handle = await open(absolutePath, 'r')
|
||||
try {
|
||||
const length = Math.min(base.size, TEXT_PREVIEW_MAX_BYTES)
|
||||
const buffer = new Uint8Array(length)
|
||||
const { bytesRead } = await handle.read(buffer, 0, length, 0)
|
||||
const snippet = new TextDecoder('utf-8', { fatal: false }).decode(
|
||||
buffer.subarray(0, bytesRead),
|
||||
)
|
||||
return {
|
||||
kind: 'text',
|
||||
...base,
|
||||
snippet,
|
||||
truncated: base.size > TEXT_PREVIEW_MAX_BYTES,
|
||||
}
|
||||
} finally {
|
||||
await handle.close()
|
||||
}
|
||||
}
|
||||
|
||||
async function readImagePreview(
|
||||
absolutePath: string,
|
||||
base: { mimeType: string; size: number; mtimeMs: number },
|
||||
): Promise<ImagePreview | BinaryPreview> {
|
||||
if (base.size > IMAGE_PREVIEW_MAX_BYTES) {
|
||||
// Too big to inline — let the user download.
|
||||
return { kind: 'binary', ...base }
|
||||
}
|
||||
const handle = await open(absolutePath, 'r')
|
||||
try {
|
||||
const buffer = new Uint8Array(base.size)
|
||||
await handle.read(buffer, 0, base.size, 0)
|
||||
const dataUrl = `data:${base.mimeType};base64,${Buffer.from(buffer).toString('base64')}`
|
||||
return { kind: 'image', ...base, dataUrl }
|
||||
} finally {
|
||||
await handle.close()
|
||||
}
|
||||
}
|
||||
|
||||
function isTextMime(mime: string): boolean {
|
||||
if (mime.startsWith('text/')) return true
|
||||
return (
|
||||
mime === 'application/json' ||
|
||||
mime === 'application/x-ndjson' ||
|
||||
mime === 'application/xml' ||
|
||||
mime === 'application/yaml' ||
|
||||
mime === 'application/toml' ||
|
||||
mime === 'application/sql' ||
|
||||
mime === 'application/x-sh'
|
||||
)
|
||||
}
|
||||
|
||||
function isImageMime(mime: string): boolean {
|
||||
return mime.startsWith('image/') && mime !== 'image/svg+xml'
|
||||
// SVG is text — let it go through the text path so users can read
|
||||
// markup, not view a base64 blob.
|
||||
}
|
||||
|
||||
/**
|
||||
* Crude text-vs-binary heuristic for files whose extension and magic
|
||||
* bytes both fail to identify them. Counts NUL bytes — text files
|
||||
* essentially never contain them; binaries usually do.
|
||||
*/
|
||||
function looksLikeText(head: Uint8Array): boolean {
|
||||
if (head.length === 0) return true
|
||||
let nulCount = 0
|
||||
for (const byte of head) {
|
||||
if (byte === 0) nulCount += 1
|
||||
}
|
||||
return nulCount === 0
|
||||
}
|
||||
@@ -0,0 +1,311 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Converts an aggregated OpenClaw session history (rich content blocks
|
||||
* across the agent's main + sub-sessions) into the flat AgentHistoryPage
|
||||
* shape the chat panel consumes.
|
||||
*
|
||||
* Input: OpenClawSessionHistory.messages — each message has `content`
|
||||
* that is either a string OR an array of typed blocks
|
||||
* ({type: 'text'|'thinking'|'toolCall'|'toolResult'}). The HTTP endpoint
|
||||
* returns the array form even though the type definition says string.
|
||||
*
|
||||
* Output: AgentHistoryEntry[] — flat text per entry, separate `reasoning`
|
||||
* and `toolCalls` fields the UI renders as collapsible sections.
|
||||
*
|
||||
* Tool result pairing: `toolCall` blocks emit on assistant messages;
|
||||
* the matching `toolResult` arrives in a later message (typically with
|
||||
* role 'tool' or 'toolResult'). We pair them by `toolCallId` so the
|
||||
* resulting AgentHistoryToolCall has both input and output.
|
||||
*/
|
||||
|
||||
import { unwrapBrowserosAcpUserMessage } from '../../../lib/agents/acpx-runtime'
|
||||
import type {
|
||||
AgentHistoryEntry,
|
||||
AgentHistoryToolCall,
|
||||
} from '../../../lib/agents/agent-types'
|
||||
import type { AgentHistoryPage } from '../../../lib/agents/types'
|
||||
import type {
|
||||
OpenClawSessionHistory,
|
||||
OpenClawSessionHistoryMessage,
|
||||
} from './openclaw-http-client'
|
||||
|
||||
// Cron-injected prompt: `[cron:<jobId> <label>] <payload> … Current time: …`.
// Group 1 is presumably the job label; group 2 (the payload) is what
// `cleanSingleUserMessage` surfaces to the user.
const CRON_PROMPT_PREFIX_PATTERN =
  /^\[cron:[0-9a-f-]+ ([^\]]+)\]\s*([\s\S]*?)\n*Current time:[^\n]*(?:\n[\s\S]*)?$/
// Delivery-instruction trailer OpenClaw appends to cron payloads; stripped
// before display.
const CRON_DELIVERY_TRAILER =
  /\n*Use the message tool if you need to notify the user directly[\s\S]*$/
// Separator line OpenClaw inserts between coalesced queued user messages.
// No `g` flag — safe to reuse for both `.test` and `.split`.
const QUEUED_MARKER_LINE =
  /^\[Queued user message that arrived while the previous turn was still active\]\s*$/m
// Subagent seed message — pure scaffolding, dropped entirely.
const SUBAGENT_CONTEXT_PREFIX = /^\[Subagent Context\][\s\S]*$/
// Emitted by OpenClaw's acp-cli ahead of the BrowserOS envelope. Three
// prefix shapes (any combination, in this stack order):
//
//   1. `[media attached: <internal-path> (<mime>)]`   ← per attachment
//   2. `[<weekday> <YYYY-MM-DD HH:MM> <TZ>]`          ← injectTimestamp
//   3. `[Working directory: <path>]`                  ← acp-cli prefixCwd
//
// Stacks #1 may appear multiple times (one per image). Stack #2 and #3
// can render on the same line separated by a space. Each known prefix is
// anchored on its content shape (not just `[…]`) to avoid clobbering
// user-typed lines that happen to start with a bracket.
const OPENCLAW_MEDIA_PREFIX_LINE = /^\[media attached:[^\]\n]*\]\n/
const OPENCLAW_TIMESTAMP_PREFIX =
  /^\[(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun) \d{4}-\d{2}-\d{2} \d{2}:\d{2}[^\]\n]*\][ \t]*/
const OPENCLAW_WORKDIR_PREFIX = /^\[Working directory: [^\]\n]*\]\n+/
|
||||
|
||||
function stripOpenClawAcpCliEnvelope(value: string): string {
|
||||
let s = value
|
||||
while (OPENCLAW_MEDIA_PREFIX_LINE.test(s)) {
|
||||
s = s.replace(OPENCLAW_MEDIA_PREFIX_LINE, '')
|
||||
}
|
||||
s = s.replace(OPENCLAW_TIMESTAMP_PREFIX, '')
|
||||
s = s.replace(OPENCLAW_WORKDIR_PREFIX, '')
|
||||
return s
|
||||
}
|
||||
|
||||
/**
|
||||
* Strip OpenClaw + BrowserOS scaffolding from a "user" message before
|
||||
* showing it in the chat panel.
|
||||
*
|
||||
* BrowserOS-side envelope (`<role>…</role>\n\n<user_request>…</user_request>`)
|
||||
* is delegated to `unwrapBrowserosAcpUserMessage`, which performs an
|
||||
* exact-string match against the same constants `buildBrowserosAcpPrompt`
|
||||
* uses to wrap. Matcher and wrapper live in the same repo, so the two
|
||||
* always travel together.
|
||||
*
|
||||
* OpenClaw's acp-cli prepends a `[Working directory: <path>]\n\n` line
|
||||
* before the BrowserOS envelope (see /app/dist/acp-cli-*.js, line 1361).
|
||||
* We strip that single line up-front so the `^<role>` anchor in
|
||||
* `unwrapBrowserosAcpUserMessage` matches.
|
||||
*
|
||||
* OpenClaw-injected scaffolding (cron prefix, queued-marker, subagent
|
||||
* context) is still pattern-matched here. Removing those requires either
|
||||
* an OpenClaw schema change exposing the structured trigger payload, or a
|
||||
* BrowserOS-side side-channel (cache cron payloads on `cron.add` and look
|
||||
* up by jobId). Tracked as the next cleanup; until then this is best-
|
||||
* effort with text-level patterns.
|
||||
*/
|
||||
export function cleanHistoryUserText(raw: string): string {
|
||||
if (!raw) return raw
|
||||
// Queued-marker case: this is structurally a multi-message blob, so
|
||||
// split first and recurse into each chunk. We keep the join character
|
||||
// narrow (single newline) so e.g. five cron payloads render as five
|
||||
// visually-separate lines rather than one wall of text.
|
||||
if (QUEUED_MARKER_LINE.test(raw)) {
|
||||
const chunks = raw
|
||||
.split(
|
||||
/^\[Queued user message that arrived while the previous turn was still active\]\s*$/m,
|
||||
)
|
||||
.map((chunk) => cleanSingleUserMessage(chunk))
|
||||
.filter((chunk) => chunk.length > 0)
|
||||
return chunks.join('\n')
|
||||
}
|
||||
return cleanSingleUserMessage(raw)
|
||||
}
|
||||
|
||||
function cleanSingleUserMessage(raw: string): string {
|
||||
const trimmed = raw.trim()
|
||||
if (!trimmed) return ''
|
||||
// Subagent context seed: pure scaffolding, drop entirely. The real
|
||||
// task lives in the subagent's system prompt; the user-message body
|
||||
// is just framing the model never produced.
|
||||
if (SUBAGENT_CONTEXT_PREFIX.test(trimmed)) {
|
||||
return ''
|
||||
}
|
||||
const cronMatch = CRON_PROMPT_PREFIX_PATTERN.exec(trimmed)
|
||||
if (cronMatch) {
|
||||
const payload = cronMatch[2] ?? ''
|
||||
return payload.replace(CRON_DELIVERY_TRAILER, '').trim()
|
||||
}
|
||||
// Strip OpenClaw's acp-cli envelope (media-attached lines + timestamp
|
||||
// + workdir) before delegating, so the BrowserOS unwrap helper's
|
||||
// `^<role>` anchor matches.
|
||||
const withoutEnvelope = stripOpenClawAcpCliEnvelope(trimmed)
|
||||
return unwrapBrowserosAcpUserMessage(withoutEnvelope).trim()
|
||||
}
|
||||
|
||||
/**
 * Loosely-typed view of one OpenClaw content block. Every field is
 * optional because the wire payload is not validated here — consumers
 * narrow with `typeof` checks before use. The trailing catch-all
 * variant keeps unknown block types assignable, so the cast in
 * `normalizeBlocks` stays honest.
 */
type RichBlock =
  | { type: 'text'; text?: string }
  | { type: 'thinking'; thinking?: string; text?: string }
  | {
      type: 'toolCall'
      id?: string
      toolCallId?: string
      name?: string
      arguments?: unknown
    }
  | {
      type: 'toolResult'
      toolCallId?: string
      content?: unknown
      isError?: boolean
    }
  | { type: string; [key: string]: unknown }

// We hold the AgentHistoryToolCall reference itself in `pending` so a
// later `toolResult` block mutates the same object that was already
// pushed onto the assistant entry's `toolCalls` array.
type PendingToolCall = AgentHistoryToolCall
|
||||
|
||||
/**
 * Flatten an aggregated OpenClaw session history into the
 * AgentHistoryPage shape the chat panel consumes. Messages are walked
 * in order; `tool`-role messages never become entries themselves —
 * their results are folded into the assistant entry that issued the
 * matching toolCall (paired by toolCallId via `pendingByToolCallId`,
 * which holds the same object references pushed onto entries, so the
 * fold-in is an in-place mutation). Order-sensitive: results must
 * arrive after their calls, which the wire format guarantees per the
 * module header.
 */
export function convertOpenClawHistoryToAgentHistory(
  agentId: string,
  raw: OpenClawSessionHistory,
): AgentHistoryPage {
  const items: AgentHistoryEntry[] = []
  // Resolved tool calls keyed by toolCallId — used to attach `output`
  // back to the assistant entry that issued the call once the tool
  // result arrives in a subsequent message.
  const pendingByToolCallId = new Map<string, PendingToolCall>()

  // Fallback ids for messages without a messageId; deterministic per page.
  let entryCounter = 0
  const nextId = () => `${agentId}:hist:${entryCounter++}`

  for (const message of raw.messages) {
    const blocks = normalizeBlocks(message)
    const role = normalizeRole(message.role)

    if (!role) {
      // 'system' / 'tool' messages aren't shown as their own chat entries;
      // tool results get folded into the assistant entry they complete.
      if (message.role === 'tool') {
        applyToolResults(blocks, pendingByToolCallId)
      }
      continue
    }

    const rawText = collectText(blocks).trim()
    // Only user text carries OpenClaw/BrowserOS scaffolding worth stripping.
    const text = role === 'user' ? cleanHistoryUserText(rawText) : rawText
    const reasoningText = collectThinking(blocks).trim()
    const toolCallEntries = collectToolCalls(blocks, pendingByToolCallId)

    // Skip empty entries. Two cases:
    // - User: cleaner returned empty after stripping scaffolding (e.g.
    //   dropped Subagent Context message). No bubble to render.
    // - Assistant: model returned only thinking blocks (common with
    //   MiniMax `thinking: minimal` for trivial prompts) and no text
    //   or tools. The empty bubble + dangling reasoning collapsible
    //   reads as broken UI; cleaner to drop the turn entirely.
    if (!text && toolCallEntries.length === 0) continue

    // Everything is surfaced under the synthetic 'main' session —
    // sub-session provenance is flattened away at this layer.
    const entry: AgentHistoryEntry = {
      id: message.messageId ?? nextId(),
      agentId,
      sessionId: 'main',
      role,
      text,
      createdAt: message.timestamp ?? 0,
    }
    if (reasoningText) {
      entry.reasoning = { text: reasoningText }
    }
    if (toolCallEntries.length > 0) {
      entry.toolCalls = toolCallEntries
    }

    items.push(entry)
  }

  return {
    agentId,
    sessionId: 'main',
    items,
  }
}
|
||||
|
||||
function normalizeBlocks(message: OpenClawSessionHistoryMessage): RichBlock[] {
|
||||
const content = (message as { content: unknown }).content
|
||||
if (typeof content === 'string') {
|
||||
return content ? [{ type: 'text', text: content }] : []
|
||||
}
|
||||
if (Array.isArray(content)) {
|
||||
return content as RichBlock[]
|
||||
}
|
||||
return []
|
||||
}
|
||||
|
||||
function normalizeRole(
|
||||
role: OpenClawSessionHistoryMessage['role'],
|
||||
): 'user' | 'assistant' | null {
|
||||
if (role === 'user' || role === 'assistant') return role
|
||||
return null
|
||||
}
|
||||
|
||||
function collectText(blocks: RichBlock[]): string {
|
||||
const parts: string[] = []
|
||||
for (const block of blocks) {
|
||||
if (block.type === 'text' && typeof block.text === 'string') {
|
||||
parts.push(block.text)
|
||||
}
|
||||
}
|
||||
return parts.join('\n')
|
||||
}
|
||||
|
||||
function collectThinking(blocks: RichBlock[]): string {
|
||||
const parts: string[] = []
|
||||
for (const block of blocks) {
|
||||
if (block.type === 'thinking') {
|
||||
const value =
|
||||
typeof block.thinking === 'string'
|
||||
? block.thinking
|
||||
: typeof block.text === 'string'
|
||||
? block.text
|
||||
: ''
|
||||
if (value) parts.push(value)
|
||||
}
|
||||
}
|
||||
return parts.join('\n\n')
|
||||
}
|
||||
|
||||
function collectToolCalls(
|
||||
blocks: RichBlock[],
|
||||
pending: Map<string, PendingToolCall>,
|
||||
): AgentHistoryToolCall[] {
|
||||
const out: AgentHistoryToolCall[] = []
|
||||
for (const block of blocks) {
|
||||
if (block.type !== 'toolCall') continue
|
||||
const callId =
|
||||
typeof block.toolCallId === 'string'
|
||||
? block.toolCallId
|
||||
: typeof block.id === 'string'
|
||||
? block.id
|
||||
: undefined
|
||||
if (!callId) continue
|
||||
const toolName = typeof block.name === 'string' ? block.name : 'unknown'
|
||||
const entry: AgentHistoryToolCall = {
|
||||
toolCallId: callId,
|
||||
toolName,
|
||||
status: 'completed',
|
||||
input: block.arguments,
|
||||
}
|
||||
out.push(entry)
|
||||
// Hold the same reference so a later toolResult mutates the entry
|
||||
// already pushed onto the assistant's toolCalls array.
|
||||
pending.set(callId, entry)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
function applyToolResults(
|
||||
blocks: RichBlock[],
|
||||
pending: Map<string, PendingToolCall>,
|
||||
): void {
|
||||
for (const block of blocks) {
|
||||
if (block.type !== 'toolResult') continue
|
||||
const callId =
|
||||
typeof block.toolCallId === 'string' ? block.toolCallId : undefined
|
||||
if (!callId) continue
|
||||
const entry = pending.get(callId)
|
||||
if (!entry) continue
|
||||
if (block.isError) {
|
||||
entry.status = 'failed'
|
||||
entry.error =
|
||||
typeof block.content === 'string'
|
||||
? block.content
|
||||
: JSON.stringify(block.content)
|
||||
} else {
|
||||
entry.output = block.content
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,10 +4,40 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { join } from 'node:path'
|
||||
import { join, relative, resolve, sep } from 'node:path'
|
||||
|
||||
const STATE_DIR_NAME = '.openclaw'
|
||||
|
||||
/**
|
||||
* Path-traversal guard for `agent.name` before it gets joined into
|
||||
* the host workspace directory. The name is user-supplied at
|
||||
* agent-create time, and `path.join` happily resolves `..` /
|
||||
* absolute segments — so a name like `../../tmp` would point the
|
||||
* workspace at the user's home directory, the harness's pre-turn
|
||||
* snapshot would walk it, and `produced_files` rows would point at
|
||||
* arbitrary host paths that subsequent download / preview routes
|
||||
* would then serve as "agent outputs".
|
||||
*
|
||||
* Reject anything that isn't a flat, single-segment name composed
|
||||
* of safe filename characters. The check is intentionally
|
||||
* conservative — agent names are short slugs in practice.
|
||||
*/
|
||||
export function isAgentWorkspaceNameSafe(name: string): boolean {
|
||||
if (typeof name !== 'string') return false
|
||||
const trimmed = name.trim()
|
||||
if (trimmed === '' || trimmed === '.' || trimmed === '..') return false
|
||||
// No path separators, no NULs, no control chars (charCode < 0x20).
|
||||
for (let i = 0; i < trimmed.length; i++) {
|
||||
const code = trimmed.charCodeAt(i)
|
||||
if (code < 0x20) return false
|
||||
}
|
||||
if (/[\\/]/.test(trimmed)) return false
|
||||
// No `..` segments and no leading dot (avoid hidden / dotfile escapes).
|
||||
if (trimmed.startsWith('.')) return false
|
||||
if (trimmed.includes('..')) return false
|
||||
return true
|
||||
}
|
||||
|
||||
export function getOpenClawStateDir(openclawDir: string): string {
|
||||
return join(openclawDir, STATE_DIR_NAME)
|
||||
}
|
||||
@@ -24,10 +54,27 @@ export function getHostWorkspaceDir(
|
||||
openclawDir: string,
|
||||
agentName: string,
|
||||
): string {
|
||||
return join(
|
||||
getOpenClawStateDir(openclawDir),
|
||||
if (agentName !== 'main' && !isAgentWorkspaceNameSafe(agentName)) {
|
||||
throw new Error(
|
||||
`Refusing to compute workspace dir for unsafe agent name: ${agentName}`,
|
||||
)
|
||||
}
|
||||
const stateDir = getOpenClawStateDir(openclawDir)
|
||||
const candidate = resolve(
|
||||
stateDir,
|
||||
agentName === 'main' ? 'workspace' : `workspace-${agentName}`,
|
||||
)
|
||||
// Defensive containment check: even with a safe-looking name the
|
||||
// resolved path must live under the state dir. If it doesn't,
|
||||
// refuse rather than return a path the caller would then trust.
|
||||
const stateDirResolved = resolve(stateDir)
|
||||
const rel = relative(stateDirResolved, candidate)
|
||||
if (rel === '' || rel.startsWith('..') || rel.startsWith(`..${sep}`)) {
|
||||
throw new Error(
|
||||
`Resolved workspace dir escapes openclaw state dir: ${candidate}`,
|
||||
)
|
||||
}
|
||||
return candidate
|
||||
}
|
||||
|
||||
export function mergeEnvContent(
|
||||
|
||||
@@ -1,211 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Minimal OpenAI-compatible chat client against the OpenClaw gateway.
|
||||
* Used exclusively by the harness's image carve-out: when the user
|
||||
* attaches images to an OpenClaw agent, the harness diverts the turn
|
||||
* here instead of through the ACP bridge (which silently drops image
|
||||
* content blocks). The gateway's `/v1/chat/completions` endpoint
|
||||
* accepts OpenAI-style multimodal `image_url` parts.
|
||||
*
|
||||
* Output is normalized to `AgentStreamEvent` so the rest of the harness
|
||||
* pipeline (UI streaming, history persistence) doesn't care that the
|
||||
* transport is HTTP rather than ACP for this turn.
|
||||
*/
|
||||
|
||||
import type { AgentStreamEvent } from '../../../lib/agents/types'
|
||||
import { logger } from '../../../lib/logger'
|
||||
|
||||
export type OpenAIContentPart =
|
||||
| { type: 'text'; text: string }
|
||||
| { type: 'image_url'; image_url: { url: string } }
|
||||
|
||||
export interface OpenAIChatMessage {
|
||||
role: 'system' | 'user' | 'assistant'
|
||||
content: string | OpenAIContentPart[]
|
||||
}
|
||||
|
||||
export interface GatewayChatTurnInput {
|
||||
/** Gateway-side agent name. Equal to the harness id post Step 9 backfill. */
|
||||
agentId: string
|
||||
sessionKey: string
|
||||
messages: OpenAIChatMessage[]
|
||||
signal?: AbortSignal
|
||||
}
|
||||
|
||||
export class OpenClawGatewayChatClient {
|
||||
constructor(
|
||||
private readonly getHostPort: () => number,
|
||||
private readonly getToken: () => Promise<string>,
|
||||
) {}
|
||||
|
||||
async streamTurn(
|
||||
input: GatewayChatTurnInput,
|
||||
): Promise<ReadableStream<AgentStreamEvent>> {
|
||||
const token = await this.getToken()
|
||||
const response = await fetch(
|
||||
`http://127.0.0.1:${this.getHostPort()}/v1/chat/completions`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: resolveAgentModel(input.agentId),
|
||||
stream: true,
|
||||
messages: input.messages,
|
||||
user: `browseros:${input.agentId}:${input.sessionKey}`,
|
||||
}),
|
||||
signal: input.signal,
|
||||
},
|
||||
)
|
||||
|
||||
if (!response.ok) {
|
||||
const detail = await response.text().catch(() => '')
|
||||
throw new Error(
|
||||
detail || `OpenClaw gateway chat failed with status ${response.status}`,
|
||||
)
|
||||
}
|
||||
const body = response.body
|
||||
if (!body) {
|
||||
throw new Error('OpenClaw gateway chat response had no body')
|
||||
}
|
||||
|
||||
return new ReadableStream<AgentStreamEvent>({
|
||||
start(controller) {
|
||||
void pumpOpenAIChunks(body, controller, input.signal)
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
function resolveAgentModel(agentId: string): string {
|
||||
// The gateway routes `openclaw` → its default `main` provider config,
|
||||
// and `openclaw/<agentId>` → the per-agent provider config. Backfilled
|
||||
// legacy agents (`main`, orphans) can use the unprefixed form.
|
||||
return agentId === 'main' ? 'openclaw' : `openclaw/${agentId}`
|
||||
}
|
||||
|
||||
async function pumpOpenAIChunks(
|
||||
body: ReadableStream<Uint8Array>,
|
||||
controller: ReadableStreamDefaultController<AgentStreamEvent>,
|
||||
signal?: AbortSignal,
|
||||
): Promise<void> {
|
||||
const reader = body.getReader()
|
||||
const decoder = new TextDecoder()
|
||||
let buffer = ''
|
||||
let closed = false
|
||||
let aborted = false
|
||||
let stopReason: string | undefined
|
||||
// Re-emit explicit signal aborts as a clean cancel rather than letting
|
||||
// the underlying `reader.read()` reject — keeps the controller in a
|
||||
// sensible state if the caller bails (e.g. tab close).
|
||||
const onAbort = () => {
|
||||
aborted = true
|
||||
void reader.cancel().catch(() => {})
|
||||
}
|
||||
signal?.addEventListener('abort', onAbort, { once: true })
|
||||
|
||||
const flushLine = (line: string) => {
|
||||
if (closed || !line.startsWith('data:')) return
|
||||
const payload = line.slice(5).trim()
|
||||
if (!payload || payload === '[DONE]') {
|
||||
finish()
|
||||
return
|
||||
}
|
||||
let parsed: unknown
|
||||
try {
|
||||
parsed = JSON.parse(payload)
|
||||
} catch {
|
||||
controller.enqueue({
|
||||
type: 'error',
|
||||
message: 'Failed to parse OpenClaw gateway chunk',
|
||||
})
|
||||
finish()
|
||||
return
|
||||
}
|
||||
const text = extractDeltaText(parsed)
|
||||
if (text) {
|
||||
controller.enqueue({
|
||||
type: 'text_delta',
|
||||
text,
|
||||
stream: 'output',
|
||||
rawType: 'agent_message_chunk',
|
||||
})
|
||||
}
|
||||
const finishReason = extractFinishReason(parsed)
|
||||
if (finishReason) {
|
||||
stopReason = finishReason === 'stop' ? 'end_turn' : finishReason
|
||||
finish()
|
||||
}
|
||||
}
|
||||
|
||||
const finish = () => {
|
||||
if (closed) return
|
||||
closed = true
|
||||
controller.enqueue({ type: 'done', stopReason: stopReason ?? 'end_turn' })
|
||||
controller.close()
|
||||
}
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
if (aborted) {
|
||||
if (!closed) {
|
||||
closed = true
|
||||
controller.close()
|
||||
}
|
||||
return
|
||||
}
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
let idx = buffer.indexOf('\n\n')
|
||||
while (idx >= 0) {
|
||||
const event = buffer.slice(0, idx)
|
||||
buffer = buffer.slice(idx + 2)
|
||||
for (const line of event.split('\n')) flushLine(line)
|
||||
if (closed) return
|
||||
idx = buffer.indexOf('\n\n')
|
||||
}
|
||||
}
|
||||
if (!closed) {
|
||||
// Stream ended without an explicit [DONE]. Treat as natural end.
|
||||
finish()
|
||||
}
|
||||
} catch (err) {
|
||||
if (closed || aborted) return
|
||||
logger.warn('OpenClaw gateway chat stream errored', {
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
controller.enqueue({
|
||||
type: 'error',
|
||||
message: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
closed = true
|
||||
controller.close()
|
||||
} finally {
|
||||
signal?.removeEventListener('abort', onAbort)
|
||||
reader.releaseLock()
|
||||
}
|
||||
}
|
||||
|
||||
interface OpenAIStreamChunk {
|
||||
choices?: Array<{
|
||||
delta?: { content?: unknown }
|
||||
finish_reason?: string | null
|
||||
}>
|
||||
}
|
||||
|
||||
function extractDeltaText(value: unknown): string {
|
||||
const chunk = value as OpenAIStreamChunk
|
||||
const content = chunk?.choices?.[0]?.delta?.content
|
||||
return typeof content === 'string' ? content : ''
|
||||
}
|
||||
|
||||
function extractFinishReason(value: unknown): string | null {
|
||||
const chunk = value as OpenAIStreamChunk
|
||||
return chunk?.choices?.[0]?.finish_reason ?? null
|
||||
}
|
||||
@@ -44,6 +44,24 @@ export interface OpenClawSessionHistoryMessage {
|
||||
messageId?: string
|
||||
messageSeq?: number
|
||||
timestamp?: number
|
||||
/**
|
||||
* OpenClaw extension envelope. The gateway records the per-session
|
||||
* monotonic sequence on `__openclaw.seq` rather than the top-level
|
||||
* `messageSeq` field, so cursor logic reads from here. `id` is the
|
||||
* gateway's stable message id.
|
||||
*/
|
||||
__openclaw?: { id?: string; seq?: number }
|
||||
/**
|
||||
* Origin of this message when the response merges multiple sessions.
|
||||
* Absent on single-session responses for backward compatibility.
|
||||
*/
|
||||
source?: 'main' | 'cron' | 'hook' | 'channel' | 'other'
|
||||
/**
|
||||
* The session key this message originated from. Differs from the
|
||||
* top-level `sessionKey` when sub-sessions (e.g. cron runs) are merged
|
||||
* into a parent agent's main-session response.
|
||||
*/
|
||||
subSessionKey?: string
|
||||
}
|
||||
|
||||
export interface OpenClawSessionHistory {
|
||||
@@ -74,10 +92,7 @@ export type OpenClawSessionHistoryEvent =
|
||||
| { type: 'error'; data: { message: string } }
|
||||
|
||||
export class OpenClawHttpClient {
|
||||
constructor(
|
||||
private readonly hostPort: number,
|
||||
private readonly getToken: () => Promise<string>,
|
||||
) {}
|
||||
constructor(private readonly hostPort: number) {}
|
||||
|
||||
async getSessionHistory(
|
||||
sessionKey: string,
|
||||
@@ -103,15 +118,9 @@ export class OpenClawHttpClient {
|
||||
|
||||
async isAuthenticated(): Promise<boolean> {
|
||||
try {
|
||||
const token = await this.getToken()
|
||||
const response = await fetch(
|
||||
`http://127.0.0.1:${this.hostPort}/v1/models`,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
},
|
||||
{ method: 'GET' },
|
||||
)
|
||||
return response.ok
|
||||
} catch {
|
||||
@@ -124,15 +133,11 @@ export class OpenClawHttpClient {
|
||||
input: OpenClawSessionHistoryInput,
|
||||
extraHeaders: Record<string, string>,
|
||||
): Promise<Response> {
|
||||
const token = await this.getToken()
|
||||
const response = await fetch(
|
||||
`http://127.0.0.1:${this.hostPort}${buildHistoryPath(sessionKey, input)}`,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
...extraHeaders,
|
||||
},
|
||||
headers: extraHeaders,
|
||||
signal: input.signal,
|
||||
},
|
||||
)
|
||||
|
||||
@@ -1,276 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Connects to the OpenClaw gateway's WebSocket control plane and pipes
|
||||
* chat broadcast events into a ClawSession state machine. The observer
|
||||
* is a transport layer only — it handles the WS connection lifecycle
|
||||
* (connect, handshake, reconnect) and delegates all state management
|
||||
* to ClawSession.
|
||||
*/
|
||||
|
||||
import WebSocket from 'ws'
|
||||
import { logger } from '../../../lib/logger'
|
||||
import type { ClawSession } from './claw-session'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Protocol types (subset of OpenClaw gateway protocol v3)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const PROTOCOL_VERSION = 3
|
||||
const HANDSHAKE_REQUEST_ID = 'connect'
|
||||
const RECONNECT_DELAY_MS = 5_000
|
||||
const CONNECT_TIMEOUT_MS = 10_000
|
||||
|
||||
interface RequestFrame {
|
||||
type: 'req'
|
||||
id: string
|
||||
method: string
|
||||
params: Record<string, unknown>
|
||||
}
|
||||
|
||||
type IncomingFrame =
|
||||
| { type: 'res'; id: string; ok: true; payload?: unknown }
|
||||
| {
|
||||
type: 'res'
|
||||
id: string
|
||||
ok: false
|
||||
error: { code: string; message: string }
|
||||
}
|
||||
| { type: 'event'; event: string; payload?: unknown }
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Observer
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class OpenClawObserver {
|
||||
private ws: WebSocket | null = null
|
||||
private reconnectTimer: ReturnType<typeof setTimeout> | null = null
|
||||
private connected = false
|
||||
private closed = false
|
||||
private gatewayUrl: string | null = null
|
||||
private gatewayToken: string | null = null
|
||||
|
||||
constructor(private readonly session: ClawSession) {}
|
||||
|
||||
/** Start observing the gateway at the given URL with the given token. */
|
||||
connect(gatewayUrl: string, token: string): void {
|
||||
this.gatewayUrl = gatewayUrl
|
||||
this.gatewayToken = token
|
||||
this.closed = false
|
||||
this.doConnect()
|
||||
}
|
||||
|
||||
/** Stop observing and close the WebSocket. */
|
||||
disconnect(): void {
|
||||
this.closed = true
|
||||
this.clearReconnect()
|
||||
if (this.ws) {
|
||||
try {
|
||||
this.ws.close()
|
||||
} catch {}
|
||||
this.ws = null
|
||||
}
|
||||
this.connected = false
|
||||
}
|
||||
|
||||
/** Whether the observer has an active WS connection. */
|
||||
isConnected(): boolean {
|
||||
return this.connected
|
||||
}
|
||||
|
||||
// ── Private ─────────────────────────────────────────────────────────
|
||||
|
||||
private doConnect(): void {
|
||||
if (this.closed || !this.gatewayUrl || !this.gatewayToken) return
|
||||
|
||||
const wsUrl = this.gatewayUrl
|
||||
.replace(/^http:\/\//, 'ws://')
|
||||
.replace(/^https:\/\//, 'wss://')
|
||||
|
||||
logger.debug('OpenClaw observer connecting', { url: wsUrl })
|
||||
|
||||
const ws = new WebSocket(wsUrl)
|
||||
this.ws = ws
|
||||
|
||||
const connectTimeout = setTimeout(() => {
|
||||
logger.warn('OpenClaw observer handshake timeout')
|
||||
ws.terminate()
|
||||
}, CONNECT_TIMEOUT_MS)
|
||||
|
||||
let handshakeSent = false
|
||||
|
||||
ws.on('message', (raw) => {
|
||||
let frame: IncomingFrame
|
||||
try {
|
||||
frame = JSON.parse(raw.toString('utf8')) as IncomingFrame
|
||||
} catch {
|
||||
return
|
||||
}
|
||||
|
||||
// The gateway sends a connect.challenge event before accepting
|
||||
// the connect request. Send the handshake after receiving it.
|
||||
if (
|
||||
frame.type === 'event' &&
|
||||
frame.event === 'connect.challenge' &&
|
||||
!handshakeSent
|
||||
) {
|
||||
handshakeSent = true
|
||||
const connectReq: RequestFrame = {
|
||||
type: 'req',
|
||||
id: HANDSHAKE_REQUEST_ID,
|
||||
method: 'connect',
|
||||
params: {
|
||||
minProtocol: PROTOCOL_VERSION,
|
||||
maxProtocol: PROTOCOL_VERSION,
|
||||
client: {
|
||||
id: 'openclaw-tui',
|
||||
displayName: 'browseros-observer',
|
||||
version: '1.0.0',
|
||||
platform: 'node',
|
||||
mode: 'ui',
|
||||
},
|
||||
role: 'operator',
|
||||
scopes: ['operator.read'],
|
||||
auth: { token: this.gatewayToken },
|
||||
},
|
||||
}
|
||||
ws.send(JSON.stringify(connectReq))
|
||||
return
|
||||
}
|
||||
|
||||
// Handshake response
|
||||
if (frame.type === 'res' && frame.id === HANDSHAKE_REQUEST_ID) {
|
||||
clearTimeout(connectTimeout)
|
||||
if (frame.ok) {
|
||||
this.connected = true
|
||||
logger.info('OpenClaw observer connected')
|
||||
} else {
|
||||
logger.warn('OpenClaw observer handshake failed', {
|
||||
error: frame.error,
|
||||
})
|
||||
ws.close()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcast events (only process after handshake completes)
|
||||
if (frame.type === 'event' && this.connected) {
|
||||
this.handleEvent(frame.event, frame.payload)
|
||||
}
|
||||
})
|
||||
|
||||
ws.on('close', () => {
|
||||
clearTimeout(connectTimeout)
|
||||
this.connected = false
|
||||
this.ws = null
|
||||
|
||||
// Reset any agents stuck in "working" to "unknown" — we missed
|
||||
// the final/end event because the WS closed mid-task. The
|
||||
// ClawSession will re-infer correct state from JSONL when the
|
||||
// observer reconnects and ensureObserverConnected() re-seeds.
|
||||
for (const [agentId, state] of this.session.getAllStates()) {
|
||||
if (state.status === 'working') {
|
||||
this.session.transition(agentId, 'unknown')
|
||||
}
|
||||
}
|
||||
|
||||
if (!this.closed) {
|
||||
logger.debug('OpenClaw observer disconnected, scheduling reconnect')
|
||||
this.scheduleReconnect()
|
||||
}
|
||||
})
|
||||
|
||||
ws.on('error', (err) => {
|
||||
clearTimeout(connectTimeout)
|
||||
logger.debug('OpenClaw observer WS error', {
|
||||
message: err.message,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
private handleEvent(eventName: string, payload: unknown): void {
|
||||
if (eventName === 'chat') {
|
||||
this.handleChatEvent(payload)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a gateway chat broadcast event and transition the ClawSession
|
||||
* state machine accordingly.
|
||||
*/
|
||||
private handleChatEvent(payload: unknown): void {
|
||||
if (!payload || typeof payload !== 'object') return
|
||||
const p = payload as Record<string, unknown>
|
||||
|
||||
const sessionKey = typeof p.sessionKey === 'string' ? p.sessionKey : null
|
||||
const state = typeof p.state === 'string' ? p.state : null
|
||||
|
||||
if (!sessionKey || !state) return
|
||||
|
||||
const agentId = extractAgentId(sessionKey)
|
||||
if (!agentId) return
|
||||
|
||||
if (state === 'delta' || state === 'streaming') {
|
||||
this.session.transition(agentId, 'working', {
|
||||
sessionKey,
|
||||
currentTool: extractToolName(p),
|
||||
})
|
||||
} else if (state === 'final' || state === 'end') {
|
||||
this.session.transition(agentId, 'idle', { sessionKey })
|
||||
} else if (state === 'error') {
|
||||
const errorMsg =
|
||||
typeof p.errorMessage === 'string'
|
||||
? p.errorMessage
|
||||
: typeof p.error === 'string'
|
||||
? p.error
|
||||
: 'Unknown error'
|
||||
this.session.transition(agentId, 'error', { sessionKey, error: errorMsg })
|
||||
}
|
||||
}
|
||||
|
||||
private scheduleReconnect(): void {
|
||||
this.clearReconnect()
|
||||
this.reconnectTimer = setTimeout(() => {
|
||||
this.reconnectTimer = null
|
||||
this.doConnect()
|
||||
}, RECONNECT_DELAY_MS)
|
||||
}
|
||||
|
||||
private clearReconnect(): void {
|
||||
if (this.reconnectTimer) {
|
||||
clearTimeout(this.reconnectTimer)
|
||||
this.reconnectTimer = null
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Extract agentId from an OpenClaw session key.
|
||||
* Format: "agent:<agentId>:..." — we take the segment after "agent:".
|
||||
*/
|
||||
function extractAgentId(sessionKey: string): string | null {
|
||||
if (!sessionKey.startsWith('agent:')) return null
|
||||
const colonIdx = sessionKey.indexOf(':', 6)
|
||||
if (colonIdx === -1) return sessionKey.slice(6)
|
||||
return sessionKey.slice(6, colonIdx)
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to extract a tool name from a chat event payload.
|
||||
*/
|
||||
function extractToolName(payload: Record<string, unknown>): string | null {
|
||||
if (typeof payload.toolName === 'string') return payload.toolName
|
||||
if (typeof payload.tool === 'string') return payload.tool
|
||||
const content = payload.content
|
||||
if (content && typeof content === 'object' && 'name' in content) {
|
||||
const name = (content as Record<string, unknown>).name
|
||||
if (typeof name === 'string') return name
|
||||
}
|
||||
return null
|
||||
}
|
||||
@@ -17,6 +17,7 @@ import {
|
||||
OPENCLAW_IMAGE,
|
||||
} from '@browseros/shared/constants/openclaw'
|
||||
import { DEFAULT_PORTS } from '@browseros/shared/constants/ports'
|
||||
import type { AgentStreamEvent } from '../../../lib/agents/types'
|
||||
import { getOpenClawDir } from '../../../lib/browseros-dir'
|
||||
import { logger } from '../../../lib/logger'
|
||||
import { withProcessLock } from '../../../lib/process-lock'
|
||||
@@ -40,6 +41,7 @@ import {
|
||||
type OpenClawAgentRecord,
|
||||
OpenClawCliClient,
|
||||
type OpenClawConfigBatchEntry,
|
||||
type OpenClawSessionEntry,
|
||||
} from './openclaw-cli-client'
|
||||
import {
|
||||
buildOpenClawCliProviderModelRef,
|
||||
@@ -61,8 +63,8 @@ import {
|
||||
OpenClawHttpClient,
|
||||
type OpenClawSessionHistory,
|
||||
type OpenClawSessionHistoryEvent,
|
||||
type OpenClawSessionHistoryMessage,
|
||||
} from './openclaw-http-client'
|
||||
import { OpenClawObserver } from './openclaw-observer'
|
||||
import {
|
||||
type ResolvedOpenClawProviderConfig,
|
||||
resolveSupportedOpenClawProvider,
|
||||
@@ -234,6 +236,104 @@ function getOpenClawBrowserOSSessionPrefix(agentId: string): string {
|
||||
return `agent:${agentId}:openai-user:browseros:${agentId}:`
|
||||
}
|
||||
|
||||
const MAIN_SESSION_KEY_PATTERN = /^agent:([^:]+):main$/
|
||||
|
||||
/**
|
||||
* Extract the agent id from a main-session key (e.g. `agent:research:main`
|
||||
* → `research`). Returns null when the key isn't a top-level main session,
|
||||
* which signals the caller to use the per-session fetch path.
|
||||
*/
|
||||
function extractAgentIdFromMainSessionKey(sessionKey: string): string | null {
|
||||
const match = MAIN_SESSION_KEY_PATTERN.exec(sessionKey)
|
||||
return match?.[1] ?? null
|
||||
}
|
||||
|
||||
/**
|
||||
* Classify a session key by its source. The pattern is `agent:<id>:<kind>:...`;
|
||||
* the third segment identifies how the session was started.
|
||||
*/
|
||||
function parseSessionSource(
|
||||
sessionKey: string,
|
||||
): NonNullable<OpenClawSessionHistoryMessage['source']> {
|
||||
const parts = sessionKey.split(':')
|
||||
if (parts[0] !== 'agent' || parts.length < 3) return 'other'
|
||||
switch (parts[2]) {
|
||||
case 'main':
|
||||
return 'main'
|
||||
case 'cron':
|
||||
return 'cron'
|
||||
case 'hook':
|
||||
return 'hook'
|
||||
case 'channel':
|
||||
return 'channel'
|
||||
default:
|
||||
return 'other'
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-session monotonic sequence. Gateway encodes it inside the
|
||||
* `__openclaw` extension envelope; the legacy top-level `messageSeq`
|
||||
* field exists in the type but is rarely populated.
|
||||
*/
|
||||
function resolveMessageSeq(msg: OpenClawSessionHistoryMessage): number | null {
|
||||
const fromEnvelope = msg.__openclaw?.seq
|
||||
if (typeof fromEnvelope === 'number' && Number.isFinite(fromEnvelope)) {
|
||||
return fromEnvelope
|
||||
}
|
||||
if (typeof msg.messageSeq === 'number' && Number.isFinite(msg.messageSeq)) {
|
||||
return msg.messageSeq
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Stable chronological order across sessions. Falls back to seq
|
||||
* when timestamps tie or are missing, preserving intra-session order.
|
||||
*/
|
||||
function compareMessageOrder(
|
||||
a: OpenClawSessionHistoryMessage,
|
||||
b: OpenClawSessionHistoryMessage,
|
||||
): number {
|
||||
const aTs = a.timestamp ?? 0
|
||||
const bTs = b.timestamp ?? 0
|
||||
if (aTs !== bTs) return aTs - bTs
|
||||
return (resolveMessageSeq(a) ?? 0) - (resolveMessageSeq(b) ?? 0)
|
||||
}
|
||||
|
||||
/**
|
||||
* Compound cursor for the aggregated history endpoint. Maps each
|
||||
* session key to either:
|
||||
* - a `messageSeq` to fetch BEFORE on the next page (more historical),
|
||||
* - or `null` meaning the session is exhausted and should be skipped.
|
||||
*
|
||||
* Encoded as base64url JSON for URL-safe transport in `?cursor=`.
|
||||
*/
|
||||
type CompoundCursor = Record<string, number | null>
|
||||
|
||||
function decodeCompoundCursor(encoded: string | undefined): CompoundCursor {
|
||||
if (!encoded) return {}
|
||||
try {
|
||||
const json = Buffer.from(encoded, 'base64url').toString('utf8')
|
||||
const parsed = JSON.parse(json)
|
||||
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
|
||||
const out: CompoundCursor = {}
|
||||
for (const [k, v] of Object.entries(parsed)) {
|
||||
if (typeof v === 'number' || v === null) out[k] = v
|
||||
}
|
||||
return out
|
||||
}
|
||||
} catch {
|
||||
// Malformed cursors are treated as "first page" — preferable to
|
||||
// erroring out the entire history fetch on a bad client cursor.
|
||||
}
|
||||
return {}
|
||||
}
|
||||
|
||||
function encodeCompoundCursor(cursor: CompoundCursor): string {
|
||||
return Buffer.from(JSON.stringify(cursor), 'utf8').toString('base64url')
|
||||
}
|
||||
|
||||
export interface AgentOverview {
|
||||
agentId: string
|
||||
status: AgentLiveStatus
|
||||
@@ -260,8 +360,6 @@ export class OpenClawService {
|
||||
private httpClient: OpenClawHttpClient
|
||||
private openclawDir: string
|
||||
private hostPort = OPENCLAW_GATEWAY_CONTAINER_PORT
|
||||
private token: string
|
||||
private tokenLoaded = false
|
||||
private lastError: string | null = null
|
||||
private browserosServerPort: number
|
||||
private resourcesDir: string | null
|
||||
@@ -272,7 +370,6 @@ export class OpenClawService {
|
||||
private stopLogTail: (() => void) | null = null
|
||||
private lifecycleLock: Promise<void> = Promise.resolve()
|
||||
private clawSession = new ClawSession()
|
||||
private observer = new OpenClawObserver(this.clawSession)
|
||||
|
||||
constructor(config: OpenClawServiceConfig = {}) {
|
||||
this.openclawDir = getOpenClawDir()
|
||||
@@ -281,13 +378,9 @@ export class OpenClawService {
|
||||
projectDir: this.openclawDir,
|
||||
browserosRoot: config.browserosDir,
|
||||
})
|
||||
this.token = crypto.randomUUID()
|
||||
this.cliClient = new OpenClawCliClient(this.runtime)
|
||||
this.bootstrapCliClient = this.buildBootstrapCliClient()
|
||||
this.httpClient = new OpenClawHttpClient(
|
||||
this.hostPort,
|
||||
async () => this.token,
|
||||
)
|
||||
this.httpClient = new OpenClawHttpClient(this.hostPort)
|
||||
this.browserosServerPort =
|
||||
config.browserosServerPort ?? DEFAULT_PORTS.server
|
||||
this.resourcesDir = config.resourcesDir ?? null
|
||||
@@ -323,19 +416,6 @@ export class OpenClawService {
|
||||
return this.hostPort
|
||||
}
|
||||
|
||||
/**
|
||||
* Current gateway auth token. The token string is loaded from
|
||||
* `gateway.auth.token` in the persisted openclaw.json during setup,
|
||||
* with a freshly generated UUID as fallback. Exposed so the ACPx
|
||||
* harness can pass it to spawned `openclaw acp` child processes via
|
||||
* the documented `OPENCLAW_GATEWAY_TOKEN` env var (avoids both the
|
||||
* `--token` process-listing leak and reliance on a token-file path
|
||||
* that doesn't exist as a discrete file inside the container).
|
||||
*/
|
||||
getGatewayToken(): string {
|
||||
return this.token
|
||||
}
|
||||
|
||||
/** Subscribe to real-time agent status changes from the ClawSession state machine. */
|
||||
onAgentStatusChange(
|
||||
listener: (agentId: string, state: AgentSessionState) => void,
|
||||
@@ -348,6 +428,70 @@ export class OpenClawService {
|
||||
return this.clawSession.getState(agentId)
|
||||
}
|
||||
|
||||
/**
|
||||
* Drive the live-status state machine from a turn lifecycle event the
|
||||
* AgentHarnessService observed. Replaces the previous WS observer
|
||||
* pipeline that re-tapped the same gateway events; the harness already
|
||||
* sees them as ACP `session/update` notifications, so we forward those
|
||||
* here. Caller passes the stream events verbatim.
|
||||
*
|
||||
* `tool_call` and `tool_call_update` populate `currentTool` so the
|
||||
* dashboard SSE keeps its existing payload shape. `done` clears
|
||||
* working state to `idle`; `error` keeps a sticky error badge.
|
||||
*/
|
||||
recordAgentTurnEvent(
|
||||
agentId: string,
|
||||
sessionKey: string,
|
||||
event:
|
||||
| { type: 'turn_started' }
|
||||
| { type: 'turn_event'; event: AgentStreamEvent }
|
||||
| { type: 'turn_ended'; error?: string },
|
||||
): void {
|
||||
if (event.type === 'turn_started') {
|
||||
this.clawSession.transition(agentId, 'working', { sessionKey })
|
||||
return
|
||||
}
|
||||
if (event.type === 'turn_ended') {
|
||||
if (event.error !== undefined) {
|
||||
this.clawSession.transition(agentId, 'error', {
|
||||
sessionKey,
|
||||
error: event.error,
|
||||
})
|
||||
} else {
|
||||
this.clawSession.transition(agentId, 'idle', { sessionKey })
|
||||
}
|
||||
return
|
||||
}
|
||||
const inner = event.event
|
||||
if (inner.type === 'tool_call') {
|
||||
this.clawSession.transition(agentId, 'working', {
|
||||
sessionKey,
|
||||
currentTool: inner.title ?? null,
|
||||
})
|
||||
return
|
||||
}
|
||||
if (inner.type === 'error') {
|
||||
this.clawSession.transition(agentId, 'error', {
|
||||
sessionKey,
|
||||
error: inner.message,
|
||||
})
|
||||
return
|
||||
}
|
||||
if (inner.type === 'done') {
|
||||
this.clawSession.transition(agentId, 'idle', { sessionKey })
|
||||
return
|
||||
}
|
||||
if (inner.type === 'text_delta') {
|
||||
// Heartbeat — keep the existing `working` row fresh; preserve
|
||||
// the last-known currentTool by passing it through.
|
||||
const prev = this.clawSession.getState(agentId)
|
||||
this.clawSession.transition(agentId, 'working', {
|
||||
sessionKey,
|
||||
currentTool: prev.currentTool,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ── Lifecycle ────────────────────────────────────────────────────────
|
||||
|
||||
/** Warm the VM and gateway image so later setup/start avoids registry work. */
|
||||
@@ -394,14 +538,13 @@ export class OpenClawService {
|
||||
providerKeyCount: Object.keys(provider.envValues).length,
|
||||
})
|
||||
|
||||
await this.refreshGatewayAuthToken()
|
||||
await this.ensureGatewayPortAllocated(logProgress)
|
||||
|
||||
logProgress('Bootstrapping OpenClaw config...')
|
||||
await this.bootstrapCliClient.runOnboard({
|
||||
acceptRisk: true,
|
||||
authChoice: 'skip',
|
||||
gatewayAuth: 'token',
|
||||
gatewayAuth: 'none',
|
||||
gatewayBind: 'lan',
|
||||
gatewayPort: OPENCLAW_GATEWAY_CONTAINER_PORT,
|
||||
installDaemon: false,
|
||||
@@ -418,8 +561,6 @@ export class OpenClawService {
|
||||
logProgress('Validating OpenClaw config...')
|
||||
await this.assertConfigValid(this.bootstrapCliClient)
|
||||
|
||||
await this.refreshGatewayAuthToken()
|
||||
|
||||
logProgress('Starting OpenClaw gateway...')
|
||||
await this.runtime.startGateway(
|
||||
this.buildGatewayRuntimeSpec(),
|
||||
@@ -478,8 +619,6 @@ export class OpenClawService {
|
||||
|
||||
await this.runtime.ensureReady(logProgress)
|
||||
|
||||
logProgress('Refreshing gateway auth token...')
|
||||
await this.refreshGatewayAuthToken()
|
||||
await this.ensureStateEnvFile()
|
||||
|
||||
await this.ensureGatewayPortAllocated(logProgress)
|
||||
@@ -533,7 +672,6 @@ export class OpenClawService {
|
||||
return this.withLifecycleLock('stop', async () => {
|
||||
logger.info('Stopping OpenClaw service', { hostPort: this.hostPort })
|
||||
this.controlPlaneStatus = 'disconnected'
|
||||
this.observer.disconnect()
|
||||
this.stopGatewayLogTail()
|
||||
await this.runtime.stopGateway()
|
||||
logger.info('OpenClaw container stopped')
|
||||
@@ -550,8 +688,6 @@ export class OpenClawService {
|
||||
this.controlPlaneStatus = 'reconnecting'
|
||||
await this.runtime.ensureReady(logProgress)
|
||||
this.stopGatewayLogTail()
|
||||
logProgress('Refreshing gateway auth token...')
|
||||
await this.refreshGatewayAuthToken()
|
||||
await this.ensureStateEnvFile()
|
||||
await this.ensureGatewayPortAllocated(logProgress)
|
||||
logProgress('Restarting OpenClaw gateway...')
|
||||
@@ -596,8 +732,6 @@ export class OpenClawService {
|
||||
throw new Error('OpenClaw gateway is not ready')
|
||||
}
|
||||
|
||||
logProgress('Reloading gateway auth token...')
|
||||
await this.refreshGatewayAuthToken()
|
||||
this.controlPlaneStatus = 'reconnecting'
|
||||
logProgress('Reconnecting control plane...')
|
||||
await this.runControlPlaneCall(() => this.cliClient.probe())
|
||||
@@ -607,7 +741,6 @@ export class OpenClawService {
|
||||
|
||||
async shutdown(): Promise<void> {
|
||||
this.controlPlaneStatus = 'disconnected'
|
||||
this.observer.disconnect()
|
||||
this.stopGatewayLogTail()
|
||||
try {
|
||||
await this.runtime.stopGateway()
|
||||
@@ -794,9 +927,155 @@ export class OpenClawService {
|
||||
input: { limit?: number; cursor?: string; signal?: AbortSignal } = {},
|
||||
): Promise<OpenClawSessionHistory> {
|
||||
await this.assertGatewayReady()
|
||||
return this.runControlPlaneCall(() =>
|
||||
this.httpClient.getSessionHistory(sessionKey, input),
|
||||
return this.runControlPlaneCall(async () => {
|
||||
const agentId = extractAgentIdFromMainSessionKey(sessionKey)
|
||||
if (!agentId) {
|
||||
return this.httpClient.getSessionHistory(sessionKey, input)
|
||||
}
|
||||
return this.fetchAggregatedAgentHistory(sessionKey, agentId, input)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Aggregates the agent's main session and every sub-session (cron,
|
||||
* hook, channel) into a single chronological response. The main
|
||||
* session's own messages are included; each sub-session's messages
|
||||
* are tagged with `source` and `subSessionKey` so the UI can
|
||||
* distinguish autonomous turns from user-driven turns.
|
||||
*
|
||||
* Pagination uses a compound cursor that encodes a per-session seq
|
||||
* for each session in scope (`{<sessionKey>: seq | null}`). Each page
|
||||
* fetches each non-exhausted session with its own per-session cursor,
|
||||
* merges messages across sessions by timestamp, slices to `limit`,
|
||||
* and emits a fresh compound cursor reflecting where each session
|
||||
* should resume on the next page. A session with `null` in the
|
||||
* cursor is exhausted and skipped.
|
||||
*
|
||||
* Sub-session fetches that fail are logged and dropped — partial
|
||||
* timelines are preferable to a hard failure that hides the main
|
||||
* session.
|
||||
*/
|
||||
private async fetchAggregatedAgentHistory(
|
||||
mainSessionKey: string,
|
||||
agentId: string,
|
||||
input: { limit?: number; cursor?: string; signal?: AbortSignal },
|
||||
): Promise<OpenClawSessionHistory> {
|
||||
const compoundIn = decodeCompoundCursor(input.cursor)
|
||||
const sessions = await this.cliClient
|
||||
.listSessions(agentId)
|
||||
.catch((err): OpenClawSessionEntry[] => {
|
||||
logger.warn(
|
||||
'Failed to list OpenClaw sub-sessions; falling back to main only',
|
||||
{ agentId, error: err instanceof Error ? err.message : String(err) },
|
||||
)
|
||||
return []
|
||||
})
|
||||
|
||||
// Build the candidate set from the agent's session directory plus
|
||||
// the main key (which may not appear in `sessions.list` if the file
|
||||
// hasn't been written yet for a fresh agent).
|
||||
const targetKeys = new Set<string>([mainSessionKey])
|
||||
for (const entry of sessions) {
|
||||
if (entry.key?.startsWith(`agent:${agentId}:`)) {
|
||||
targetKeys.add(entry.key)
|
||||
}
|
||||
}
|
||||
|
||||
// Only fetch sessions that aren't exhausted by the inbound cursor.
|
||||
// A session with `null` in the cursor is fully read; skip it on
|
||||
// subsequent pages.
|
||||
const activeKeys = Array.from(targetKeys).filter(
|
||||
(k) => compoundIn[k] !== null,
|
||||
)
|
||||
|
||||
const fetchedHistories = await Promise.all(
|
||||
activeKeys.map(async (key) => {
|
||||
const sessionCursor = compoundIn[key]
|
||||
try {
|
||||
const history = await this.httpClient.getSessionHistory(key, {
|
||||
limit: input.limit,
|
||||
cursor:
|
||||
typeof sessionCursor === 'number'
|
||||
? String(sessionCursor)
|
||||
: undefined,
|
||||
signal: input.signal,
|
||||
})
|
||||
return { key, history }
|
||||
} catch (err) {
|
||||
logger.warn('Failed to fetch OpenClaw sub-session history', {
|
||||
sessionKey: key,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
return null
|
||||
}
|
||||
}),
|
||||
)
|
||||
|
||||
type Annotated = OpenClawSessionHistoryMessage & { __sessionKey: string }
|
||||
const merged: Annotated[] = []
|
||||
let truncated = false
|
||||
for (const result of fetchedHistories) {
|
||||
if (!result) continue
|
||||
const source = parseSessionSource(result.key)
|
||||
const isMain = result.key === mainSessionKey
|
||||
for (const msg of result.history.messages) {
|
||||
merged.push({
|
||||
...msg,
|
||||
source,
|
||||
...(isMain ? {} : { subSessionKey: result.key }),
|
||||
__sessionKey: result.key,
|
||||
})
|
||||
}
|
||||
if (result.history.truncated) truncated = true
|
||||
}
|
||||
|
||||
merged.sort(compareMessageOrder)
|
||||
|
||||
// The merged window contains the latest portion fetched. We emit
|
||||
// up to `limit` messages from the END (newest), and compute the
|
||||
// resume position for each session as the seq of the EARLIEST
|
||||
// emitted message that came from that session.
|
||||
const limited =
|
||||
typeof input.limit === 'number' && input.limit > 0
|
||||
? merged.slice(-input.limit)
|
||||
: merged
|
||||
|
||||
const compoundOut: CompoundCursor = {}
|
||||
// Carry forward exhausted sessions so subsequent pages keep skipping them.
|
||||
for (const key of Array.from(targetKeys)) {
|
||||
if (compoundIn[key] === null) {
|
||||
compoundOut[key] = null
|
||||
}
|
||||
}
|
||||
for (const result of fetchedHistories) {
|
||||
if (!result) continue
|
||||
const key = result.key
|
||||
const earliestEmitted = limited.find((m) => m.__sessionKey === key)
|
||||
const sessionFetchHasMore = Boolean(result.history.hasMore)
|
||||
const droppedFromMerge =
|
||||
result.history.messages.length >
|
||||
limited.filter((m) => m.__sessionKey === key).length
|
||||
const sessionHasMore = sessionFetchHasMore || droppedFromMerge
|
||||
if (!sessionHasMore) {
|
||||
compoundOut[key] = null
|
||||
continue
|
||||
}
|
||||
const seq = earliestEmitted ? resolveMessageSeq(earliestEmitted) : null
|
||||
compoundOut[key] = seq
|
||||
}
|
||||
|
||||
const hasMore = Object.values(compoundOut).some(
|
||||
(v) => typeof v === 'number',
|
||||
)
|
||||
const messages = limited.map(({ __sessionKey: _drop, ...rest }) => rest)
|
||||
|
||||
return {
|
||||
sessionKey: mainSessionKey,
|
||||
messages,
|
||||
cursor: hasMore ? encodeCompoundCursor(compoundOut) : null,
|
||||
hasMore,
|
||||
truncated: truncated || limited.length < merged.length,
|
||||
}
|
||||
}
|
||||
|
||||
async streamSessionHistory(
|
||||
@@ -871,7 +1150,6 @@ export class OpenClawService {
|
||||
try {
|
||||
await this.runtime.ensureReady()
|
||||
|
||||
await this.refreshGatewayAuthToken()
|
||||
await this.ensureStateEnvFile()
|
||||
|
||||
const persistedPort = await readPersistedGatewayPort(this.openclawDir)
|
||||
@@ -1001,10 +1279,7 @@ export class OpenClawService {
|
||||
private setPort(hostPort: number): void {
|
||||
if (hostPort === this.hostPort) return
|
||||
this.hostPort = hostPort
|
||||
this.httpClient = new OpenClawHttpClient(
|
||||
this.hostPort,
|
||||
async () => this.token,
|
||||
)
|
||||
this.httpClient = new OpenClawHttpClient(this.hostPort)
|
||||
}
|
||||
|
||||
private async ensureGatewayPortAllocated(
|
||||
@@ -1037,25 +1312,13 @@ export class OpenClawService {
|
||||
}
|
||||
|
||||
private async isGatewayAuthenticated(hostPort: number): Promise<boolean> {
|
||||
if (!this.tokenLoaded) {
|
||||
logger.debug(
|
||||
'OpenClaw gateway port is ready before auth token is loaded',
|
||||
{
|
||||
hostPort,
|
||||
},
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
const client =
|
||||
hostPort === this.hostPort
|
||||
? this.httpClient
|
||||
: new OpenClawHttpClient(hostPort, async () => this.token)
|
||||
: new OpenClawHttpClient(hostPort)
|
||||
const authenticated = await client.isAuthenticated()
|
||||
if (!authenticated) {
|
||||
logger.warn('OpenClaw gateway port rejected current auth token', {
|
||||
hostPort,
|
||||
})
|
||||
logger.warn('OpenClaw gateway readiness probe failed', { hostPort })
|
||||
}
|
||||
return authenticated
|
||||
}
|
||||
@@ -1096,12 +1359,10 @@ export class OpenClawService {
|
||||
|
||||
private async runControlPlaneCall<T>(fn: () => Promise<T>): Promise<T> {
|
||||
try {
|
||||
await this.ensureTokenLoaded()
|
||||
const result = await fn()
|
||||
this.controlPlaneStatus = 'connected'
|
||||
this.lastGatewayError = null
|
||||
this.lastRecoveryReason = null
|
||||
this.ensureObserverConnected()
|
||||
return result
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
@@ -1113,20 +1374,10 @@ export class OpenClawService {
|
||||
}
|
||||
}
|
||||
|
||||
private ensureObserverConnected(): void {
|
||||
if (this.observer.isConnected()) return
|
||||
// ClawSession starts empty after the JSONL seed was removed; the WS
|
||||
// observer fills in agent status as events arrive.
|
||||
const url = `http://127.0.0.1:${this.hostPort}`
|
||||
this.observer.connect(url, this.token)
|
||||
}
|
||||
|
||||
private classifyControlPlaneError(
|
||||
error: unknown,
|
||||
): OpenClawGatewayRecoveryReason {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
if (message.includes('Unauthorized')) return 'token_mismatch'
|
||||
if (message.includes('token')) return 'token_mismatch'
|
||||
if (message.includes('not ready')) return 'container_not_ready'
|
||||
return 'unknown'
|
||||
}
|
||||
@@ -1354,7 +1605,6 @@ export class OpenClawService {
|
||||
hostPort: this.hostPort,
|
||||
hostHome: this.openclawDir,
|
||||
envFilePath: this.getStateEnvPath(),
|
||||
gatewayToken: this.tokenLoaded ? this.token : undefined,
|
||||
timezone: Intl.DateTimeFormat().resolvedOptions().timeZone,
|
||||
}
|
||||
}
|
||||
@@ -1459,50 +1709,6 @@ export class OpenClawService {
|
||||
return true
|
||||
}
|
||||
|
||||
private async ensureTokenLoaded(): Promise<void> {
|
||||
if (this.tokenLoaded) {
|
||||
return
|
||||
}
|
||||
if (!existsSync(this.getStateConfigPath())) {
|
||||
return
|
||||
}
|
||||
|
||||
await this.loadTokenFromConfig()
|
||||
}
|
||||
|
||||
private async refreshGatewayAuthToken(): Promise<void> {
|
||||
this.tokenLoaded = false
|
||||
if (!existsSync(this.getStateConfigPath())) {
|
||||
return
|
||||
}
|
||||
|
||||
await this.loadTokenFromConfig()
|
||||
}
|
||||
|
||||
private async loadTokenFromConfig(): Promise<void> {
|
||||
try {
|
||||
const config = JSON.parse(
|
||||
await readFile(this.getStateConfigPath(), 'utf-8'),
|
||||
) as {
|
||||
gateway?: {
|
||||
auth?: {
|
||||
token?: unknown
|
||||
}
|
||||
}
|
||||
}
|
||||
const token = config.gateway?.auth?.token
|
||||
if (typeof token === 'string' && token) {
|
||||
this.token = token
|
||||
this.tokenLoaded = true
|
||||
logger.info('Loaded OpenClaw gateway token from mounted config')
|
||||
}
|
||||
} catch (err) {
|
||||
logger.warn('Failed to load OpenClaw gateway token from mounted config', {
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
private createProgressLogger(
|
||||
onLog?: (msg: string) => void,
|
||||
): (msg: string) => void {
|
||||
|
||||
@@ -0,0 +1,359 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* SQLite-backed store for files an OpenClaw agent produced inside its
|
||||
* workspace during a chat turn. The detection model is a per-turn
|
||||
* snapshot diff: take a `(path → size, mtime)` map of the workspace
|
||||
* before the turn starts, re-scan after the SSE `done` event, and
|
||||
* write a row for any new or modified file.
|
||||
*
|
||||
* Adapter-agnostic by design — the watcher is injected with the
|
||||
* agent's workspace dir, so V2 can plug Claude / Codex turn lifecycle
|
||||
* into the same store with a different `workspaceDir`.
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'node:crypto'
|
||||
import { realpath, stat } from 'node:fs/promises'
|
||||
import { relative, resolve, sep } from 'node:path'
|
||||
import { and, desc, eq } from 'drizzle-orm'
|
||||
import { type BrowserOsDatabase, getDb } from '../../../lib/db'
|
||||
import {
|
||||
agentDefinitions,
|
||||
type NewProducedFileRow,
|
||||
type ProducedFileRow,
|
||||
producedFiles,
|
||||
} from '../../../lib/db/schema'
|
||||
import { walkWorkspace } from './produced-files-walker'
|
||||
|
||||
const TURN_PROMPT_MAX_CHARS = 280
|
||||
|
||||
export interface FileSnapshotEntry {
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
/** A `(workspace-relative path → fs metadata)` snapshot of a workspace. */
|
||||
export type FileSnapshot = Map<string, FileSnapshotEntry>
|
||||
|
||||
export interface FinalizeTurnInput {
|
||||
agentDefinitionId: string
|
||||
sessionKey: string
|
||||
turnId: string
|
||||
/** Raw user prompt; truncated to `TURN_PROMPT_MAX_CHARS` before persist. */
|
||||
turnPrompt: string
|
||||
/** Absolute host path to the agent's workspace directory. */
|
||||
workspaceDir: string
|
||||
/** Snapshot taken before the turn began. */
|
||||
before: FileSnapshot
|
||||
}
|
||||
|
||||
export interface ResolvedFile {
|
||||
row: ProducedFileRow
|
||||
/** Absolute host path; guaranteed to live inside the original workspace. */
|
||||
absolutePath: string
|
||||
}
|
||||
|
||||
export class ProducedFilesStore {
|
||||
private readonly db: BrowserOsDatabase
|
||||
|
||||
constructor(options: { db?: BrowserOsDatabase } = {}) {
|
||||
this.db = options.db ?? getDb()
|
||||
}
|
||||
|
||||
/**
|
||||
* Walk the workspace and capture every file's size + mtime. Used to
|
||||
* bracket a chat turn so the post-turn diff knows what changed.
|
||||
*/
|
||||
async snapshotWorkspace(workspaceDir: string): Promise<FileSnapshot> {
|
||||
const snapshot: FileSnapshot = new Map()
|
||||
await walkWorkspace(workspaceDir, (relPath, metadata) => {
|
||||
snapshot.set(relPath, metadata)
|
||||
})
|
||||
return snapshot
|
||||
}
|
||||
|
||||
/**
|
||||
* Diff the live workspace against `before`, persist rows for any
|
||||
* new or modified file, return the rows so the chat-turn finalizer
|
||||
* can broadcast them on the SSE feed. Re-modifications update the
|
||||
* existing row in place (the `(agentDefinitionId, path)` unique
|
||||
* index makes the upsert deterministic).
|
||||
*/
|
||||
async finalizeTurn(input: FinalizeTurnInput): Promise<ProducedFileRow[]> {
|
||||
const after: FileSnapshot = await this.snapshotWorkspace(input.workspaceDir)
|
||||
const changed: Array<{ relPath: string; entry: FileSnapshotEntry }> = []
|
||||
for (const [relPath, entry] of after) {
|
||||
const previous = input.before.get(relPath)
|
||||
if (
|
||||
!previous ||
|
||||
previous.size !== entry.size ||
|
||||
previous.mtimeMs !== entry.mtimeMs
|
||||
) {
|
||||
changed.push({ relPath, entry })
|
||||
}
|
||||
}
|
||||
if (changed.length === 0) return []
|
||||
|
||||
const now = Date.now()
|
||||
const turnPrompt = truncatePrompt(input.turnPrompt)
|
||||
const rows: ProducedFileRow[] = []
|
||||
for (const { relPath, entry } of changed) {
|
||||
const row: NewProducedFileRow = {
|
||||
id: randomUUID(),
|
||||
agentDefinitionId: input.agentDefinitionId,
|
||||
sessionKey: input.sessionKey,
|
||||
turnId: input.turnId,
|
||||
turnPrompt,
|
||||
path: relPath,
|
||||
size: entry.size,
|
||||
mtimeMs: entry.mtimeMs,
|
||||
createdAt: now,
|
||||
detectedBy: 'diff',
|
||||
}
|
||||
// Upsert on (agent, path) — re-modifications win, no duplicates.
|
||||
const upserted = this.db
|
||||
.insert(producedFiles)
|
||||
.values(row)
|
||||
.onConflictDoUpdate({
|
||||
target: [producedFiles.agentDefinitionId, producedFiles.path],
|
||||
set: {
|
||||
sessionKey: row.sessionKey,
|
||||
turnId: row.turnId,
|
||||
turnPrompt: row.turnPrompt,
|
||||
size: row.size,
|
||||
mtimeMs: row.mtimeMs,
|
||||
createdAt: row.createdAt,
|
||||
detectedBy: row.detectedBy,
|
||||
},
|
||||
})
|
||||
.returning()
|
||||
.all()
|
||||
const persisted = upserted[0] ?? row
|
||||
rows.push(persisted as ProducedFileRow)
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
/** Inline-card query — files for a single assistant turn. */
|
||||
async listByTurn(turnId: string): Promise<ProducedFileRow[]> {
|
||||
return this.db
|
||||
.select()
|
||||
.from(producedFiles)
|
||||
.where(eq(producedFiles.turnId, turnId))
|
||||
.orderBy(desc(producedFiles.createdAt))
|
||||
.all()
|
||||
}
|
||||
|
||||
/**
|
||||
* Outputs-rail query — every file an agent has produced across all
|
||||
* sessions, newest first.
|
||||
*/
|
||||
async listByAgent(
|
||||
agentDefinitionId: string,
|
||||
options: { limit?: number } = {},
|
||||
): Promise<ProducedFileRow[]> {
|
||||
const limit = options.limit ?? 200
|
||||
return this.db
|
||||
.select()
|
||||
.from(producedFiles)
|
||||
.where(eq(producedFiles.agentDefinitionId, agentDefinitionId))
|
||||
.orderBy(desc(producedFiles.createdAt))
|
||||
.limit(limit)
|
||||
.all()
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a gateway-side OpenClaw agent name (e.g. `main`,
|
||||
* `chief-01`) to the corresponding `agentDefinitions.id` so file
|
||||
* rows can be FK'd back to the harness record.
|
||||
*
|
||||
* Two shapes exist on disk depending on how the agent was added:
|
||||
*
|
||||
* 1. Reconciled rows from `agentHarnessService.reconcileWithGateway`
|
||||
* use `id == openclawAgentId` directly
|
||||
* (see `agent-harness-service.ts:522`).
|
||||
* 2. BrowserOS-created rows use `id = oc-<uuid>` and store the
|
||||
* openclaw name in the `name` column (`db-agent-store.ts:55-65`).
|
||||
*
|
||||
* Lookup tries shape 1 first (direct id hit), then shape 2 by
|
||||
* `(adapter='openclaw', name)`.
|
||||
*/
|
||||
async resolveAgentDefinitionId(
|
||||
openclawAgentId: string,
|
||||
): Promise<string | null> {
|
||||
const directHit = this.db
|
||||
.select({ id: agentDefinitions.id })
|
||||
.from(agentDefinitions)
|
||||
.where(eq(agentDefinitions.id, openclawAgentId))
|
||||
.limit(1)
|
||||
.all()
|
||||
if (directHit[0]) return directHit[0].id
|
||||
|
||||
const byName = this.db
|
||||
.select({ id: agentDefinitions.id })
|
||||
.from(agentDefinitions)
|
||||
.where(
|
||||
and(
|
||||
eq(agentDefinitions.adapter, 'openclaw'),
|
||||
eq(agentDefinitions.name, openclawAgentId),
|
||||
),
|
||||
)
|
||||
.limit(1)
|
||||
.all()
|
||||
return byName[0]?.id ?? null
|
||||
}
|
||||
|
||||
/** Single-row lookup; null if the id is unknown. */
|
||||
async findById(id: string): Promise<ProducedFileRow | null> {
|
||||
const rows = this.db
|
||||
.select()
|
||||
.from(producedFiles)
|
||||
.where(eq(producedFiles.id, id))
|
||||
.limit(1)
|
||||
.all()
|
||||
return rows[0] ?? null
|
||||
}
|
||||
|
||||
/** Used by `removeRegisteredModel` and similar admin paths later on. */
|
||||
async deleteByAgent(agentDefinitionId: string): Promise<void> {
|
||||
this.db
|
||||
.delete(producedFiles)
|
||||
.where(eq(producedFiles.agentDefinitionId, agentDefinitionId))
|
||||
.run()
|
||||
}
|
||||
|
||||
/** Useful for hard-resetting a session's files (e.g. workspace clear). */
|
||||
async deleteBySession(sessionKey: string): Promise<void> {
|
||||
this.db
|
||||
.delete(producedFiles)
|
||||
.where(eq(producedFiles.sessionKey, sessionKey))
|
||||
.run()
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a stored file id to an absolute host path, after validating
|
||||
* that the on-disk path still lives inside `workspaceDir`. The HTTP
|
||||
* download / preview routes are the only callers; the workspace dir
|
||||
* is supplied by the openclaw service so this module stays
|
||||
* adapter-agnostic.
|
||||
*/
|
||||
async resolveFilePath(input: {
|
||||
fileId: string
|
||||
workspaceDir: string
|
||||
}): Promise<ResolvedFile | null> {
|
||||
const row = await this.findById(input.fileId)
|
||||
if (!row) return null
|
||||
|
||||
const absolutePath = await resolveSafeWorkspacePath(
|
||||
input.workspaceDir,
|
||||
row.path,
|
||||
)
|
||||
if (!absolutePath) return null
|
||||
return { row, absolutePath }
|
||||
}
|
||||
|
||||
/**
|
||||
* Group a flat list of rows by `turnId`, preserving the latest-first
|
||||
* order on the row level and keeping the most-recent group first.
|
||||
* The Outputs rail uses this shape directly.
|
||||
*/
|
||||
groupByTurn(rows: ProducedFileRow[]): Array<{
|
||||
turnId: string
|
||||
turnPrompt: string
|
||||
createdAt: number
|
||||
files: ProducedFileRow[]
|
||||
}> {
|
||||
const grouped = new Map<
|
||||
string,
|
||||
{
|
||||
turnId: string
|
||||
turnPrompt: string
|
||||
createdAt: number
|
||||
files: ProducedFileRow[]
|
||||
}
|
||||
>()
|
||||
for (const row of rows) {
|
||||
const existing = grouped.get(row.turnId)
|
||||
if (!existing) {
|
||||
grouped.set(row.turnId, {
|
||||
turnId: row.turnId,
|
||||
turnPrompt: row.turnPrompt,
|
||||
// Group's createdAt = its newest file (rows are
|
||||
// already desc-by-createdAt, so the first one wins).
|
||||
createdAt: row.createdAt,
|
||||
files: [row],
|
||||
})
|
||||
continue
|
||||
}
|
||||
existing.files.push(row)
|
||||
if (row.createdAt > existing.createdAt) {
|
||||
existing.createdAt = row.createdAt
|
||||
}
|
||||
}
|
||||
return Array.from(grouped.values()).sort(
|
||||
(a, b) => b.createdAt - a.createdAt,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
function truncatePrompt(value: string): string {
|
||||
const trimmed = value.trim()
|
||||
if (trimmed.length <= TURN_PROMPT_MAX_CHARS) return trimmed
|
||||
return `${trimmed.slice(0, TURN_PROMPT_MAX_CHARS - 1)}…`
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve `workspaceDir + relPath` to an absolute host path, but only
|
||||
* if the resolved real path lives inside the workspace root. Returns
|
||||
* null on:
|
||||
* - lexical traversal (`..` segments escaping the root),
|
||||
* - symlink escape (a file in the workspace pointing outside it),
|
||||
* - missing files,
|
||||
* - any unreadable path component.
|
||||
*
|
||||
* Exported so the unit test can hit it without a sqlite handle.
|
||||
*/
|
||||
export async function resolveSafeWorkspacePath(
|
||||
workspaceDir: string,
|
||||
relPath: string,
|
||||
): Promise<string | null> {
|
||||
// Lexical containment first — fail fast without touching the FS.
|
||||
const workspaceRoot = resolve(workspaceDir)
|
||||
const lexical = resolve(workspaceRoot, relPath)
|
||||
const lexicalRel = relative(workspaceRoot, lexical)
|
||||
if (
|
||||
lexicalRel === '' ||
|
||||
lexicalRel.startsWith('..') ||
|
||||
lexicalRel.startsWith(`..${sep}`)
|
||||
) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Realpath check — collapses symlinks so a workspace symlink that
|
||||
// points outside the root cannot be downloaded. Falls through to
|
||||
// null if anything errors (file gone, permissions, broken link).
|
||||
try {
|
||||
const [realRoot, realFile] = await Promise.all([
|
||||
realpath(workspaceRoot),
|
||||
realpath(lexical),
|
||||
])
|
||||
const realRel = relative(realRoot, realFile)
|
||||
if (
|
||||
realRel === '' ||
|
||||
realRel.startsWith('..') ||
|
||||
realRel.startsWith(`..${sep}`)
|
||||
) {
|
||||
return null
|
||||
}
|
||||
await stat(realFile)
|
||||
return realFile
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
// Re-export the row type so callers pulling the store don't have to
|
||||
// also import the schema module.
|
||||
export type { ProducedFileRow }
|
||||
@@ -0,0 +1,127 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Workspace walker used by the produced-files diff watcher. Recurses
|
||||
* an OpenClaw agent's workspace directory and yields one
|
||||
* `(workspace-relative path, size, mtime)` triple per file.
|
||||
*
|
||||
* Design choices:
|
||||
*
|
||||
* - **Pure async iteration.** No third-party deps; relies on
|
||||
* `fs.promises.readdir` + `Dirent` so directory traversal is one
|
||||
* syscall per directory.
|
||||
* - **Symlink-aware.** Symlinks themselves aren't followed (they
|
||||
* appear in `Dirent.isSymbolicLink()`); the walker skips them so
|
||||
* an agent can't smuggle host-fs paths into the diff via a
|
||||
* symlink in its workspace.
|
||||
* - **Excludes well-known cruft directories** that no useful agent
|
||||
* output ever lives inside (`node_modules`, `.git`, `.cache`).
|
||||
* These directories are also expensive to traverse, so skipping
|
||||
* them keeps the per-turn snapshot fast.
|
||||
* - **Bounded.** Hard caps on entry count and recursion depth keep
|
||||
* pathological workspaces from stalling the chat-turn finalizer.
|
||||
*/
|
||||
|
||||
import type { Dirent } from 'node:fs'
|
||||
import { readdir, stat } from 'node:fs/promises'
|
||||
import { join, relative, sep } from 'node:path'
|
||||
|
||||
const EXCLUDED_DIRECTORIES = new Set(['node_modules', '.git', '.cache'])
|
||||
|
||||
const MAX_ENTRIES = 50_000
|
||||
const MAX_DEPTH = 16
|
||||
|
||||
export interface WorkspaceFileMetadata {
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
export type WorkspaceFileVisitor = (
|
||||
/** Workspace-relative path (POSIX-style separators). */
|
||||
relativePath: string,
|
||||
metadata: WorkspaceFileMetadata,
|
||||
) => void
|
||||
|
||||
/**
|
||||
* Walk `workspaceDir` recursively, calling `visit` for every regular
|
||||
* file. Returns silently if the directory doesn't exist (a fresh
|
||||
* agent that hasn't produced anything yet shouldn't error here).
|
||||
*/
|
||||
export async function walkWorkspace(
|
||||
workspaceDir: string,
|
||||
visit: WorkspaceFileVisitor,
|
||||
): Promise<void> {
|
||||
let entriesSeen = 0
|
||||
await walk(workspaceDir, workspaceDir, 0, (file) => {
|
||||
entriesSeen += 1
|
||||
if (entriesSeen > MAX_ENTRIES) return false
|
||||
visit(file.relativePath, file.metadata)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
interface VisitedFile {
|
||||
relativePath: string
|
||||
metadata: WorkspaceFileMetadata
|
||||
}
|
||||
|
||||
async function walk(
|
||||
root: string,
|
||||
current: string,
|
||||
depth: number,
|
||||
yieldFile: (file: VisitedFile) => boolean,
|
||||
): Promise<boolean> {
|
||||
if (depth > MAX_DEPTH) return true
|
||||
|
||||
let entries: Dirent[]
|
||||
try {
|
||||
entries = await readdir(current, { withFileTypes: true })
|
||||
} catch {
|
||||
// Workspace dir missing or unreadable — fresh agent that hasn't
|
||||
// written anything yet, or transient permissions issue. Treat as
|
||||
// "no files" rather than throwing.
|
||||
return true
|
||||
}
|
||||
|
||||
for (const entry of entries) {
|
||||
if (EXCLUDED_DIRECTORIES.has(entry.name)) continue
|
||||
const absolute = join(current, entry.name)
|
||||
|
||||
if (entry.isSymbolicLink()) {
|
||||
// Skip symlinks — never follow, never record. Prevents an
|
||||
// agent from smuggling host-fs paths into the diff via a
|
||||
// symlink in its workspace.
|
||||
continue
|
||||
}
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
const keepGoing = await walk(root, absolute, depth + 1, yieldFile)
|
||||
if (!keepGoing) return false
|
||||
continue
|
||||
}
|
||||
|
||||
if (!entry.isFile()) continue
|
||||
|
||||
let stats: Awaited<ReturnType<typeof stat>>
|
||||
try {
|
||||
stats = await stat(absolute)
|
||||
} catch {
|
||||
// Concurrent delete between readdir and stat — skip silently.
|
||||
continue
|
||||
}
|
||||
const relativePath = toPosix(relative(root, absolute))
|
||||
const keepGoing = yieldFile({
|
||||
relativePath,
|
||||
metadata: { size: stats.size, mtimeMs: stats.mtimeMs },
|
||||
})
|
||||
if (!keepGoing) return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
function toPosix(value: string): string {
|
||||
if (sep === '/') return value
|
||||
return value.split(sep).join('/')
|
||||
}
|
||||
@@ -4,16 +4,13 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import type { createRuntimeStore } from 'acpx/runtime'
|
||||
import type { OpenClawGatewayChatClient } from '../../api/services/openclaw/openclaw-gateway-chat-client'
|
||||
import type { AgentDefinition } from './agent-types'
|
||||
import { prepareClaudeCodeContext } from './claude-code/prepare'
|
||||
import { prepareCodexContext } from './codex/prepare'
|
||||
import { prepareOpenClawContext } from './openclaw/prepare'
|
||||
import {
|
||||
maybeHandleOpenClawTurn,
|
||||
prepareOpenClawContext,
|
||||
} from './openclaw/prepare'
|
||||
import type { AgentPromptInput, AgentStreamEvent } from './types'
|
||||
prepareClaudeCodeContext,
|
||||
prepareCodexContext,
|
||||
prepareHermesContext,
|
||||
} from './runtime'
|
||||
|
||||
export interface PreparedAcpxAgentContext {
|
||||
cwd: string
|
||||
@@ -22,6 +19,14 @@ export interface PreparedAcpxAgentContext {
|
||||
commandEnv: Record<string, string>
|
||||
commandIdentity: string
|
||||
useBrowserosMcp: boolean
|
||||
/**
|
||||
* Hostname the agent should use to reach the BrowserOS HTTP MCP server.
|
||||
* Default `127.0.0.1` is correct for host-process adapters (claude, codex,
|
||||
* Phase A host-mode hermes). Container-spawned adapters override this to
|
||||
* `host.containers.internal` so the URL injected into ACP newSession's
|
||||
* mcpServers resolves from inside the container.
|
||||
*/
|
||||
browserosMcpHost?: string
|
||||
openclawSessionKey: string | null
|
||||
}
|
||||
|
||||
@@ -35,29 +40,17 @@ export interface PrepareAcpxAgentContextInput {
|
||||
message: string
|
||||
}
|
||||
|
||||
export interface AcpxAdapterTurnInput {
|
||||
prompt: AgentPromptInput
|
||||
prepared: PreparedAcpxAgentContext
|
||||
sessionStore: ReturnType<typeof createRuntimeStore>
|
||||
openclawGatewayChat: OpenClawGatewayChatClient | null
|
||||
}
|
||||
|
||||
export interface AcpxAgentAdapter {
|
||||
prepare(
|
||||
input: PrepareAcpxAgentContextInput,
|
||||
): Promise<PreparedAcpxAgentContext>
|
||||
maybeHandleTurn?(
|
||||
input: AcpxAdapterTurnInput,
|
||||
): Promise<ReadableStream<AgentStreamEvent> | null>
|
||||
}
|
||||
|
||||
const ADAPTERS: Record<AgentDefinition['adapter'], AcpxAgentAdapter> = {
|
||||
claude: { prepare: prepareClaudeCodeContext },
|
||||
codex: { prepare: prepareCodexContext },
|
||||
openclaw: {
|
||||
prepare: prepareOpenClawContext,
|
||||
maybeHandleTurn: maybeHandleOpenClawTurn,
|
||||
},
|
||||
openclaw: { prepare: prepareOpenClawContext },
|
||||
hermes: { prepare: prepareHermesContext },
|
||||
}
|
||||
|
||||
export function getAcpxAgentAdapter(
|
||||
|
||||
@@ -57,6 +57,7 @@ export async function finishBrowserosManagedContext(input: {
|
||||
skillNames: string[]
|
||||
promptPrefix: string
|
||||
commandEnv: Record<string, string>
|
||||
browserosMcpHost?: string
|
||||
}): Promise<PreparedAcpxAgentContext> {
|
||||
const commandIdentity = stableCommandIdentity(input.commandEnv)
|
||||
const runtimeSessionKey = deriveRuntimeSessionKey({
|
||||
@@ -83,6 +84,7 @@ export async function finishBrowserosManagedContext(input: {
|
||||
commandEnv: input.commandEnv,
|
||||
commandIdentity,
|
||||
useBrowserosMcp: true,
|
||||
browserosMcpHost: input.browserosMcpHost,
|
||||
openclawSessionKey: null,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import { randomUUID } from 'node:crypto'
|
||||
import { constants, type Stats } from 'node:fs'
|
||||
import {
|
||||
access,
|
||||
mkdir,
|
||||
readFile,
|
||||
rename,
|
||||
rm,
|
||||
@@ -18,6 +17,7 @@ import {
|
||||
} from 'node:fs/promises'
|
||||
import { homedir } from 'node:os'
|
||||
import { basename, dirname, join, resolve } from 'node:path'
|
||||
import { ensureDirectory } from '../ensure-directory'
|
||||
import {
|
||||
MEMORY_TEMPLATE,
|
||||
RUNTIME_SKILLS,
|
||||
@@ -66,7 +66,7 @@ export function resolveAgentRuntimePaths(input: {
|
||||
|
||||
/** Seeds the stable per-agent identity and memory home without overwriting edits. */
|
||||
export async function ensureAgentHome(paths: AgentRuntimePaths): Promise<void> {
|
||||
await mkdir(join(paths.agentHome, 'memory'), { recursive: true })
|
||||
await ensureDirectory(join(paths.agentHome, 'memory'))
|
||||
await writeFileIfMissing(join(paths.agentHome, 'SOUL.md'), SOUL_TEMPLATE)
|
||||
await writeFileIfMissing(join(paths.agentHome, 'MEMORY.md'), MEMORY_TEMPLATE)
|
||||
}
|
||||
@@ -89,7 +89,7 @@ export async function materializeCodexHome(input: {
|
||||
skillNames: string[]
|
||||
sourceCodexHome?: string
|
||||
}): Promise<void> {
|
||||
await mkdir(input.paths.codexHome, { recursive: true })
|
||||
await ensureDirectory(input.paths.codexHome)
|
||||
const source =
|
||||
input.sourceCodexHome ??
|
||||
process.env.CODEX_HOME?.trim() ??
|
||||
@@ -163,7 +163,7 @@ export async function ensureUsableCwd(
|
||||
isDefaultWorkspace: boolean,
|
||||
): Promise<void> {
|
||||
if (isDefaultWorkspace) {
|
||||
await mkdir(cwd, { recursive: true })
|
||||
await ensureDirectory(cwd)
|
||||
return
|
||||
}
|
||||
let info: Stats
|
||||
@@ -195,7 +195,7 @@ async function writeFileIfMissing(
|
||||
path: string,
|
||||
content: string,
|
||||
): Promise<void> {
|
||||
await mkdir(dirname(path), { recursive: true })
|
||||
await ensureDirectory(dirname(path))
|
||||
try {
|
||||
await writeFile(path, content, { encoding: 'utf8', flag: 'wx' })
|
||||
} catch (err) {
|
||||
@@ -205,7 +205,7 @@ async function writeFileIfMissing(
|
||||
|
||||
async function symlinkIfPresent(source: string, target: string): Promise<void> {
|
||||
if (!(await sourceFileExists(source))) return
|
||||
await mkdir(dirname(target), { recursive: true })
|
||||
await ensureDirectory(dirname(target))
|
||||
try {
|
||||
await symlink(source, target)
|
||||
} catch (err) {
|
||||
@@ -216,7 +216,7 @@ async function symlinkIfPresent(source: string, target: string): Promise<void> {
|
||||
async function copyIfPresent(source: string, target: string): Promise<void> {
|
||||
if (!(await sourceFileExists(source))) return
|
||||
const content = await readFile(source, 'utf8')
|
||||
await mkdir(dirname(target), { recursive: true })
|
||||
await ensureDirectory(dirname(target))
|
||||
try {
|
||||
await writeFile(target, content, { encoding: 'utf8', flag: 'wx' })
|
||||
} catch (err) {
|
||||
@@ -226,7 +226,7 @@ async function copyIfPresent(source: string, target: string): Promise<void> {
|
||||
|
||||
/** Writes generated content via atomic replace so readers never see partial files. */
|
||||
async function writeFileAtomic(path: string, content: string): Promise<void> {
|
||||
await mkdir(dirname(path), { recursive: true })
|
||||
await ensureDirectory(dirname(path))
|
||||
const temporaryPath = join(
|
||||
dirname(path),
|
||||
`.${basename(path)}.${process.pid}.${randomUUID()}.tmp`,
|
||||
|
||||
@@ -19,13 +19,9 @@ import {
|
||||
createAgentRegistry,
|
||||
createRuntimeStore,
|
||||
} from 'acpx/runtime'
|
||||
import type { OpenClawGatewayChatClient } from '../../api/services/openclaw/openclaw-gateway-chat-client'
|
||||
import { getBrowserosDir } from '../browseros-dir'
|
||||
import { logger } from '../logger'
|
||||
import {
|
||||
getAcpxAgentAdapter,
|
||||
prepareAcpxAgentContext,
|
||||
} from './acpx-agent-adapter'
|
||||
import { prepareAcpxAgentContext } from './acpx-agent-adapter'
|
||||
import {
|
||||
resolveAgentRuntimePaths,
|
||||
wrapCommandWithEnv,
|
||||
@@ -36,6 +32,7 @@ import type {
|
||||
AgentHistoryEntry,
|
||||
AgentHistoryToolCall,
|
||||
} from './agent-types'
|
||||
import { getHermesRuntime } from './runtime'
|
||||
import type {
|
||||
AgentHistoryPage,
|
||||
AgentPromptInput,
|
||||
@@ -51,11 +48,10 @@ import type {
|
||||
* when spawning the openclaw ACP adapter inside the gateway container.
|
||||
*
|
||||
* Fields are getters (not snapshot values) so the harness picks up the
|
||||
* current token and VM/container paths at spawn time.
|
||||
* current VM/container paths at spawn time. The bundled gateway runs
|
||||
* with `gateway.auth.mode=none`, so no auth token is plumbed through.
|
||||
*/
|
||||
export interface OpenclawGatewayAccessor {
|
||||
/** Current gateway auth token. Passed to `openclaw acp --token`. */
|
||||
getGatewayToken(): string
|
||||
/** Container name e.g. browseros-openclaw-openclaw-gateway-1. */
|
||||
getContainerName(): string
|
||||
/** LIMA_HOME directory containing the browseros-vm instance. */
|
||||
@@ -76,15 +72,6 @@ type AcpxRuntimeOptions = {
|
||||
* claude/codex (their adapters spawn their own CLI binaries).
|
||||
*/
|
||||
openclawGateway?: OpenclawGatewayAccessor
|
||||
/**
|
||||
* Optional. When wired, the runtime diverts OpenClaw turns that
|
||||
* carry image attachments to the gateway's HTTP `/v1/chat/completions`
|
||||
* endpoint (which accepts OpenAI-style `image_url` parts) instead of
|
||||
* the ACP bridge — the bridge silently drops image content blocks.
|
||||
* Without this client, image turns to OpenClaw agents fall through to
|
||||
* the ACP path and the model never sees the image.
|
||||
*/
|
||||
openclawGatewayChat?: OpenClawGatewayChatClient
|
||||
runtimeFactory?: (options: AcpRuntimeOptions) => AcpxCoreRuntime
|
||||
}
|
||||
|
||||
@@ -95,22 +82,16 @@ interface PreparedRuntimeContext {
|
||||
agentCommandEnv: Record<string, string>
|
||||
commandIdentity: string
|
||||
useBrowserosMcp: boolean
|
||||
browserosMcpHost?: string
|
||||
openclawSessionKey: string | null
|
||||
}
|
||||
|
||||
const BROWSEROS_ACP_AGENT_INSTRUCTIONS = `<role>
|
||||
You are BrowserOS - a browser agent with full control of a Chromium browser through the BrowserOS MCP server.
|
||||
|
||||
Use the BrowserOS MCP server for all browser tasks, including browsing the web, interacting with pages, inspecting browser state, and managing tabs, windows, bookmarks, and history.
|
||||
</role>`
|
||||
|
||||
export class AcpxRuntime implements AgentRuntime {
|
||||
private readonly defaultCwd: string | null
|
||||
private readonly browserosDir: string
|
||||
private readonly stateDir: string
|
||||
private readonly browserosServerPort: number
|
||||
private readonly openclawGateway: OpenclawGatewayAccessor | null
|
||||
private readonly openclawGatewayChat: OpenClawGatewayChatClient | null
|
||||
private readonly runtimeFactory: (
|
||||
options: AcpRuntimeOptions,
|
||||
) => AcpxCoreRuntime
|
||||
@@ -127,7 +108,6 @@ export class AcpxRuntime implements AgentRuntime {
|
||||
this.browserosServerPort =
|
||||
options.browserosServerPort ?? DEFAULT_PORTS.server
|
||||
this.openclawGateway = options.openclawGateway ?? null
|
||||
this.openclawGatewayChat = options.openclawGatewayChat ?? null
|
||||
this.sessionStore = createRuntimeStore({ stateDir: this.stateDir })
|
||||
this.runtimeFactory = options.runtimeFactory ?? createAcpRuntime
|
||||
}
|
||||
@@ -205,24 +185,6 @@ export class AcpxRuntime implements AgentRuntime {
|
||||
imageAttachmentCount: imageAttachments.length,
|
||||
})
|
||||
|
||||
const adapter = getAcpxAgentAdapter(input.agent.adapter)
|
||||
const adapterStream =
|
||||
(await adapter.maybeHandleTurn?.({
|
||||
prompt: input,
|
||||
prepared: {
|
||||
cwd: prepared.cwd,
|
||||
runtimeSessionKey: prepared.runtimeSessionKey,
|
||||
runPrompt: prepared.runPrompt,
|
||||
commandEnv: prepared.agentCommandEnv,
|
||||
commandIdentity: prepared.commandIdentity,
|
||||
useBrowserosMcp: prepared.useBrowserosMcp,
|
||||
openclawSessionKey: prepared.openclawSessionKey,
|
||||
},
|
||||
sessionStore: this.sessionStore,
|
||||
openclawGatewayChat: this.openclawGatewayChat,
|
||||
})) ?? null
|
||||
if (adapterStream) return adapterStream
|
||||
|
||||
const runtime = this.getRuntime({
|
||||
cwd,
|
||||
permissionMode: input.permissionMode,
|
||||
@@ -230,6 +192,7 @@ export class AcpxRuntime implements AgentRuntime {
|
||||
commandEnv: prepared.agentCommandEnv,
|
||||
commandIdentity: prepared.commandIdentity,
|
||||
useBrowserosMcp: prepared.useBrowserosMcp,
|
||||
browserosMcpHost: prepared.browserosMcpHost,
|
||||
openclawSessionKey: prepared.openclawSessionKey,
|
||||
})
|
||||
|
||||
@@ -277,6 +240,7 @@ export class AcpxRuntime implements AgentRuntime {
|
||||
agentCommandEnv: prepared.commandEnv,
|
||||
commandIdentity: prepared.commandIdentity,
|
||||
useBrowserosMcp: prepared.useBrowserosMcp,
|
||||
browserosMcpHost: prepared.browserosMcpHost,
|
||||
openclawSessionKey: prepared.openclawSessionKey,
|
||||
}
|
||||
}
|
||||
@@ -288,14 +252,17 @@ export class AcpxRuntime implements AgentRuntime {
|
||||
commandEnv: Record<string, string>
|
||||
commandIdentity: string
|
||||
useBrowserosMcp: boolean
|
||||
browserosMcpHost?: string
|
||||
openclawSessionKey: string | null
|
||||
}): AcpxCoreRuntime {
|
||||
const mcpHost = input.browserosMcpHost ?? '127.0.0.1'
|
||||
const key = JSON.stringify({
|
||||
cwd: input.cwd,
|
||||
permissionMode: input.permissionMode,
|
||||
nonInteractivePermissions: input.nonInteractivePermissions,
|
||||
commandIdentity: input.commandIdentity,
|
||||
useBrowserosMcp: input.useBrowserosMcp,
|
||||
browserosMcpHost: mcpHost,
|
||||
openclawSessionKey: input.openclawSessionKey,
|
||||
})
|
||||
const existing = this.runtimes.get(key)
|
||||
@@ -310,7 +277,7 @@ export class AcpxRuntime implements AgentRuntime {
|
||||
commandEnv: input.commandEnv,
|
||||
}),
|
||||
mcpServers: input.useBrowserosMcp
|
||||
? createBrowserosMcpServers(this.browserosServerPort)
|
||||
? createBrowserosMcpServers(this.browserosServerPort, mcpHost)
|
||||
: [],
|
||||
permissionMode: input.permissionMode,
|
||||
nonInteractivePermissions: input.nonInteractivePermissions,
|
||||
@@ -322,6 +289,7 @@ export class AcpxRuntime implements AgentRuntime {
|
||||
permissionMode: input.permissionMode,
|
||||
nonInteractivePermissions: input.nonInteractivePermissions,
|
||||
browserosServerPort: this.browserosServerPort,
|
||||
browserosMcpHost: mcpHost,
|
||||
commandIdentity: input.commandIdentity,
|
||||
useBrowserosMcp: input.useBrowserosMcp,
|
||||
openclawSessionKey: input.openclawSessionKey,
|
||||
@@ -509,14 +477,16 @@ export function unwrapBrowserosAcpUserMessage(raw: string): string {
|
||||
}
|
||||
|
||||
function stripOuterRoleEnvelope(value: string): string {
|
||||
const prefix = `${BROWSEROS_ACP_AGENT_INSTRUCTIONS}
|
||||
|
||||
<user_request>
|
||||
`
|
||||
const suffix = `
|
||||
</user_request>`
|
||||
if (!value.startsWith(prefix) || !value.endsWith(suffix)) return value
|
||||
return value.slice(prefix.length, -suffix.length)
|
||||
// Any `<role>…</role>\n\n<user_request>\n…\n</user_request>` envelope.
|
||||
// Adapter-agnostic so both the BrowserOS multi-line role block and the
|
||||
// openclaw single-line role block get unwrapped. TKT-774's exact-prefix
|
||||
// match only covered the BrowserOS form, so the openclaw envelope
|
||||
// (added when openclaw moved to its own prepare step) was landing
|
||||
// unwrapped in history payloads.
|
||||
const match = value.match(
|
||||
/^<role\b[^>]*>[\s\S]*?<\/role>\n\n<user_request>\n([\s\S]*?)\n<\/user_request>$/,
|
||||
)
|
||||
return match ? match[1] : value
|
||||
}
|
||||
|
||||
function stripOuterRuntimeEnvelope(value: string): string {
|
||||
@@ -700,12 +670,13 @@ function createAcpxEventStream(
|
||||
|
||||
function createBrowserosMcpServers(
|
||||
browserosServerPort: number,
|
||||
host = '127.0.0.1',
|
||||
): NonNullable<AcpRuntimeOptions['mcpServers']> {
|
||||
return [
|
||||
{
|
||||
type: 'http',
|
||||
name: 'browseros',
|
||||
url: `http://127.0.0.1:${browserosServerPort}/mcp`,
|
||||
url: `http://${host}:${browserosServerPort}/mcp`,
|
||||
headers: [],
|
||||
},
|
||||
]
|
||||
@@ -740,6 +711,22 @@ function createBrowserosAgentRegistry(input: {
|
||||
)
|
||||
}
|
||||
|
||||
if (lower === 'hermes') {
|
||||
const runtime = getHermesRuntime()
|
||||
if (runtime)
|
||||
return runtime.buildExecArgv(runtime.getAcpExecSpec(input.commandEnv))
|
||||
// No runtime registered (tests, dev fallback, non-darwin) →
|
||||
// host-process spawn of the bare hermes binary.
|
||||
return wrapCommandWithEnv('hermes acp', input.commandEnv)
|
||||
}
|
||||
|
||||
// claude + codex resolve through acpx-core's built-in registry
|
||||
// because the canonical command is an npx wrapper around the
|
||||
// upstream ACP-adapter package (e.g. `npx @zed-industries/codex-acp`),
|
||||
// and the package version range lives inside acpx-core. The
|
||||
// ClaudeRuntime / CodexRuntime registrations still drive health
|
||||
// probing and per-turn prep; only the spawn command source-of-
|
||||
// truth stays in acpx-core.
|
||||
if (lower === 'claude' || lower === 'codex') {
|
||||
return wrapCommandWithEnv(registry.resolve(agentName), input.commandEnv)
|
||||
}
|
||||
@@ -756,8 +743,8 @@ function createBrowserosAgentRegistry(input: {
|
||||
* already installed alongside the gateway is reused; BrowserOS does
|
||||
* not require a host-side openclaw install.
|
||||
*
|
||||
* Auth: `openclaw acp --url ...` deliberately does not reuse implicit
|
||||
* env/config credentials, so pass the gateway token explicitly.
|
||||
* Auth: BrowserOS configures the bundled gateway with `gateway.auth.mode=none`,
|
||||
* so no gateway token flag is needed for the local ACP bridge.
|
||||
*
|
||||
* Banner output: OPENCLAW_HIDE_BANNER and OPENCLAW_SUPPRESS_NOTES
|
||||
* suppress non-JSON-RPC chatter on stdout that would otherwise corrupt
|
||||
@@ -767,7 +754,6 @@ function resolveOpenclawAcpCommand(
|
||||
gateway: OpenclawGatewayAccessor,
|
||||
sessionKey: string | null,
|
||||
): string {
|
||||
const token = gateway.getGatewayToken()
|
||||
const limactl = gateway.getLimactlPath()
|
||||
const vm = gateway.getVmName()
|
||||
const container = gateway.getContainerName()
|
||||
@@ -816,8 +802,6 @@ function resolveOpenclawAcpCommand(
|
||||
'acp',
|
||||
'--url',
|
||||
gatewayUrlInsideContainer,
|
||||
'--token',
|
||||
token,
|
||||
]
|
||||
if (bridgeSessionKey) {
|
||||
argv.push('--session', bridgeSessionKey)
|
||||
|
||||
@@ -91,7 +91,7 @@ export class RingBuffer {
|
||||
/** Frames with seq > fromSeq, plus the terminal frame if not already in the slice. */
|
||||
slice(fromSeq: number): TurnFrame[] {
|
||||
const live = this.frames.filter((f) => f.seq > fromSeq)
|
||||
if (this.terminal && !live.some((f) => f.seq === this.terminal!.seq)) {
|
||||
if (this.terminal && !live.some((f) => f.seq === this.terminal?.seq)) {
|
||||
// Terminal might have been evicted by overflow; re-attach it so
|
||||
// subscribers always see a terminal if one exists.
|
||||
if (this.terminal.seq > fromSeq) live.push(this.terminal)
|
||||
|
||||
@@ -4,12 +4,13 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { exec } from 'node:child_process'
|
||||
import { promisify } from 'node:util'
|
||||
import { logger } from '../logger'
|
||||
import type { AgentAdapter } from './agent-types'
|
||||
|
||||
const execAsync = promisify(exec)
|
||||
import {
|
||||
type AgentRuntime,
|
||||
type AgentRuntimeRegistry,
|
||||
getAgentRuntimeRegistry,
|
||||
HostProcessAgentRuntime,
|
||||
} from './runtime'
|
||||
|
||||
export interface AdapterHealth {
|
||||
healthy: boolean
|
||||
@@ -19,100 +20,48 @@ export interface AdapterHealth {
|
||||
checkedAt: number
|
||||
}
|
||||
|
||||
interface CachedHealth extends AdapterHealth {
|
||||
expiresAt: number
|
||||
}
|
||||
|
||||
/**
|
||||
* In-memory cache of adapter binary availability. Probed lazily on
|
||||
* first read and refreshed every `cacheTtlMs`. The probe is one
|
||||
* `<binary> --version` invocation per adapter with a hard 2s timeout
|
||||
* so a hung CLI doesn't block the listing endpoint.
|
||||
* Reports adapter readiness for the `/adapters` route. Reads from the
|
||||
* `AgentRuntimeRegistry` — host-process runtimes self-cache their
|
||||
* `<binary> --version` probe; container runtimes expose lifecycle
|
||||
* state via the same snapshot.
|
||||
*
|
||||
* OpenClaw isn't probed here — its health derives from the gateway
|
||||
* lifecycle snapshot already exposed via `getGatewayStatus()`.
|
||||
* OpenClaw still falls back to a permissive default until Phase 4
|
||||
* migrates it onto a runtime — its health currently comes from the
|
||||
* gateway lifecycle snapshot the harness already exposes.
|
||||
*/
|
||||
export class AdapterHealthChecker {
|
||||
private readonly cache = new Map<AgentAdapter, CachedHealth>()
|
||||
private readonly cacheTtlMs: number
|
||||
private readonly probeTimeoutMs: number
|
||||
private readonly inflight = new Map<AgentAdapter, Promise<AdapterHealth>>()
|
||||
private readonly registry: AgentRuntimeRegistry
|
||||
|
||||
constructor(options: { cacheTtlMs?: number; probeTimeoutMs?: number } = {}) {
|
||||
this.cacheTtlMs = options.cacheTtlMs ?? 5 * 60 * 1000
|
||||
this.probeTimeoutMs = options.probeTimeoutMs ?? 2_000
|
||||
constructor(options: { registry?: AgentRuntimeRegistry } = {}) {
|
||||
this.registry = options.registry ?? getAgentRuntimeRegistry()
|
||||
}
|
||||
|
||||
async getHealth(adapter: AgentAdapter): Promise<AdapterHealth> {
|
||||
if (adapter === 'openclaw') {
|
||||
// OpenClaw health is derived from the gateway snapshot the
|
||||
// harness service already returns; the row component reads
|
||||
// that path. Surface a permissive default so the dot doesn't
|
||||
// spuriously light up red.
|
||||
return { healthy: true, checkedAt: Date.now() }
|
||||
}
|
||||
const now = Date.now()
|
||||
const cached = this.cache.get(adapter)
|
||||
if (cached && cached.expiresAt > now) return cached
|
||||
|
||||
const inflight = this.inflight.get(adapter)
|
||||
if (inflight) return inflight
|
||||
|
||||
const probe = this.runProbe(adapter)
|
||||
.then((result) => {
|
||||
const cacheEntry: CachedHealth = {
|
||||
...result,
|
||||
expiresAt: Date.now() + this.cacheTtlMs,
|
||||
}
|
||||
this.cache.set(adapter, cacheEntry)
|
||||
return result
|
||||
})
|
||||
.finally(() => {
|
||||
this.inflight.delete(adapter)
|
||||
})
|
||||
this.inflight.set(adapter, probe)
|
||||
return probe
|
||||
}
|
||||
|
||||
private async runProbe(adapter: AgentAdapter): Promise<AdapterHealth> {
|
||||
const command = ADAPTER_HEALTH_COMMANDS[adapter]
|
||||
if (!command) {
|
||||
return {
|
||||
healthy: false,
|
||||
reason: 'No health probe defined',
|
||||
checkedAt: Date.now(),
|
||||
}
|
||||
}
|
||||
try {
|
||||
await execAsync(command, { timeout: this.probeTimeoutMs })
|
||||
return { healthy: true, checkedAt: Date.now() }
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err)
|
||||
logger.debug('Adapter health probe failed', { adapter, error: message })
|
||||
return {
|
||||
healthy: false,
|
||||
reason: friendlyProbeFailure(adapter, message),
|
||||
checkedAt: Date.now(),
|
||||
}
|
||||
}
|
||||
const runtime = this.registry.get(adapter)
|
||||
if (!runtime) return openclawFallback(adapter)
|
||||
if (runtime instanceof HostProcessAgentRuntime) await runtime.probeHealth()
|
||||
return runtimeSnapshotToHealth(runtime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Probes are deliberately conservative — `--version` exits zero on
|
||||
* any installed CLI and won't trigger network calls or auth flows.
|
||||
*/
|
||||
const ADAPTER_HEALTH_COMMANDS: Partial<Record<AgentAdapter, string>> = {
|
||||
claude: 'claude --version',
|
||||
codex: 'codex --version',
|
||||
function runtimeSnapshotToHealth(runtime: AgentRuntime): AdapterHealth {
|
||||
const snap = runtime.getStatusSnapshot()
|
||||
return {
|
||||
healthy: snap.isReady,
|
||||
reason: snap.isReady ? undefined : (snap.lastError ?? undefined),
|
||||
// Prefer probedAt so the timestamp reflects probe completion
|
||||
// regardless of health state. lastErrorAt is the fallback for
|
||||
// runtimes that don't emit probedAt yet (containers).
|
||||
checkedAt: snap.probedAt ?? snap.lastErrorAt ?? Date.now(),
|
||||
}
|
||||
}
|
||||
|
||||
function friendlyProbeFailure(adapter: AgentAdapter, raw: string): string {
|
||||
if (/command not found|not recognized|ENOENT/i.test(raw)) {
|
||||
return `${ADAPTER_HEALTH_COMMANDS[adapter]} failed: command not found`
|
||||
function openclawFallback(adapter: AgentAdapter): AdapterHealth {
|
||||
if (adapter === 'openclaw') return { healthy: true, checkedAt: Date.now() }
|
||||
return {
|
||||
healthy: false,
|
||||
reason: `No runtime registered for "${adapter}"`,
|
||||
checkedAt: Date.now(),
|
||||
}
|
||||
if (/timed out|ETIMEDOUT/i.test(raw)) {
|
||||
return `${ADAPTER_HEALTH_COMMANDS[adapter]} did not respond within timeout`
|
||||
}
|
||||
return raw.split('\n')[0]?.slice(0, 200) ?? raw
|
||||
}
|
||||
|
||||
@@ -84,6 +84,24 @@ export const AGENT_ADAPTER_CATALOG: AgentAdapterDescriptor[] = [
|
||||
{ id: 'adaptive', label: 'Adaptive' },
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'hermes',
|
||||
name: 'Hermes',
|
||||
// 'default' means whatever the user configured via `hermes setup` —
|
||||
// Hermes' config.yaml is the source of truth for the model. ACP exposes
|
||||
// session/set_model but we don't surface it in Phase A.
|
||||
defaultModelId: 'default',
|
||||
defaultReasoningEffort: 'medium',
|
||||
modelControl: 'best-effort',
|
||||
// Empty list signals "no per-session model picker" — like OpenClaw.
|
||||
// Phase A.5 may dynamically populate from session/new response.
|
||||
models: [],
|
||||
reasoningEfforts: [
|
||||
{ id: 'low', label: 'Low' },
|
||||
{ id: 'medium', label: 'Medium', recommended: true },
|
||||
{ id: 'high', label: 'High' },
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
export function getAgentAdapterDescriptor(
|
||||
@@ -93,7 +111,12 @@ export function getAgentAdapterDescriptor(
|
||||
}
|
||||
|
||||
export function isAgentAdapter(value: unknown): value is AgentAdapter {
|
||||
return value === 'claude' || value === 'codex' || value === 'openclaw'
|
||||
return (
|
||||
value === 'claude' ||
|
||||
value === 'codex' ||
|
||||
value === 'openclaw' ||
|
||||
value === 'hermes'
|
||||
)
|
||||
}
|
||||
|
||||
export function resolveDefaultModelId(adapter: AgentAdapter): string {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
export type AgentAdapter = 'claude' | 'codex' | 'openclaw'
|
||||
export type AgentAdapter = 'claude' | 'codex' | 'openclaw' | 'hermes'
|
||||
|
||||
export type AgentPermissionMode = 'approve-all'
|
||||
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import type {
|
||||
PrepareAcpxAgentContextInput,
|
||||
PreparedAcpxAgentContext,
|
||||
} from '../acpx-agent-adapter'
|
||||
import {
|
||||
finishBrowserosManagedContext,
|
||||
prepareBrowserosManagedContext,
|
||||
} from '../acpx-agent-common'
|
||||
|
||||
/** Prepares Claude Code with BrowserOS agent home while preserving host Claude auth. */
|
||||
export async function prepareClaudeCodeContext(
|
||||
input: PrepareAcpxAgentContextInput,
|
||||
): Promise<PreparedAcpxAgentContext> {
|
||||
const common = await prepareBrowserosManagedContext(input)
|
||||
return finishBrowserosManagedContext({
|
||||
...common,
|
||||
commandEnv: {
|
||||
AGENT_HOME: common.paths.agentHome,
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import type {
|
||||
PrepareAcpxAgentContextInput,
|
||||
PreparedAcpxAgentContext,
|
||||
} from '../acpx-agent-adapter'
|
||||
import {
|
||||
finishBrowserosManagedContext,
|
||||
prepareBrowserosManagedContext,
|
||||
} from '../acpx-agent-common'
|
||||
import { materializeCodexHome } from '../acpx-runtime-context'
|
||||
|
||||
/** Prepares Codex with a contained CODEX_HOME and BrowserOS agent home. */
|
||||
export async function prepareCodexContext(
|
||||
input: PrepareAcpxAgentContextInput,
|
||||
): Promise<PreparedAcpxAgentContext> {
|
||||
const common = await prepareBrowserosManagedContext(input)
|
||||
await materializeCodexHome({
|
||||
paths: common.paths,
|
||||
skillNames: common.skillNames,
|
||||
})
|
||||
return finishBrowserosManagedContext({
|
||||
...common,
|
||||
commandEnv: {
|
||||
AGENT_HOME: common.paths.agentHome,
|
||||
CODEX_HOME: common.paths.codexHome,
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -1,219 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'node:crypto'
|
||||
import type { AcpSessionRecord, createRuntimeStore } from 'acpx/runtime'
|
||||
import type {
|
||||
OpenAIChatMessage,
|
||||
OpenAIContentPart,
|
||||
} from '../../../api/services/openclaw/openclaw-gateway-chat-client'
|
||||
import { logger } from '../../logger'
|
||||
import type { AcpxAdapterTurnInput } from '../acpx-agent-adapter'
|
||||
import type { AgentStreamEvent } from '../types'
|
||||
|
||||
type ImageAttachment = Readonly<{ mediaType: string; data: string }>
|
||||
|
||||
export async function maybeHandleOpenClawTurn(
|
||||
input: AcpxAdapterTurnInput,
|
||||
): Promise<ReadableStream<AgentStreamEvent> | null> {
|
||||
const imageAttachments = (input.prompt.attachments ?? []).filter((a) =>
|
||||
a.mediaType.startsWith('image/'),
|
||||
)
|
||||
if (imageAttachments.length === 0 || !input.openclawGatewayChat) {
|
||||
return null
|
||||
}
|
||||
return sendOpenclawViaGateway({
|
||||
prompt: input.prompt,
|
||||
sessionStore: input.sessionStore,
|
||||
openclawGatewayChat: input.openclawGatewayChat,
|
||||
imageAttachments,
|
||||
cwd: input.prepared.cwd,
|
||||
runPrompt: input.prepared.runPrompt,
|
||||
})
|
||||
}
|
||||
|
||||
/** Handles OpenClaw image turns through the gateway HTTP chat endpoint. */
|
||||
async function sendOpenclawViaGateway(input: {
|
||||
prompt: AcpxAdapterTurnInput['prompt']
|
||||
sessionStore: AcpxAdapterTurnInput['sessionStore']
|
||||
openclawGatewayChat: NonNullable<AcpxAdapterTurnInput['openclawGatewayChat']>
|
||||
imageAttachments: ReadonlyArray<ImageAttachment>
|
||||
cwd: string
|
||||
runPrompt: string
|
||||
}): Promise<ReadableStream<AgentStreamEvent>> {
|
||||
const existingRecord = await input.sessionStore.load(input.prompt.sessionKey)
|
||||
const priorMessages = existingRecord
|
||||
? recordToOpenAIMessages(existingRecord)
|
||||
: []
|
||||
const userContent: OpenAIContentPart[] = [
|
||||
{
|
||||
type: 'text',
|
||||
text: input.runPrompt,
|
||||
},
|
||||
...input.imageAttachments.map(
|
||||
(a): OpenAIContentPart => ({
|
||||
type: 'image_url',
|
||||
image_url: { url: `data:${a.mediaType};base64,${a.data}` },
|
||||
}),
|
||||
),
|
||||
]
|
||||
const messages: OpenAIChatMessage[] = [
|
||||
...priorMessages,
|
||||
{ role: 'user', content: userContent },
|
||||
]
|
||||
|
||||
logger.info('Agent harness gateway image turn dispatched', {
|
||||
agentId: input.prompt.agent.id,
|
||||
sessionKey: input.prompt.sessionKey,
|
||||
cwd: input.cwd,
|
||||
priorMessageCount: priorMessages.length,
|
||||
imageAttachmentCount: input.imageAttachments.length,
|
||||
})
|
||||
|
||||
const upstream = await input.openclawGatewayChat.streamTurn({
|
||||
agentId: input.prompt.agent.id,
|
||||
sessionKey: input.prompt.sessionKey,
|
||||
messages,
|
||||
signal: input.prompt.signal,
|
||||
})
|
||||
|
||||
const sessionStore = input.sessionStore
|
||||
const sessionKey = input.prompt.sessionKey
|
||||
const userMessageText = input.prompt.message
|
||||
const imageAttachments = input.imageAttachments
|
||||
let accumulated = ''
|
||||
|
||||
return new ReadableStream<AgentStreamEvent>({
|
||||
start: (controller) => {
|
||||
const reader = upstream.getReader()
|
||||
const persist = async () => {
|
||||
if (!existingRecord || !accumulated) return
|
||||
try {
|
||||
await persistGatewayTurn(
|
||||
sessionStore,
|
||||
sessionKey,
|
||||
userMessageText,
|
||||
imageAttachments,
|
||||
accumulated,
|
||||
)
|
||||
} catch (err) {
|
||||
logger.warn(
|
||||
'Failed to persist gateway image turn to acpx session record',
|
||||
{
|
||||
sessionKey,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
;(async () => {
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
if (value.type === 'text_delta') accumulated += value.text
|
||||
controller.enqueue(value)
|
||||
}
|
||||
await persist()
|
||||
controller.close()
|
||||
} catch (err) {
|
||||
controller.enqueue({
|
||||
type: 'error',
|
||||
message: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
controller.close()
|
||||
}
|
||||
})().catch(() => {})
|
||||
},
|
||||
cancel: () => {
|
||||
// Best-effort: cancel propagation to the gateway is tracked separately.
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
async function persistGatewayTurn(
|
||||
sessionStore: ReturnType<typeof createRuntimeStore>,
|
||||
sessionKey: string,
|
||||
userMessageText: string,
|
||||
imageAttachments: ReadonlyArray<ImageAttachment>,
|
||||
assistantText: string,
|
||||
): Promise<void> {
|
||||
const record = await sessionStore.load(sessionKey)
|
||||
if (!record) return
|
||||
const userContent: AcpxUserContent[] = [
|
||||
{ Text: userMessageText } as AcpxUserContent,
|
||||
]
|
||||
for (const _image of imageAttachments) {
|
||||
userContent.push({ Image: { source: 'base64' } } as AcpxUserContent)
|
||||
}
|
||||
const turnId = randomUUID()
|
||||
const updated = {
|
||||
...record,
|
||||
messages: [
|
||||
...record.messages,
|
||||
{ User: { id: `user-${turnId}`, content: userContent } },
|
||||
{ Agent: { content: [{ Text: assistantText }], tool_results: {} } },
|
||||
],
|
||||
lastUsedAt: new Date().toISOString(),
|
||||
} as AcpSessionRecord
|
||||
await sessionStore.save(updated)
|
||||
}
|
||||
|
||||
function recordToOpenAIMessages(record: AcpSessionRecord): OpenAIChatMessage[] {
|
||||
const messages: OpenAIChatMessage[] = []
|
||||
for (const message of record.messages) {
|
||||
if (message === 'Resume') continue
|
||||
if ('User' in message) {
|
||||
const text = message.User.content
|
||||
.map(userContentToText)
|
||||
.filter(Boolean)
|
||||
.join('\n\n')
|
||||
.trim()
|
||||
if (text) messages.push({ role: 'user', content: text })
|
||||
continue
|
||||
}
|
||||
if ('Agent' in message) {
|
||||
const text = message.Agent.content
|
||||
.map((part) => ('Text' in part ? part.Text : ''))
|
||||
.join('')
|
||||
.trim()
|
||||
if (text) messages.push({ role: 'assistant', content: text })
|
||||
}
|
||||
}
|
||||
return messages
|
||||
}
|
||||
|
||||
type AcpxSessionMessage = AcpSessionRecord['messages'][number]
|
||||
type AcpxUserContent = Extract<
|
||||
Exclude<AcpxSessionMessage, 'Resume'>,
|
||||
{ User: unknown }
|
||||
>['User']['content'][number]
|
||||
|
||||
function userContentToText(content: AcpxUserContent): string {
|
||||
if ('Text' in content) return unwrapPromptText(content.Text)
|
||||
if ('Mention' in content) return content.Mention.content
|
||||
if ('Image' in content) return content.Image.source ? '[image]' : ''
|
||||
return ''
|
||||
}
|
||||
|
||||
function unwrapPromptText(raw: string): string {
|
||||
const runtimeMatch = raw.match(
|
||||
/^<browseros_acpx_runtime\b[\s\S]*?<\/browseros_acpx_runtime>\n\n<user_request>\n([\s\S]*?)\n<\/user_request>$/,
|
||||
)
|
||||
if (runtimeMatch) return decodeBasicEntities(runtimeMatch[1]).trim()
|
||||
const roleMatch = raw.match(
|
||||
/^<role>[\s\S]*?<\/role>\n\n<user_request>\n([\s\S]*?)\n<\/user_request>$/,
|
||||
)
|
||||
if (roleMatch) return decodeBasicEntities(roleMatch[1]).trim()
|
||||
return raw.trim()
|
||||
}
|
||||
|
||||
function decodeBasicEntities(value: string): string {
|
||||
return value
|
||||
.replace(/</g, '<')
|
||||
.replace(/>/g, '>')
|
||||
.replace(/&/g, '&')
|
||||
}
|
||||
@@ -14,8 +14,6 @@ import {
|
||||
resolveAgentRuntimePaths,
|
||||
} from '../acpx-runtime-context'
|
||||
|
||||
export { maybeHandleOpenClawTurn } from './image-turn'
|
||||
|
||||
// Role preamble injected into OpenClaw ACP sessions so the agent
// knows it is running inside BrowserOS via the OpenClaw adapter.
const OPENCLAW_BROWSEROS_ACP_INSTRUCTIONS =
  '<role>You are running inside BrowserOS through the OpenClaw ACP adapter. Use your OpenClaw identity, memory, and browser tools.</role>'
|
||||
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Top-level interface every adapter runtime implements. Two abstract
|
||||
* subclasses (`ContainerAgentRuntime`, `HostProcessAgentRuntime`)
|
||||
* cover the two kinds we ship today.
|
||||
*/
|
||||
|
||||
import type {
|
||||
ExecSpec,
|
||||
RuntimeAction,
|
||||
RuntimeCapability,
|
||||
RuntimeDescriptor,
|
||||
RuntimeStatusSnapshot,
|
||||
StateListener,
|
||||
Unsubscribe,
|
||||
} from './types'
|
||||
|
||||
/**
 * Contract implemented by every adapter runtime. Container-backed
 * runtimes extend `ContainerAgentRuntime`; host-process runtimes
 * extend `HostProcessAgentRuntime`. Callers interact only through
 * this surface.
 */
export interface AgentRuntime {
  /** Static identity/metadata (adapter id, display name, kind, platforms). */
  readonly descriptor: RuntimeDescriptor

  // ── Status surface (Plane B feed) ────────────────────────────────
  /** Point-in-time status: state, readiness, last error. */
  getStatusSnapshot(): RuntimeStatusSnapshot
  /** Register for status-change notifications; returns an unsubscribe fn. */
  subscribe(listener: StateListener): Unsubscribe
  /** Actions this runtime supports; `executeAction` rejects anything else. */
  getCapabilities(): ReadonlyArray<RuntimeCapability>

  // ── Action dispatch (Plane B control) ────────────────────────────
  /** Run a lifecycle action; `onLog` receives progress lines when given. */
  executeAction(
    action: RuntimeAction,
    options?: { onLog?: (msg: string) => void },
  ): Promise<void>

  // ── ACP plane integration ────────────────────────────────────────
  /**
   * Build the shell-command string acpx-core spawns to run `spec`
   * against this runtime. For container kinds, this is the
   * `limactl shell <vm> -- nerdctl exec -i …` chain; for host kinds,
   * it's `env KEY=VAL <binary> <argv...>`.
   */
  buildExecArgv(spec: ExecSpec): string

  // ── Filesystem ───────────────────────────────────────────────────
  /** Per-agent home dir on host. Both kinds expose this; container
   * kinds also expose `toContainerPath` for in-container translation. */
  getPerAgentHomeDir(agentId: string): string
  /** Host → container path translation (container kinds only). */
  toContainerPath?(hostPath: string): string
  /** Container → host path translation (container kinds only). */
  toHostPath?(containerPath: string): string
}
|
||||
@@ -0,0 +1,93 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { getBrowserosDir } from '../../browseros-dir'
|
||||
import { logger } from '../../logger'
|
||||
import type {
|
||||
PrepareAcpxAgentContextInput,
|
||||
PreparedAcpxAgentContext,
|
||||
} from '../acpx-agent-adapter'
|
||||
import {
|
||||
finishBrowserosManagedContext,
|
||||
prepareBrowserosManagedContext,
|
||||
} from '../acpx-agent-common'
|
||||
import { resolveAgentRuntimePaths } from '../acpx-runtime-context'
|
||||
import { HostProcessAgentRuntime } from './host-process-agent-runtime'
|
||||
import { getAgentRuntimeRegistry } from './registry'
|
||||
import type { RuntimeDescriptor } from './types'
|
||||
|
||||
const CLAUDE_BINARY = 'claude'
|
||||
|
||||
export interface ClaudeRuntimeConfig {
|
||||
browserosDir: string
|
||||
}
|
||||
|
||||
/**
 * Host-process runtime for the Claude Code CLI. The `claude` binary
 * is expected on the user's PATH; no container or VM is involved.
 */
export class ClaudeRuntime extends HostProcessAgentRuntime {
  readonly descriptor: RuntimeDescriptor & { kind: 'host-process' } = {
    adapterId: 'claude',
    displayName: 'Claude Code',
    kind: 'host-process',
    platforms: ['darwin', 'linux'],
  }

  // BrowserOS state root used to resolve per-agent home dirs.
  private readonly claudeConfig: ClaudeRuntimeConfig

  constructor(
    deps: ConstructorParameters<typeof HostProcessAgentRuntime>[0],
    config: ClaudeRuntimeConfig,
  ) {
    super(deps)
    this.claudeConfig = config
  }

  /** Host-side per-agent home under the BrowserOS state dir. */
  getPerAgentHomeDir(agentId: string): string {
    return resolveAgentRuntimePaths({
      browserosDir: this.claudeConfig.browserosDir,
      agentId,
    }).agentHome
  }

  /** Per-turn context prep — delegates to `prepareClaudeCodeContext`. */
  prepareTurnContext(
    input: PrepareAcpxAgentContextInput,
  ): Promise<PreparedAcpxAgentContext> {
    return prepareClaudeCodeContext(input)
  }
}
|
||||
|
||||
/** Prepares Claude Code with BrowserOS agent home while preserving host Claude auth. */
|
||||
export async function prepareClaudeCodeContext(
|
||||
input: PrepareAcpxAgentContextInput,
|
||||
): Promise<PreparedAcpxAgentContext> {
|
||||
const common = await prepareBrowserosManagedContext(input)
|
||||
return finishBrowserosManagedContext({
|
||||
...common,
|
||||
commandEnv: {
|
||||
AGENT_HOME: common.paths.agentHome,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
export interface ConfigureClaudeRuntimeOptions {
|
||||
browserosDir?: string
|
||||
}
|
||||
|
||||
export function configureClaudeRuntime(
|
||||
options: ConfigureClaudeRuntimeOptions = {},
|
||||
): ClaudeRuntime {
|
||||
const browserosDir = options.browserosDir ?? getBrowserosDir()
|
||||
const runtime = new ClaudeRuntime(
|
||||
{ binaryName: CLAUDE_BINARY },
|
||||
{ browserosDir },
|
||||
)
|
||||
getAgentRuntimeRegistry().register(runtime)
|
||||
logger.debug('ClaudeRuntime registered', { binary: CLAUDE_BINARY })
|
||||
return runtime
|
||||
}
|
||||
|
||||
export function getClaudeRuntime(): ClaudeRuntime | null {
|
||||
const r = getAgentRuntimeRegistry().get('claude')
|
||||
return r instanceof ClaudeRuntime ? r : null
|
||||
}
|
||||
@@ -0,0 +1,101 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { getBrowserosDir } from '../../browseros-dir'
|
||||
import { logger } from '../../logger'
|
||||
import type {
|
||||
PrepareAcpxAgentContextInput,
|
||||
PreparedAcpxAgentContext,
|
||||
} from '../acpx-agent-adapter'
|
||||
import {
|
||||
finishBrowserosManagedContext,
|
||||
prepareBrowserosManagedContext,
|
||||
} from '../acpx-agent-common'
|
||||
import {
|
||||
materializeCodexHome,
|
||||
resolveAgentRuntimePaths,
|
||||
} from '../acpx-runtime-context'
|
||||
import { HostProcessAgentRuntime } from './host-process-agent-runtime'
|
||||
import { getAgentRuntimeRegistry } from './registry'
|
||||
import type { RuntimeDescriptor } from './types'
|
||||
|
||||
const CODEX_BINARY = 'codex'
|
||||
|
||||
export interface CodexRuntimeConfig {
|
||||
browserosDir: string
|
||||
}
|
||||
|
||||
/**
 * Host-process runtime for the Codex CLI. The `codex` binary is
 * expected on the user's PATH; no container or VM is involved.
 */
export class CodexRuntime extends HostProcessAgentRuntime {
  readonly descriptor: RuntimeDescriptor & { kind: 'host-process' } = {
    adapterId: 'codex',
    displayName: 'Codex',
    kind: 'host-process',
    platforms: ['darwin', 'linux'],
  }

  // BrowserOS state root used to resolve per-agent home dirs.
  private readonly codexConfig: CodexRuntimeConfig

  constructor(
    deps: ConstructorParameters<typeof HostProcessAgentRuntime>[0],
    config: CodexRuntimeConfig,
  ) {
    super(deps)
    this.codexConfig = config
  }

  /** Host-side per-agent home under the BrowserOS state dir. */
  getPerAgentHomeDir(agentId: string): string {
    return resolveAgentRuntimePaths({
      browserosDir: this.codexConfig.browserosDir,
      agentId,
    }).agentHome
  }

  /** Per-turn context prep — delegates to `prepareCodexContext`. */
  prepareTurnContext(
    input: PrepareAcpxAgentContextInput,
  ): Promise<PreparedAcpxAgentContext> {
    return prepareCodexContext(input)
  }
}
|
||||
|
||||
/** Prepares Codex with a contained CODEX_HOME and BrowserOS agent home. */
|
||||
export async function prepareCodexContext(
|
||||
input: PrepareAcpxAgentContextInput,
|
||||
): Promise<PreparedAcpxAgentContext> {
|
||||
const common = await prepareBrowserosManagedContext(input)
|
||||
await materializeCodexHome({
|
||||
paths: common.paths,
|
||||
skillNames: common.skillNames,
|
||||
})
|
||||
return finishBrowserosManagedContext({
|
||||
...common,
|
||||
commandEnv: {
|
||||
AGENT_HOME: common.paths.agentHome,
|
||||
CODEX_HOME: common.paths.codexHome,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
export interface ConfigureCodexRuntimeOptions {
|
||||
browserosDir?: string
|
||||
}
|
||||
|
||||
export function configureCodexRuntime(
|
||||
options: ConfigureCodexRuntimeOptions = {},
|
||||
): CodexRuntime {
|
||||
const browserosDir = options.browserosDir ?? getBrowserosDir()
|
||||
const runtime = new CodexRuntime(
|
||||
{ binaryName: CODEX_BINARY },
|
||||
{ browserosDir },
|
||||
)
|
||||
getAgentRuntimeRegistry().register(runtime)
|
||||
logger.debug('CodexRuntime registered', { binary: CODEX_BINARY })
|
||||
return runtime
|
||||
}
|
||||
|
||||
export function getCodexRuntime(): CodexRuntime | null {
|
||||
const r = getAgentRuntimeRegistry().get('codex')
|
||||
return r instanceof CodexRuntime ? r : null
|
||||
}
|
||||
@@ -0,0 +1,121 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Abstract base for container-backed agent runtimes (openclaw,
|
||||
* hermes). Extends `ManagedContainer` so subclasses keep all the
|
||||
* existing container plumbing (state machine, lifecycle lock, image
|
||||
* load, mount roots, exec gating); adds the runtime-layer surface on
|
||||
* top: descriptor, capability list, action dispatcher, status
|
||||
* snapshot.
|
||||
*/
|
||||
|
||||
import type {
|
||||
ContainerDescriptor,
|
||||
ContainerStatusSnapshot,
|
||||
} from '../../container/managed'
|
||||
import { ManagedContainer } from '../../container/managed'
|
||||
import type { AgentRuntime } from './agent-runtime'
|
||||
import { ActionNotSupportedError } from './errors'
|
||||
import type {
|
||||
RuntimeAction,
|
||||
RuntimeCapability,
|
||||
StateListener,
|
||||
Unsubscribe,
|
||||
} from './types'
|
||||
|
||||
export abstract class ContainerAgentRuntime
  extends ManagedContainer
  implements AgentRuntime
{
  abstract override readonly descriptor: ContainerDescriptor & {
    kind: 'container'
  }
  abstract getPerAgentHomeDir(agentId: string): string

  /**
   * Default capability list. Subclasses extend (e.g. OpenClaw adds
   * `'gateway-control-plane'`) or filter (e.g. drop reset levels the
   * subclass can't support yet).
   */
  getCapabilities(): ReadonlyArray<RuntimeCapability> {
    return [
      'install',
      'start',
      'stop',
      'restart',
      'reset-soft',
      'reset-wipe-agent',
      'reset-hard',
      'logs',
    ]
  }

  /** Snapshot of container state plus a derived `isReady` flag
   * (true exactly when the state machine reports 'running'). */
  override getStatusSnapshot(): ContainerStatusSnapshot & {
    isReady: boolean
  } {
    const state = this.getState()
    return {
      adapterId: this.descriptor.adapterId,
      containerName: this.descriptor.containerName,
      state,
      isReady: state === 'running',
      lastError: this.lastError,
      lastErrorAt: this.lastErrorAt,
    }
  }

  /** Adapts the base class's state subscription to the runtime-layer
   * listener shape: each state change delivers a fresh snapshot. */
  subscribe(listener: StateListener): Unsubscribe {
    return this.subscribeState(() => listener(this.getStatusSnapshot()))
  }

  /**
   * Dispatch a runtime action to the matching `ManagedContainer`
   * lifecycle method. The capability gate runs first so unsupported
   * actions fail with `ActionNotSupportedError` before any work
   * starts; the `default` arm repeats the same error for action
   * variants the switch doesn't cover.
   */
  async executeAction(
    action: RuntimeAction,
    opts: { onLog?: (msg: string) => void } = {},
  ): Promise<void> {
    const required = actionToCapability(action)
    if (!this.getCapabilities().includes(required)) {
      throw new ActionNotSupportedError(
        this.descriptor.adapterId,
        action.type,
        this.getCapabilities(),
      )
    }
    switch (action.type) {
      case 'install':
        return this.install(opts)
      case 'start':
        return this.start(opts)
      case 'stop':
        return this.stop()
      case 'restart':
        return this.restart(opts)
      case 'reset-soft':
        return this.reset('soft', opts)
      case 'reset-wipe-agent':
        return this.reset('wipe-agent', { ...opts, agentId: action.agentId })
      case 'reset-hard':
        return this.reset('hard', opts)
      default:
        throw new ActionNotSupportedError(
          this.descriptor.adapterId,
          (action as { type: string }).type,
          this.getCapabilities(),
        )
    }
  }
}
|
||||
|
||||
/**
 * Map an action variant to the capability key the gate checks. Kept
 * outside the class so the dispatcher can guard before constructing
 * any state.
 */
function actionToCapability(action: RuntimeAction): RuntimeCapability {
  // The action.type strings happen to coincide with capability
  // strings 1:1, so this is currently identity. Pulled out as a
  // function so the gate can grow more nuanced (e.g. action-specific
  // sub-capabilities) without re-flowing the dispatcher.
  return action.type as RuntimeCapability
}
|
||||
@@ -0,0 +1,43 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
/**
|
||||
* Thrown when `executeAction` is called with an action that the
|
||||
* runtime's `getCapabilities()` doesn't list. The HTTP route layer
|
||||
* maps this to 405; the UI gates affordances on capabilities so a
|
||||
* well-behaved client should never trip this.
|
||||
*/
|
||||
export class ActionNotSupportedError extends Error {
|
||||
constructor(
|
||||
public readonly adapterId: string,
|
||||
public readonly actionType: string,
|
||||
public readonly capabilities: ReadonlyArray<string>,
|
||||
) {
|
||||
super(
|
||||
`Runtime "${adapterId}" does not support action "${actionType}" ` +
|
||||
`(capabilities: ${capabilities.join(', ') || 'none'})`,
|
||||
)
|
||||
this.name = 'ActionNotSupportedError'
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Higher-level "runtime is not ready to take a turn" error. Mirrors
|
||||
* `ContainerNotReadyError` from the container layer but lives at the
|
||||
* runtime layer so callers can differentiate "container abstraction
|
||||
* says no" from "host CLI is missing" without reaching down two
|
||||
* layers.
|
||||
*/
|
||||
export class RuntimeNotReadyError extends Error {
|
||||
constructor(
|
||||
public readonly adapterId: string,
|
||||
public readonly state: string,
|
||||
public readonly hint: string,
|
||||
) {
|
||||
super(`Runtime "${adapterId}" is not ready (state=${state}): ${hint}`)
|
||||
this.name = 'RuntimeNotReadyError'
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,325 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Hermes-specific runtime. Owns the container spec, readiness probe,
|
||||
* mount roots, ACP launch spec, and per-turn context prep — the full
|
||||
* adapter surface lives in this single class.
|
||||
*/
|
||||
|
||||
import { mkdir } from 'node:fs/promises'
|
||||
import { join } from 'node:path'
|
||||
import {
|
||||
HERMES_CONTAINER_HARNESS_DIR,
|
||||
HERMES_CONTAINER_NAME,
|
||||
HERMES_IMAGE,
|
||||
} from '@browseros/shared/constants/hermes'
|
||||
import {
|
||||
getHermesAgentHomeHostDir,
|
||||
getHermesHarnessHostDir,
|
||||
getHermesHostStateDir,
|
||||
} from '../../../api/services/hermes/hermes-paths'
|
||||
import { getBrowserosDir } from '../../browseros-dir'
|
||||
import { ContainerCli } from '../../container/container-cli'
|
||||
import { ImageLoader } from '../../container/image-loader'
|
||||
import type {
|
||||
ContainerDescriptor,
|
||||
ManagedContainerDeps,
|
||||
MountRoot,
|
||||
} from '../../container/managed'
|
||||
import type { ContainerSpec } from '../../container/types'
|
||||
import { logger } from '../../logger'
|
||||
import {
|
||||
GUEST_VM_STATE,
|
||||
getLimaHomeDir,
|
||||
resolveBundledLimactl,
|
||||
resolveBundledLimaTemplate,
|
||||
VM_NAME,
|
||||
VmRuntime,
|
||||
} from '../../vm'
|
||||
import type {
|
||||
PrepareAcpxAgentContextInput,
|
||||
PreparedAcpxAgentContext,
|
||||
} from '../acpx-agent-adapter'
|
||||
import {
|
||||
finishBrowserosManagedContext,
|
||||
prepareBrowserosManagedContext,
|
||||
} from '../acpx-agent-common'
|
||||
import { ContainerAgentRuntime } from './container-agent-runtime'
|
||||
import { getAgentRuntimeRegistry } from './registry'
|
||||
import type { ExecSpec } from './types'
|
||||
|
||||
const HERMES_BINARY = '/opt/hermes/.venv/bin/hermes'
|
||||
|
||||
export interface HermesContainerRuntimeConfig {
|
||||
/** BrowserOS state root — used to compute per-agent home paths. */
|
||||
browserosDir: string
|
||||
/** Host-side directory where Hermes per-agent home dirs live. */
|
||||
hermesHarnessHostDir: string
|
||||
}
|
||||
|
||||
/**
 * Hermes-specific container runtime: owns the container spec, the
 * exec-based readiness probe, mount roots, the ACP launch spec, and
 * per-turn context prep.
 */
export class HermesContainerRuntime extends ContainerAgentRuntime {
  readonly descriptor: ContainerDescriptor & { kind: 'container' } = {
    adapterId: 'hermes',
    displayName: 'Hermes',
    kind: 'container',
    defaultImage: HERMES_IMAGE,
    containerName: HERMES_CONTAINER_NAME,
    platforms: ['darwin'],
    // Hermes has no HTTP probe; we exec `hermes --version` instead
    // (see `readinessProbe` below). Generous timeout because the
    // first exec inside a freshly-started container can be slow.
    readinessProbe: { timeoutMs: 30_000, intervalMs: 500 },
  }

  // Runtime-specific config: state root + host-side harness dir.
  private readonly hermesConfig: HermesContainerRuntimeConfig

  constructor(
    deps: ManagedContainerDeps,
    config: HermesContainerRuntimeConfig,
  ) {
    super(deps)
    this.hermesConfig = config
  }

  // ── ManagedContainer abstracts ───────────────────────────────────

  /** Logical host↔container mapping for path translation. */
  protected mountRoots(): readonly MountRoot[] {
    return [
      {
        hostPath: this.hermesConfig.hermesHarnessHostDir,
        containerPath: HERMES_CONTAINER_HARNESS_DIR,
        kind: 'shared',
      },
    ]
  }

  /** Container spec handed to the container CLI when (re)creating. */
  protected async buildContainerSpec(): Promise<ContainerSpec> {
    // The bind-mount source is an in-VM path, not the host path —
    // Lima's bundled mount already exposes <browserosDir>/vm/ to the
    // VM at GUEST_VM_STATE, so nerdctl sees the harness dir at
    // `${GUEST_VM_STATE}/hermes/harness`. mountRoots() above declares
    // the *logical* host↔container mapping for path-translation use.
    const guestHarnessDir = `${GUEST_VM_STATE}/hermes/harness`
    const gateway = await this.deps.vm.getDefaultGateway()
    return {
      name: HERMES_CONTAINER_NAME,
      image: HERMES_IMAGE,
      restart: 'unless-stopped',
      env: { PYTHONUNBUFFERED: '1' },
      // host.containers.internal → VM gateway so hermes inside the
      // container can reach the BrowserOS HTTP server running on the
      // host (BrowserOS MCP /mcp).
      addHosts: [`host.containers.internal:${gateway}`],
      mounts: [
        { source: guestHarnessDir, target: HERMES_CONTAINER_HARNESS_DIR },
      ],
      // Override the upstream image's `hermes acp` ENTRYPOINT — we
      // want a long-lived idle container that we `nerdctl exec` into
      // per turn. Bypass tini (0.19.0 getopt-parses `-x` even after
      // the PROGRAM, so `tini /bin/sh -c "…"` errors).
      entrypoint: '/bin/sh',
      command: ['-c', 'exec sleep infinity'],
    }
  }

  /**
   * Container-running is already checked by the base via
   * `cli.waitForContainerRunning` before this runs. Here we add an
   * exec-based liveness check: `hermes --version` exits 0. Catches
   * the failure mode where the container daemon thinks it's running
   * but the embedded Python venv is broken or the binary is missing.
   *
   * This must NOT go through `execProcess` — that would deadlock on
   * the state gate (we're in `starting`, not `running`). Use the
   * lower-level `cli.exec` directly.
   */
  protected async readinessProbe(): Promise<boolean> {
    try {
      const exitCode = await this.deps.cli.exec(this.descriptor.containerName, [
        HERMES_BINARY,
        '--version',
      ])
      return exitCode === 0
    } catch {
      return false
    }
  }

  // ── AgentRuntime additions ───────────────────────────────────────

  /** Host-side per-agent home under the VM-visible harness dir. */
  getPerAgentHomeDir(agentId: string): string {
    return getHermesAgentHomeHostDir({
      browserosDir: this.hermesConfig.browserosDir,
      agentId,
    })
  }

  /**
   * ExecSpec for `hermes acp`. The dispatcher feeds this to
   * `buildExecArgv()` (inherited from `ManagedContainer`) to get the
   * launch command string. PYTHONUNBUFFERED is re-added defensively —
   * the container has it set too, but acpx spawns through `nerdctl
   * exec` which doesn't inherit container env onto the new process.
   */
  getAcpExecSpec(commandEnv: Record<string, string>): ExecSpec {
    return {
      argv: [HERMES_BINARY, 'acp'],
      env: { PYTHONUNBUFFERED: '1', ...commandEnv },
    }
  }

  /** Per-turn context prep — thin wrapper around the standalone
   * `prepareHermesContext` so callers that prefer the runtime-style
   * surface stay self-contained. */
  prepareTurnContext(
    input: PrepareAcpxAgentContextInput,
  ): Promise<PreparedAcpxAgentContext> {
    return prepareHermesContext(input)
  }
}
|
||||
|
||||
/**
|
||||
* Translate a host-side hermes home path to its in-container equivalent.
|
||||
* The container bind-mounts `<browserosDir>/vm/hermes/harness` (host)
|
||||
* onto `/data/agents/harness` (container), so paths under the host
|
||||
* harness root map cleanly to `/data/agents/harness/...` inside.
|
||||
*
|
||||
* Returns the original host path when it doesn't sit under the harness
|
||||
* root — defensive escape hatch for tests that inject a custom dir.
|
||||
*/
|
||||
function translateHermesHomeToContainerPath(
|
||||
hostHome: string,
|
||||
browserosDir: string,
|
||||
): string {
|
||||
const harnessHostRoot = getHermesHarnessHostDir(browserosDir)
|
||||
if (hostHome === harnessHostRoot) return HERMES_CONTAINER_HARNESS_DIR
|
||||
if (hostHome.startsWith(`${harnessHostRoot}/`)) {
|
||||
return `${HERMES_CONTAINER_HARNESS_DIR}${hostHome.slice(harnessHostRoot.length)}`
|
||||
}
|
||||
return hostHome
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepares Hermes with a per-agent HERMES_HOME under
|
||||
* `<browserosDir>/vm/hermes/harness/<id>/home`. Provider config
|
||||
* (config.yaml + .env) is written into this directory at agent-create
|
||||
* time by AgentHarnessService.writeHermesPerAgentProvider. There is no
|
||||
* fallback to a global `~/.hermes/` install — Hermes agents always
|
||||
* carry their own provider config.
|
||||
*
|
||||
* HERMES_HOME inside the container is the container-side path
|
||||
* (`/data/agents/harness/<id>/home`) so Hermes resolves it correctly
|
||||
* when the runtime spawns `hermes acp` via `nerdctl exec`.
|
||||
*
|
||||
* Pure function — no runtime instance required, used directly by
|
||||
* the per-adapter prepare router in `acpx-agent-adapter.ts`.
|
||||
*/
|
||||
export async function prepareHermesContext(
|
||||
input: PrepareAcpxAgentContextInput,
|
||||
): Promise<PreparedAcpxAgentContext> {
|
||||
const common = await prepareBrowserosManagedContext(input)
|
||||
|
||||
// Hermes-specific home lives under vm/ so it's reachable inside the
|
||||
// Lima VM; the shared `common.paths.agentHome` (under agents/harness)
|
||||
// is OUTSIDE the VM mount and would not be visible to nerdctl.
|
||||
const hermesAgentHome = getHermesAgentHomeHostDir({
|
||||
browserosDir: input.browserosDir,
|
||||
agentId: input.agent.id,
|
||||
})
|
||||
await mkdir(hermesAgentHome, { recursive: true })
|
||||
|
||||
const hermesAgentHomeInContainer = translateHermesHomeToContainerPath(
|
||||
hermesAgentHome,
|
||||
input.browserosDir,
|
||||
)
|
||||
|
||||
return finishBrowserosManagedContext({
|
||||
...common,
|
||||
commandEnv: {
|
||||
HERMES_HOME: hermesAgentHomeInContainer,
|
||||
},
|
||||
// Hermes runs inside a Lima container; the BrowserOS HTTP MCP
|
||||
// server lives on the host. `host.containers.internal` resolves
|
||||
// to the VM gateway (via --add-host on the hermes container) so
|
||||
// hermes can reach the MCP endpoint that the harness injects via
|
||||
// newSession.
|
||||
browserosMcpHost: 'host.containers.internal',
|
||||
})
|
||||
}
|
||||
|
||||
// ── Factory + wire-up ──────────────────────────────────────────────
|
||||
|
||||
export interface ConfigureHermesRuntimeOptions {
|
||||
/** Bundled-resources root (provided by the launcher); when set,
|
||||
* resolves bundled limactl + Lima template paths instead of host
|
||||
* defaults. Optional in tests. */
|
||||
resourcesDir?: string
|
||||
/** Override BrowserOS state dir (defaults to `getBrowserosDir()`). */
|
||||
browserosDir?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a `HermesContainerRuntime` with production deps (bundled
|
||||
* limactl, BrowserOS state dirs, Lima VM runtime) and register it in
|
||||
* the global `AgentRuntimeRegistry`. Returns `null` on non-darwin —
|
||||
* the harness checks for the runtime and falls back gracefully.
|
||||
*
|
||||
* Idempotent against accidental double-init only insofar as the
|
||||
* registry's duplicate guard fires; callers should call this once at
|
||||
* server startup.
|
||||
*/
|
||||
export function configureHermesRuntime(
|
||||
options: ConfigureHermesRuntimeOptions = {},
|
||||
): HermesContainerRuntime | null {
|
||||
if (process.platform !== 'darwin') {
|
||||
logger.warn('Hermes runtime skipped: unsupported platform', {
|
||||
platform: process.platform,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
const browserosDir = options.browserosDir ?? getBrowserosDir()
|
||||
const resourcesDir = options.resourcesDir ?? null
|
||||
const limactlPath = resourcesDir
|
||||
? resolveBundledLimactl(resourcesDir)
|
||||
: 'limactl'
|
||||
const limaHome = getLimaHomeDir(browserosDir)
|
||||
const hermesStateDir = getHermesHostStateDir(browserosDir)
|
||||
const hermesHarnessHostDir = getHermesHarnessHostDir(browserosDir)
|
||||
|
||||
const vm = new VmRuntime({
|
||||
limactlPath,
|
||||
limaHome,
|
||||
templatePath: resourcesDir
|
||||
? resolveBundledLimaTemplate(resourcesDir)
|
||||
: undefined,
|
||||
browserosRoot: browserosDir,
|
||||
})
|
||||
const cli = new ContainerCli({ limactlPath, limaHome, vmName: VM_NAME })
|
||||
const loader = new ImageLoader(cli)
|
||||
|
||||
const runtime = new HermesContainerRuntime(
|
||||
{
|
||||
cli,
|
||||
loader,
|
||||
vm,
|
||||
limactlPath,
|
||||
limaHome,
|
||||
vmName: VM_NAME,
|
||||
lockDir: join(hermesStateDir, '.locks'),
|
||||
},
|
||||
{ browserosDir, hermesHarnessHostDir },
|
||||
)
|
||||
|
||||
getAgentRuntimeRegistry().register(runtime)
|
||||
logger.debug('HermesContainerRuntime registered', { image: HERMES_IMAGE })
|
||||
return runtime
|
||||
}
|
||||
|
||||
/** Convenience getter — returns the registered runtime or null. */
|
||||
export function getHermesRuntime(): HermesContainerRuntime | null {
|
||||
const r = getAgentRuntimeRegistry().get('hermes')
|
||||
return r instanceof HermesContainerRuntime ? r : null
|
||||
}
|
||||
@@ -0,0 +1,240 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Abstract base for host-process agent runtimes (claude, codex). The
|
||||
* agent process runs from the user's host PATH — no container, no
|
||||
* Lima. This class owns binary discovery, version probing with
|
||||
* caching, and the smaller capability surface that host adapters
|
||||
* support.
|
||||
*/
|
||||
|
||||
import { logger } from '../../logger'
|
||||
import type { AgentRuntime } from './agent-runtime'
|
||||
import { ActionNotSupportedError } from './errors'
|
||||
import type {
|
||||
ExecSpec,
|
||||
RuntimeAction,
|
||||
RuntimeCapability,
|
||||
RuntimeDescriptor,
|
||||
RuntimeState,
|
||||
RuntimeStatusSnapshot,
|
||||
StateListener,
|
||||
Unsubscribe,
|
||||
} from './types'
|
||||
|
||||
/** Constructor dependencies for `HostProcessAgentRuntime` subclasses. */
export interface HostProcessAgentRuntimeDeps {
  /** Host PATH binary name to probe + spawn (e.g. 'claude', 'codex'). */
  binaryName: string
  /** Override the default `<binary> --version` probe argv. */
  versionProbeArgs?: ReadonlyArray<string>
  /** Cache window for probe results in ms. Default 5 minutes — same
   * as today's adapter-health.ts. */
  probeCacheMs?: number
  /** Test seam: spawn the probe via this fn instead of `Bun.$`. */
  spawnProbe?: (
    cmd: ReadonlyArray<string>,
    timeoutMs: number,
  ) => Promise<{ exitCode: number; stdout: string; stderr: string }>
}

// Default probe-result cache window: 5 minutes.
const DEFAULT_PROBE_CACHE_MS = 5 * 60 * 1000
// Hard cap for a single `--version` probe spawn.
const DEFAULT_PROBE_TIMEOUT_MS = 2_000
|
||||
|
||||
export abstract class HostProcessAgentRuntime implements AgentRuntime {
|
||||
abstract readonly descriptor: RuntimeDescriptor & { kind: 'host-process' }
|
||||
abstract getPerAgentHomeDir(agentId: string): string
|
||||
|
||||
protected state: RuntimeState = 'cli_missing'
|
||||
protected lastError: string | null = null
|
||||
protected lastErrorAt: number | null = null
|
||||
protected binaryVersion: string | null = null
|
||||
private readonly listeners = new Set<StateListener>()
|
||||
private healthCheckedAt = 0
|
||||
private probeInFlight: Promise<void> | null = null
|
||||
|
||||
constructor(protected readonly deps: HostProcessAgentRuntimeDeps) {}
|
||||
|
||||
// ── Status surface ───────────────────────────────────────────────
|
||||
|
||||
getStatusSnapshot(): RuntimeStatusSnapshot {
|
||||
return {
|
||||
adapterId: this.descriptor.adapterId,
|
||||
state: this.state,
|
||||
isReady: this.state === 'cli_present',
|
||||
lastError: this.lastError,
|
||||
lastErrorAt: this.lastErrorAt,
|
||||
probedAt: this.healthCheckedAt > 0 ? this.healthCheckedAt : null,
|
||||
details: { binaryVersion: this.binaryVersion },
|
||||
}
|
||||
}
|
||||
|
||||
subscribe(listener: StateListener): Unsubscribe {
|
||||
this.listeners.add(listener)
|
||||
return () => {
|
||||
this.listeners.delete(listener)
|
||||
}
|
||||
}
|
||||
|
||||
getCapabilities(): ReadonlyArray<RuntimeCapability> {
|
||||
return ['reinstall-cli', 'check-auth']
|
||||
}
|
||||
|
||||
// ── Action dispatch ──────────────────────────────────────────────
|
||||
|
||||
async executeAction(
|
||||
action: RuntimeAction,
|
||||
_opts: { onLog?: (msg: string) => void } = {},
|
||||
): Promise<void> {
|
||||
if (!this.getCapabilities().includes(action.type as RuntimeCapability)) {
|
||||
throw new ActionNotSupportedError(
|
||||
this.descriptor.adapterId,
|
||||
action.type,
|
||||
this.getCapabilities(),
|
||||
)
|
||||
}
|
||||
switch (action.type) {
|
||||
case 'reinstall-cli':
|
||||
return this.handleReinstallCli()
|
||||
case 'check-auth':
|
||||
return this.checkAuth()
|
||||
default:
|
||||
throw new ActionNotSupportedError(
|
||||
this.descriptor.adapterId,
|
||||
action.type,
|
||||
this.getCapabilities(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// ── ACP plane integration ────────────────────────────────────────
|
||||
|
||||
buildExecArgv(spec: ExecSpec): string {
|
||||
// Host binary lives on $PATH — no limactl chain. Compose
|
||||
// `env KEY=val ... <argv...>` so adapters that inject env
|
||||
// (AGENT_HOME, CODEX_HOME) get them on the spawned process.
|
||||
const envParts = Object.entries(spec.env ?? {}).map(([k, v]) => `${k}=${v}`)
|
||||
const prefix = envParts.length > 0 ? `env ${envParts.join(' ')} ` : ''
|
||||
return `${prefix}${spec.argv.join(' ')}`
|
||||
}
|
||||
|
||||
// ── Health probe ─────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Probe `<binary> --version` (override via deps.versionProbeArgs).
|
||||
* Cached for `probeCacheMs`. Updates state + binaryVersion +
|
||||
* fires subscribers. Idempotent within the cache window.
|
||||
*/
|
||||
async probeHealth(force = false): Promise<void> {
|
||||
const cacheMs = this.deps.probeCacheMs ?? DEFAULT_PROBE_CACHE_MS
|
||||
const now = Date.now()
|
||||
if (!force && now - this.healthCheckedAt < cacheMs) return
|
||||
// Concurrent callers race past the cache check when the cache is
|
||||
// stale or never stamped (spawn-failure path). Coalesce them onto
|
||||
// the same probe so we never spawn duplicate `--version` processes.
|
||||
if (this.probeInFlight) return this.probeInFlight
|
||||
this.probeInFlight = this.runProbeOnce().finally(() => {
|
||||
this.probeInFlight = null
|
||||
})
|
||||
return this.probeInFlight
|
||||
}
|
||||
|
||||
private async runProbeOnce(): Promise<void> {
|
||||
const argv = this.deps.versionProbeArgs ?? [
|
||||
this.deps.binaryName,
|
||||
'--version',
|
||||
]
|
||||
try {
|
||||
const result = await this.runProbe(argv, DEFAULT_PROBE_TIMEOUT_MS)
|
||||
this.healthCheckedAt = Date.now()
|
||||
if (result.exitCode === 0) {
|
||||
this.binaryVersion = result.stdout.trim() || null
|
||||
this.setState('cli_present')
|
||||
} else {
|
||||
this.binaryVersion = null
|
||||
this.setState(
|
||||
'cli_unhealthy',
|
||||
`${this.deps.binaryName} --version exited ${result.exitCode}: ${result.stderr.trim() || '(no stderr)'}`,
|
||||
)
|
||||
}
|
||||
} catch (err) {
|
||||
// Spawn failure (binary missing, perm denied) leaves the cache
|
||||
// unstamped so the next call re-probes; the inflight promise
|
||||
// above still prevents *concurrent* duplicates.
|
||||
this.binaryVersion = null
|
||||
this.setState(
|
||||
'cli_missing',
|
||||
err instanceof Error ? err.message : String(err),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// ── Subclass hooks ───────────────────────────────────────────────
|
||||
|
||||
  /**
   * Hook behind the `check-auth` action. The base implementation is a
   * deliberate no-op; subclasses override it to verify credentials on
   * disk (claude reads ~/.claude/auth.json, codex reads
   * <CODEX_HOME>/auth.json, etc.).
   */
  protected async checkAuth(): Promise<void> {
    return
  }
|
||||
|
||||
/** Default reinstall-cli handler — throws an informative error
|
||||
* pointing at the upstream docs. Subclasses can override to
|
||||
* trigger an in-app installer. */
|
||||
protected async handleReinstallCli(): Promise<void> {
|
||||
throw new Error(
|
||||
`${this.descriptor.displayName} CLI is not installed. ` +
|
||||
`Install ${this.deps.binaryName} from the upstream docs and probe again.`,
|
||||
)
|
||||
}
|
||||
|
||||
// ── Internals ────────────────────────────────────────────────────
|
||||
|
||||
protected setState(next: RuntimeState, errorMessage?: string): void {
|
||||
if (next === this.state && !errorMessage) return
|
||||
this.state = next
|
||||
if (errorMessage !== undefined) {
|
||||
this.lastError = errorMessage
|
||||
this.lastErrorAt = Date.now()
|
||||
} else if (next === 'cli_present') {
|
||||
this.lastError = null
|
||||
this.lastErrorAt = null
|
||||
}
|
||||
const snapshot = this.getStatusSnapshot()
|
||||
for (const listener of this.listeners) {
|
||||
try {
|
||||
listener(snapshot)
|
||||
} catch (err) {
|
||||
logger.warn('HostProcessAgentRuntime state listener threw', {
|
||||
adapterId: this.descriptor.adapterId,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Spawn `cmd` and collect { exitCode, stdout, stderr }, killing the
   * process after `timeoutMs`. Tests inject `deps.spawnProbe` to
   * bypass the real spawn entirely.
   */
  private async runProbe(
    cmd: ReadonlyArray<string>,
    timeoutMs: number,
  ): Promise<{ exitCode: number; stdout: string; stderr: string }> {
    if (this.deps.spawnProbe) return this.deps.spawnProbe(cmd, timeoutMs)
    const proc = Bun.spawn(cmd as string[], {
      stdout: 'pipe',
      stderr: 'pipe',
    })
    // Hard timeout: killing the process makes it exit, so the awaits
    // below still settle rather than hanging forever.
    const timer = setTimeout(() => {
      try {
        proc.kill()
      } catch {
        // best-effort
      }
    }, timeoutMs)
    // Drain both pipes concurrently with the exit wait so neither
    // pipe can back up while we block on the other.
    const [stdout, stderr, exitCode] = await Promise.all([
      new Response(proc.stdout).text(),
      new Response(proc.stderr).text(),
      proc.exited,
    ])
    clearTimeout(timer)
    return { exitCode, stdout, stderr }
  }
|
||||
}
|
||||
@@ -0,0 +1,53 @@
|
||||
/**
 * @license
 * Copyright 2025 BrowserOS
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

// Barrel for the agent-runtime module. External code imports from
// here; the concrete runtime files are implementation detail.

export type { AgentRuntime } from './agent-runtime'
// Claude — host-process runtime (CLI binary on $PATH).
export {
  ClaudeRuntime,
  type ClaudeRuntimeConfig,
  type ConfigureClaudeRuntimeOptions,
  configureClaudeRuntime,
  getClaudeRuntime,
  prepareClaudeCodeContext,
} from './claude-host-process-runtime'
// Codex — host-process runtime (CLI binary on $PATH).
export {
  CodexRuntime,
  type CodexRuntimeConfig,
  type ConfigureCodexRuntimeOptions,
  configureCodexRuntime,
  getCodexRuntime,
  prepareCodexContext,
} from './codex-host-process-runtime'
export { ContainerAgentRuntime } from './container-agent-runtime'
export { ActionNotSupportedError, RuntimeNotReadyError } from './errors'
// Hermes — container-backed runtime.
export {
  type ConfigureHermesRuntimeOptions,
  configureHermesRuntime,
  getHermesRuntime,
  HermesContainerRuntime,
  type HermesContainerRuntimeConfig,
  prepareHermesContext,
} from './hermes-container-runtime'
export {
  HostProcessAgentRuntime,
  type HostProcessAgentRuntimeDeps,
} from './host-process-agent-runtime'
// Registry mapping adapterId → runtime instance.
export {
  AgentRuntimeRegistry,
  getAgentRuntimeRegistry,
  resetAgentRuntimeRegistry,
} from './registry'
export type {
  ExecSpec,
  Platform,
  RuntimeAction,
  RuntimeCapability,
  RuntimeDescriptor,
  RuntimeState,
  RuntimeStatusSnapshot,
  StateListener,
  Unsubscribe,
} from './types'
|
||||
@@ -0,0 +1,43 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import type { AgentRuntime } from './agent-runtime'
|
||||
|
||||
export class AgentRuntimeRegistry {
|
||||
private readonly runtimes = new Map<string, AgentRuntime>()
|
||||
|
||||
register(runtime: AgentRuntime): void {
|
||||
const id = runtime.descriptor.adapterId
|
||||
if (this.runtimes.has(id)) {
|
||||
throw new Error(`Runtime for adapter "${id}" is already registered`)
|
||||
}
|
||||
this.runtimes.set(id, runtime)
|
||||
}
|
||||
|
||||
get(adapterId: string): AgentRuntime | null {
|
||||
return this.runtimes.get(adapterId) ?? null
|
||||
}
|
||||
|
||||
list(): ReadonlyArray<AgentRuntime> {
|
||||
return Array.from(this.runtimes.values())
|
||||
}
|
||||
|
||||
unregister(adapterId: string): boolean {
|
||||
return this.runtimes.delete(adapterId)
|
||||
}
|
||||
}
|
||||
|
||||
let globalRegistry: AgentRuntimeRegistry | null = null
|
||||
|
||||
export function getAgentRuntimeRegistry(): AgentRuntimeRegistry {
|
||||
if (!globalRegistry) globalRegistry = new AgentRuntimeRegistry()
|
||||
return globalRegistry
|
||||
}
|
||||
|
||||
/** Test-only — production code never calls this. */
|
||||
export function resetAgentRuntimeRegistry(): void {
|
||||
globalRegistry = null
|
||||
}
|
||||
@@ -0,0 +1,92 @@
|
||||
/**
 * @license
 * Copyright 2025 BrowserOS
 * SPDX-License-Identifier: AGPL-3.0-or-later
 *
 * Shared types for the AgentRuntime layer. Pure types — no behaviour
 * lives here.
 */

import type { ExecSpec } from '../../container/managed'

export type Platform = NodeJS.Platform

/** Static identity of a runtime — who it is, not how it is doing. */
export interface RuntimeDescriptor {
  /** Stable id matching `agent.adapter` for harness lookups. */
  adapterId: string
  /** Human-readable label for UI. */
  displayName: string
  /** Discriminator for runtime kind. UI components route on this. */
  kind: 'container' | 'host-process'
  /** Platforms where this runtime is supported (today: ['darwin']
   * for container kinds; varies for host kinds). */
  platforms: ReadonlyArray<Platform>
}

/**
 * Lifecycle state of a runtime. The first two variants are shared;
 * the middle group applies only to container runtimes and the last
 * group only to host-process runtimes.
 */
export type RuntimeState =
  | 'unsupported_platform'
  | 'errored'
  // container-only
  | 'not_installed'
  | 'installing'
  | 'installed'
  | 'starting'
  | 'running'
  | 'stopped'
  // host-only
  | 'cli_missing'
  | 'cli_present'
  | 'cli_unhealthy'

/** Point-in-time health/readiness report pushed to state listeners. */
export interface RuntimeStatusSnapshot {
  adapterId: string
  state: RuntimeState
  /** True iff the harness can spawn turns against this runtime now. */
  isReady: boolean
  lastError: string | null
  lastErrorAt: number | null
  /** Wall-clock ms when the last definitive readiness probe completed.
   * Null when the runtime has never been probed. Distinct from
   * `lastErrorAt` (only set on errors) so consumers can read probe
   * staleness regardless of health state. */
  probedAt?: number | null
  /** Adapter-specific structured fields the UI may render. Keep keys
   * stable so the UI can opt into them. */
  details?: Record<string, unknown>
}

/** Names of the actions a runtime may advertise support for. */
export type RuntimeCapability =
  | 'install'
  | 'start'
  | 'stop'
  | 'restart'
  | 'reset-soft'
  | 'reset-wipe-agent'
  | 'reset-hard'
  | 'logs'
  | 'terminal'
  | 'reinstall-cli'
  | 'check-auth'
  | 'gateway-control-plane'
  | 'agent-crud-via-runtime'

/**
 * Discriminated union of every action a runtime can be asked to
 * perform. Required arguments live on the variant so callers can't
 * forget them (e.g. `agentId` for `reset-wipe-agent`).
 */
export type RuntimeAction =
  | { type: 'install' }
  | { type: 'start' }
  | { type: 'stop' }
  | { type: 'restart' }
  | { type: 'reset-soft' }
  | { type: 'reset-wipe-agent'; agentId: string }
  | { type: 'reset-hard' }
  | { type: 'reinstall-cli' }
  | { type: 'check-auth' }

/** Callback invoked with each fresh status snapshot. */
export type StateListener = (snapshot: RuntimeStatusSnapshot) => void
/** Returned by subscribe(); call to stop receiving snapshots. */
export type Unsubscribe = () => void

// Re-exported so consumers of this layer need not reach into the
// container module for the exec-spec shape.
export type { ExecSpec }
|
||||
@@ -27,6 +27,20 @@ export interface AgentHistoryPage {
|
||||
items: AgentHistoryEntry[]
|
||||
}
|
||||
|
||||
/**
|
||||
* One file the harness attributed to the assistant turn that just
|
||||
* finished. Emitted as part of a `produced_files` event before the
|
||||
* terminal `done` so the inline artifact card renders alongside the
|
||||
* streamed text the user just watched complete.
|
||||
*/
|
||||
export interface ProducedFileEventEntry {
|
||||
id: string
|
||||
/** Workspace-relative POSIX path. */
|
||||
path: string
|
||||
size: number
|
||||
mtimeMs: number
|
||||
}
|
||||
|
||||
export type AgentStreamEvent =
|
||||
| {
|
||||
type: 'text_delta'
|
||||
@@ -47,6 +61,10 @@ export type AgentStreamEvent =
|
||||
text: string
|
||||
rawType?: string
|
||||
}
|
||||
| {
|
||||
type: 'produced_files'
|
||||
files: ProducedFileEventEntry[]
|
||||
}
|
||||
| {
|
||||
type: 'done'
|
||||
text?: string
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import { unlinkSync } from 'node:fs'
|
||||
import { mkdir, readdir, rm, stat, writeFile } from 'node:fs/promises'
|
||||
import { readdir, rm, stat, writeFile } from 'node:fs/promises'
|
||||
import { homedir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
import { PATHS } from '@browseros/shared/constants/paths'
|
||||
import type { ServerDiscoveryConfig } from '@browseros/shared/types/server-config'
|
||||
import { ensureDirectory } from './ensure-directory'
|
||||
import { logger } from './logger'
|
||||
|
||||
export function getBrowserosDir(): string {
|
||||
@@ -112,12 +113,12 @@ export function removeServerConfigSync(): void {
|
||||
|
||||
export async function ensureBrowserosDir(): Promise<void> {
|
||||
logDevelopmentBrowserosDir()
|
||||
await mkdir(getMemoryDir(), { recursive: true })
|
||||
await mkdir(getSkillsDir(), { recursive: true })
|
||||
await mkdir(getBuiltinSkillsDir(), { recursive: true })
|
||||
await mkdir(getSessionsDir(), { recursive: true })
|
||||
await mkdir(getLazyMonitoringRunsDir(), { recursive: true })
|
||||
await mkdir(getVmDisksDir(), { recursive: true })
|
||||
await ensureDirectory(getMemoryDir())
|
||||
await ensureDirectory(getSkillsDir())
|
||||
await ensureDirectory(getBuiltinSkillsDir())
|
||||
await ensureDirectory(getSessionsDir())
|
||||
await ensureDirectory(getLazyMonitoringRunsDir())
|
||||
await ensureDirectory(getVmDisksDir())
|
||||
}
|
||||
|
||||
export async function cleanOldSessions(): Promise<void> {
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
const GEMINI_COMPUTER_USE_MODEL_PATTERN = /computer-use/i
|
||||
|
||||
const GEMINI_COMPUTER_USE_TOOL = {
|
||||
computerUse: {
|
||||
environment: 'ENVIRONMENT_BROWSER',
|
||||
},
|
||||
} as const
|
||||
|
||||
type JsonObject = Record<string, unknown>
|
||||
|
||||
function isJsonObject(value: unknown): value is JsonObject {
|
||||
return value !== null && typeof value === 'object' && !Array.isArray(value)
|
||||
}
|
||||
|
||||
function hasComputerUseTool(tool: unknown): boolean {
|
||||
return isJsonObject(tool) && 'computerUse' in tool
|
||||
}
|
||||
|
||||
export function isGeminiComputerUseModel(modelId: string): boolean {
|
||||
return GEMINI_COMPUTER_USE_MODEL_PATTERN.test(modelId)
|
||||
}
|
||||
|
||||
export function addGeminiComputerUseTool(body: unknown): unknown {
|
||||
if (!isJsonObject(body)) return body
|
||||
|
||||
const existingTools = Array.isArray(body.tools) ? body.tools : []
|
||||
if (existingTools.some(hasComputerUseTool)) return body
|
||||
|
||||
return {
|
||||
...body,
|
||||
tools: [GEMINI_COMPUTER_USE_TOOL, ...existingTools],
|
||||
}
|
||||
}
|
||||
|
||||
function injectComputerUseToolIntoBody(body: BodyInit | null | undefined) {
|
||||
if (typeof body !== 'string') return body
|
||||
|
||||
try {
|
||||
return JSON.stringify(addGeminiComputerUseTool(JSON.parse(body)))
|
||||
} catch {
|
||||
return body
|
||||
}
|
||||
}
|
||||
|
||||
export function createGeminiComputerUseFetch(
|
||||
modelId: string,
|
||||
): typeof globalThis.fetch | undefined {
|
||||
if (!isGeminiComputerUseModel(modelId)) return undefined
|
||||
|
||||
const fetchWithComputerUse = (async (input, init) => {
|
||||
return globalThis.fetch(input, {
|
||||
...init,
|
||||
body: injectComputerUseToolIntoBody(init?.body),
|
||||
})
|
||||
}) as typeof globalThis.fetch
|
||||
|
||||
fetchWithComputerUse.preconnect = globalThis.fetch.preconnect.bind(
|
||||
globalThis.fetch,
|
||||
)
|
||||
|
||||
return fetchWithComputerUse
|
||||
}
|
||||
@@ -21,6 +21,7 @@ import { logger } from '../../logger'
|
||||
import { createOpenRouterCompatibleFetch } from '../../openrouter-fetch'
|
||||
import { createCodexFetch } from '../oauth/codex-fetch'
|
||||
import { createCopilotFetch } from '../oauth/copilot-fetch'
|
||||
import { createGeminiComputerUseFetch } from './gemini-computer-use-fetch'
|
||||
import {
|
||||
createMockBrowserOSLanguageModel,
|
||||
shouldUseMockBrowserOSLLM,
|
||||
@@ -41,7 +42,12 @@ function createOpenAIModel(config: ResolvedLLMConfig): LanguageModel {
|
||||
|
||||
function createGoogleModel(config: ResolvedLLMConfig): LanguageModel {
|
||||
if (!config.apiKey) throw new Error('Google provider requires apiKey')
|
||||
return createGoogleGenerativeAI({ apiKey: config.apiKey })(config.model)
|
||||
const fetch = createGeminiComputerUseFetch(config.model)
|
||||
return createGoogleGenerativeAI({
|
||||
apiKey: config.apiKey,
|
||||
...(config.baseUrl && { baseURL: config.baseUrl }),
|
||||
...(fetch && { fetch }),
|
||||
})(config.model)
|
||||
}
|
||||
|
||||
function createOpenRouterModel(config: ResolvedLLMConfig): LanguageModel {
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
ContainerCliError,
|
||||
ContainerNameInUseError,
|
||||
ContainerNameReleaseTimeoutError,
|
||||
ContainerNotRunningError,
|
||||
} from '../vm/errors'
|
||||
import { LimaCli } from '../vm/lima-cli'
|
||||
import type {
|
||||
@@ -17,6 +18,7 @@ import type {
|
||||
MountSpec,
|
||||
PortMapping,
|
||||
WaitForContainerNameReleaseOptions,
|
||||
WaitForContainerRunningOptions,
|
||||
} from './types'
|
||||
|
||||
export function buildNerdctlCommand(args: string[]): string[] {
|
||||
@@ -117,6 +119,36 @@ export class ContainerCli {
|
||||
throw this.commandError(args, result)
|
||||
}
|
||||
|
||||
/**
|
||||
* Poll `nerdctl container inspect` until the container reports
|
||||
* `running: true`. Used by the managed-container layer between
|
||||
* `nerdctl create + start` and "container is ready for `exec`" so
|
||||
* the harness never spawns a turn against a half-started container.
|
||||
*
|
||||
* Distinct from `waitForContainerNameRelease`, which waits for the
|
||||
* container to disappear after `rm`. Defaults are sized for a
|
||||
* cold-start: 30 s budget at 500 ms cadence (60 polls). Caller can
|
||||
* tighten for tests via `opts`.
|
||||
*/
|
||||
async waitForContainerRunning(
|
||||
name: string,
|
||||
opts: WaitForContainerRunningOptions = {},
|
||||
): Promise<void> {
|
||||
const timeoutMs = opts.timeoutMs ?? 30_000
|
||||
const intervalMs = opts.intervalMs ?? 500
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt <= timeoutMs) {
|
||||
const info = await this.inspectContainer(name)
|
||||
if (info?.running === true) return
|
||||
const remainingMs = timeoutMs - (Date.now() - startedAt)
|
||||
if (remainingMs <= 0) break
|
||||
await Bun.sleep(Math.min(intervalMs, remainingMs))
|
||||
}
|
||||
|
||||
throw new ContainerNotRunningError(name, timeoutMs)
|
||||
}
|
||||
|
||||
/** Wait for containerd/nerdctl to stop resolving a container name after rm. */
|
||||
async waitForContainerNameRelease(
|
||||
name: string,
|
||||
@@ -235,6 +267,7 @@ function buildCreateArgs(spec: ContainerSpec): string[] {
|
||||
args.push('--health-retries', String(spec.health.retries))
|
||||
}
|
||||
}
|
||||
if (spec.entrypoint) args.push('--entrypoint', spec.entrypoint)
|
||||
|
||||
args.push(spec.image)
|
||||
args.push(...(spec.command ?? []))
|
||||
@@ -285,7 +318,7 @@ function parseContainerInfo(
|
||||
}
|
||||
}
|
||||
|
||||
function isNoSuchContainer(stderr: string): boolean {
|
||||
export function isNoSuchContainer(stderr: string): boolean {
|
||||
const lower = stderr.toLowerCase()
|
||||
return (
|
||||
lower.includes('no such container') || lower.includes('container not found')
|
||||
|
||||
@@ -4,6 +4,10 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import {
|
||||
HERMES_AGENT_NAME,
|
||||
HERMES_IMAGE,
|
||||
} from '@browseros/shared/constants/hermes'
|
||||
import {
|
||||
OPENCLAW_AGENT_NAME,
|
||||
OPENCLAW_IMAGE,
|
||||
@@ -35,10 +39,14 @@ export class ImageLoader {
|
||||
|
||||
/** Resolve BrowserOS agent names to image refs and ensure the image exists. */
|
||||
async ensureAgentImageLoaded(name: string, onLog?: LogFn): Promise<string> {
|
||||
if (name !== OPENCLAW_AGENT_NAME) {
|
||||
throw new ImageLoadError(name, `no agent image mapping: ${name}`)
|
||||
if (name === OPENCLAW_AGENT_NAME) {
|
||||
await this.ensureImageLoaded(OPENCLAW_IMAGE, onLog)
|
||||
return OPENCLAW_IMAGE
|
||||
}
|
||||
await this.ensureImageLoaded(OPENCLAW_IMAGE, onLog)
|
||||
return OPENCLAW_IMAGE
|
||||
if (name === HERMES_AGENT_NAME) {
|
||||
await this.ensureImageLoaded(HERMES_IMAGE, onLog)
|
||||
return HERMES_IMAGE
|
||||
}
|
||||
throw new ImageLoadError(name, `no agent image mapping: ${name}`)
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user