mirror of
https://github.com/browseros-ai/BrowserOS.git
synced 2026-05-13 23:53:25 +00:00
Compare commits
21 Commits
exp/click_
...
feat/local
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1e8f1f2845 | ||
|
|
191da98714 | ||
|
|
c6fac277ee | ||
|
|
329e8b24a2 | ||
|
|
bbc4844599 | ||
|
|
70b91c0ea1 | ||
|
|
1db4d19f4b | ||
|
|
d7a83b9df2 | ||
|
|
5766b5eb7f | ||
|
|
c4ee678b00 | ||
|
|
9b27e6aece | ||
|
|
9e0252fe1c | ||
|
|
acf0ea028d | ||
|
|
88d3d9dbad | ||
|
|
31ca756aec | ||
|
|
e045e34b73 | ||
|
|
01d649da9a | ||
|
|
ddbb2cf492 | ||
|
|
711934555d | ||
|
|
5125dffbf3 | ||
|
|
0035893f33 |
10
.github/workflows/eval-weekly.yml
vendored
10
.github/workflows/eval-weekly.yml
vendored
@@ -43,6 +43,12 @@ jobs:
|
||||
working-directory: packages/browseros-agent
|
||||
run: bun install --ignore-scripts && bun run build:agent-sdk
|
||||
|
||||
- name: Install Python eval dependencies
|
||||
run: pip install agisdk requests
|
||||
|
||||
- name: Clone WebArena-Infinity
|
||||
run: git clone --depth 1 https://github.com/web-arena-x/webarena-infinity.git /tmp/webarena-infinity
|
||||
|
||||
- name: Install xvfb
|
||||
run: sudo apt-get update && sudo apt-get install -y xvfb
|
||||
|
||||
@@ -57,9 +63,11 @@ jobs:
|
||||
working-directory: packages/browseros-agent/apps/eval
|
||||
env:
|
||||
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
|
||||
OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
|
||||
CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
NOPECHA_API_KEY: ${{ secrets.NOPECHA_API_KEY }}
|
||||
BROWSEROS_BINARY: /usr/bin/browseros
|
||||
WEBARENA_INFINITY_DIR: /tmp/webarena-infinity
|
||||
EVAL_CONFIG: ${{ github.event.inputs.config || 'configs/browseros-agent-weekly.json' }}
|
||||
run: |
|
||||
echo "Running eval with config: $EVAL_CONFIG"
|
||||
@@ -81,6 +89,8 @@ jobs:
|
||||
|
||||
- name: Generate trend report
|
||||
if: success()
|
||||
timeout-minutes: 5
|
||||
continue-on-error: true
|
||||
working-directory: packages/browseros-agent
|
||||
env:
|
||||
EVAL_R2_ACCOUNT_ID: ${{ secrets.EVAL_R2_ACCOUNT_ID }}
|
||||
|
||||
2
packages/browseros-agent/.gitignore
vendored
2
packages/browseros-agent/.gitignore
vendored
@@ -180,6 +180,8 @@ packages/*/dist
|
||||
browseros-server
|
||||
browseros-server.exe
|
||||
browseros-server-*
|
||||
tools/alpha/balpha
|
||||
tools/alpha/browseros-alpha
|
||||
tools/dev/browseros-dev
|
||||
|
||||
log.txt
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { Bot } from 'lucide-react'
|
||||
import { Bot, Loader2, Wrench } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import type { AgentCardData } from '@/lib/agent-conversations/types'
|
||||
import { cn } from '@/lib/utils'
|
||||
@@ -32,6 +32,11 @@ function getStatusTone(status: AgentCardData['status']): string {
|
||||
return 'bg-emerald-500'
|
||||
}
|
||||
|
||||
function formatCost(usd: number): string {
|
||||
if (usd < 0.005) return `$${usd.toFixed(4)}`
|
||||
return `$${usd.toFixed(2)}`
|
||||
}
|
||||
|
||||
export const AgentCardExpanded: FC<AgentCardProps> = ({
|
||||
agent,
|
||||
onClick,
|
||||
@@ -81,9 +86,26 @@ export const AgentCardExpanded: FC<AgentCardProps> = ({
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="mt-4 flex items-center justify-between gap-3 text-muted-foreground text-xs">
|
||||
<span>{formatTimestamp(agent.lastMessageTimestamp)}</span>
|
||||
<span>Open conversation</span>
|
||||
<div className="mt-4 space-y-1.5 text-muted-foreground text-xs">
|
||||
<div className="flex items-center justify-between gap-3">
|
||||
<span>{formatTimestamp(agent.lastMessageTimestamp)}</span>
|
||||
{agent.costUsd ? (
|
||||
<span className="tabular-nums opacity-70">
|
||||
{formatCost(agent.costUsd)}
|
||||
</span>
|
||||
) : null}
|
||||
</div>
|
||||
{agent.status === 'working' && agent.currentTool ? (
|
||||
<div className="flex items-center gap-1.5 text-[var(--accent-orange)]/70">
|
||||
<Loader2 className="size-3 shrink-0 animate-spin" />
|
||||
<span className="truncate">{agent.currentTool}</span>
|
||||
</div>
|
||||
) : agent.activitySummary ? (
|
||||
<div className="flex items-center gap-1.5 text-muted-foreground/60">
|
||||
<Wrench className="size-3 shrink-0" />
|
||||
<span className="truncate">{agent.activitySummary}</span>
|
||||
</div>
|
||||
) : null}
|
||||
</div>
|
||||
</button>
|
||||
)
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { useQueryClient } from '@tanstack/react-query'
|
||||
import { ArrowLeft, Bot, Home } from 'lucide-react'
|
||||
import { type FC, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { Navigate, useNavigate, useParams, useSearchParams } from 'react-router'
|
||||
@@ -16,11 +15,8 @@ import {
|
||||
flattenHistoryPages,
|
||||
} from './claw-chat-types'
|
||||
import { useAgentConversation } from './useAgentConversation'
|
||||
import {
|
||||
CLAW_CHAT_QUERY_KEYS,
|
||||
useClawAgentSession,
|
||||
useClawChatHistory,
|
||||
} from './useClawChatHistory'
|
||||
import { useClawChatHistory } from './useClawChatHistory'
|
||||
import { useOutboundQueue } from './useOutboundQueue'
|
||||
|
||||
function StatusBadge({ status }: { status: string }) {
|
||||
return (
|
||||
@@ -196,20 +192,18 @@ function AgentConversationController({
|
||||
agentPathPrefix: string
|
||||
createAgentPath: string
|
||||
}) {
|
||||
const queryClient = useQueryClient()
|
||||
const navigate = useNavigate()
|
||||
const initialMessageSentRef = useRef<string | null>(null)
|
||||
const onInitialMessageConsumedRef = useRef(onInitialMessageConsumed)
|
||||
const [streamSessionKey, setStreamSessionKey] = useState<string | null>(null)
|
||||
const agent = agents.find((entry) => entry.agentId === agentId)
|
||||
const agentName = agent?.name || agentId || 'Agent'
|
||||
const sessionQuery = useClawAgentSession(agentId)
|
||||
const resolvedSessionKey =
|
||||
streamSessionKey ?? sessionQuery.data?.sessionKey ?? null
|
||||
// Single source of truth: the history endpoint resolves the session itself
|
||||
// when sessionKey is null. Once a chat creates a new session, streamSessionKey
|
||||
// overrides it and the history queryKey rotates to refetch for that session.
|
||||
const historyQuery = useClawChatHistory({
|
||||
agentId,
|
||||
sessionKey: resolvedSessionKey,
|
||||
enabled: Boolean(resolvedSessionKey),
|
||||
sessionKey: streamSessionKey,
|
||||
})
|
||||
|
||||
const historyMessages = useMemo(
|
||||
@@ -220,31 +214,61 @@ function AgentConversationController({
|
||||
() => buildChatHistoryFromClawMessages(historyMessages),
|
||||
[historyMessages],
|
||||
)
|
||||
const resolvedSessionKey =
|
||||
streamSessionKey ?? historyQuery.data?.pages?.[0]?.sessionKey ?? null
|
||||
|
||||
const { turns, streaming, send } = useAgentConversation(agentId, {
|
||||
const { turns, streaming } = useAgentConversation(agentId, {
|
||||
sessionKey: resolvedSessionKey,
|
||||
history: chatHistory,
|
||||
onSessionKeyChange: (sessionKey) => {
|
||||
setStreamSessionKey(sessionKey)
|
||||
void queryClient.invalidateQueries({
|
||||
queryKey: [CLAW_CHAT_QUERY_KEYS.session],
|
||||
})
|
||||
},
|
||||
})
|
||||
const sendRef = useRef(send)
|
||||
sendRef.current = send
|
||||
const outboundQueue = useOutboundQueue({
|
||||
agentId,
|
||||
sessionKey: resolvedSessionKey,
|
||||
})
|
||||
onInitialMessageConsumedRef.current = onInitialMessageConsumed
|
||||
|
||||
// Refetch history whenever a server-dispatched queue item completes.
|
||||
// The server worker streams the queued turn into OpenClaw directly, so
|
||||
// the client never observes the live tokens — we only see the new
|
||||
// assistant turn once the JSONL is updated. Watching the queue for
|
||||
// any 'sending' item dropping out is the cleanest "turn finalized"
|
||||
// signal we have without exposing per-turn SSE.
|
||||
const previousSendingIdsRef = useRef<Set<string>>(new Set())
|
||||
useEffect(() => {
|
||||
const currentSending = new Set(
|
||||
outboundQueue.queue
|
||||
.filter((item) => item.status === 'sending')
|
||||
.map((item) => item.id),
|
||||
)
|
||||
const dropped = [...previousSendingIdsRef.current].filter(
|
||||
(id) => !currentSending.has(id),
|
||||
)
|
||||
previousSendingIdsRef.current = currentSending
|
||||
if (dropped.length > 0) {
|
||||
void historyQuery.refetch()
|
||||
}
|
||||
}, [outboundQueue.queue, historyQuery])
|
||||
|
||||
const disabled = status?.status !== 'running'
|
||||
// Two-part gate: cover both "still fetching" AND "just got enabled but
|
||||
// hasn't started fetching yet". When `enabled` flips true (baseUrl
|
||||
// resolves), there's a render frame where React Query reports
|
||||
// isLoading=false but hasn't run the queryFn yet — `isFetched` is still
|
||||
// false. Without this we render EmptyState during that one frame.
|
||||
const isInitialLoading =
|
||||
sessionQuery.isLoading ||
|
||||
(Boolean(resolvedSessionKey) && historyQuery.isLoading)
|
||||
const historyReady =
|
||||
!resolvedSessionKey || historyQuery.isFetched || historyQuery.isError
|
||||
historyQuery.isLoading || (!historyQuery.isFetched && !historyQuery.isError)
|
||||
|
||||
const historyReady = historyQuery.isFetched || historyQuery.isError
|
||||
const initialMessageKey = initialMessage
|
||||
? `${agentId}:${initialMessage}`
|
||||
: null
|
||||
const error = sessionQuery.error ?? historyQuery.error ?? null
|
||||
const error = historyQuery.error ?? null
|
||||
|
||||
const enqueueRef = useRef(outboundQueue.enqueue)
|
||||
enqueueRef.current = outboundQueue.enqueue
|
||||
|
||||
useEffect(() => {
|
||||
const query = initialMessage?.trim()
|
||||
@@ -253,28 +277,24 @@ function AgentConversationController({
|
||||
return
|
||||
}
|
||||
|
||||
// The initial-message handoff (home composer → conversation page via
|
||||
// ?q=) goes through the outbound queue too, so it inherits the same
|
||||
// single-flight serialization. We no longer need to gate on
|
||||
// `streaming` — the queue worker drains as soon as the agent is
|
||||
// free.
|
||||
if (
|
||||
!query ||
|
||||
initialMessageSentRef.current === initialMessageKey ||
|
||||
disabled ||
|
||||
sessionQuery.isLoading ||
|
||||
!historyReady ||
|
||||
streaming
|
||||
!historyReady
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
initialMessageSentRef.current = initialMessageKey
|
||||
onInitialMessageConsumedRef.current()
|
||||
void sendRef.current(query)
|
||||
}, [
|
||||
disabled,
|
||||
historyReady,
|
||||
initialMessage,
|
||||
initialMessageKey,
|
||||
sessionQuery.isLoading,
|
||||
streaming,
|
||||
])
|
||||
enqueueRef.current({ text: query })
|
||||
}, [disabled, historyReady, initialMessage, initialMessageKey])
|
||||
|
||||
const handleSelectAgent = (entry: AgentEntry) => {
|
||||
navigate(`${agentPathPrefix}/${entry.agentId}`)
|
||||
@@ -295,7 +315,6 @@ function AgentConversationController({
|
||||
void historyQuery.fetchNextPage()
|
||||
}}
|
||||
onRetry={() => {
|
||||
void sessionQuery.refetch()
|
||||
void historyQuery.refetch()
|
||||
}}
|
||||
/>
|
||||
@@ -307,14 +326,28 @@ function AgentConversationController({
|
||||
agents={agents}
|
||||
selectedAgentId={agentId}
|
||||
onSelectAgent={handleSelectAgent}
|
||||
onSend={(text) => {
|
||||
void send(text)
|
||||
onSend={(input) => {
|
||||
outboundQueue.enqueue({
|
||||
text: input.text,
|
||||
attachments: input.attachments.map((a) => a.payload),
|
||||
attachmentPreviews: input.attachments.map((a) => ({
|
||||
id: a.id,
|
||||
kind: a.kind,
|
||||
mediaType: a.mediaType,
|
||||
name: a.name,
|
||||
dataUrl: a.dataUrl,
|
||||
})),
|
||||
history: chatHistory,
|
||||
})
|
||||
}}
|
||||
onCreateAgent={() => navigate(createAgentPath)}
|
||||
streaming={streaming}
|
||||
disabled={disabled}
|
||||
status={status?.status}
|
||||
placeholder={`Message ${agentName}...`}
|
||||
outboundQueue={outboundQueue.queue}
|
||||
onCancelQueued={outboundQueue.cancel}
|
||||
onRetryQueued={outboundQueue.retry}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -8,10 +8,12 @@ import type { AgentEntry } from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import { ImportDataHint } from '@/entrypoints/newtab/index/ImportDataHint'
|
||||
import { SignInHint } from '@/entrypoints/newtab/index/SignInHint'
|
||||
import { useActiveHint } from '@/entrypoints/newtab/index/useActiveHint'
|
||||
import type { AgentCardData } from '@/lib/agent-conversations/types'
|
||||
import { AgentCardDock } from './AgentCardDock'
|
||||
import { useAgentCommandData } from './agent-command-layout'
|
||||
import { ConversationInput } from './ConversationInput'
|
||||
import { useAgentCardData } from './useAgentCardData'
|
||||
import { buildAgentCardData } from './useAgentCardData'
|
||||
import { useAgentDashboard } from './useAgentDashboard'
|
||||
|
||||
function AgentCommandSetupState({
|
||||
onOpenAgents,
|
||||
@@ -95,7 +97,7 @@ function RecentThreads({
|
||||
onSelectAgent,
|
||||
}: {
|
||||
activeAgentId?: string | null
|
||||
agents: ReturnType<typeof useAgentCardData>
|
||||
agents: AgentCardData[]
|
||||
onOpenAgents: () => void
|
||||
onSelectAgent: (agentId: string) => void
|
||||
}) {
|
||||
@@ -134,7 +136,8 @@ export const AgentCommandHome: FC = () => {
|
||||
const activeHint = useActiveHint()
|
||||
const { status, agents } = useAgentCommandData()
|
||||
const [selectedAgentId, setSelectedAgentId] = useState<string | null>(null)
|
||||
const cardData = useAgentCardData(agents, status?.status)
|
||||
const { data: dashboard } = useAgentDashboard(status?.status === 'running')
|
||||
const cardData = buildAgentCardData(agents, status?.status, dashboard?.agents)
|
||||
|
||||
useEffect(() => {
|
||||
if (agents.length === 0) {
|
||||
@@ -152,9 +155,16 @@ export const AgentCommandHome: FC = () => {
|
||||
}
|
||||
}, [agents, selectedAgentId])
|
||||
|
||||
const handleSend = (text: string) => {
|
||||
const handleSend = (input: { text: string }) => {
|
||||
if (!selectedAgentId) return
|
||||
navigate(`/home/agents/${selectedAgentId}?q=${encodeURIComponent(text)}`)
|
||||
// Home composer navigates to the conversation page with the prompt in
|
||||
// the query string. Attachments are dropped at this boundary in v1 —
|
||||
// the conversation page (where staging UX is most useful anyway) is
|
||||
// where users can attach. A future iteration can stash staged files
|
||||
// in chrome.storage.session and replay them on first mount there.
|
||||
navigate(
|
||||
`/home/agents/${selectedAgentId}?q=${encodeURIComponent(input.text)}`,
|
||||
)
|
||||
}
|
||||
|
||||
const handleSelectAgent = (agent: AgentEntry) => {
|
||||
|
||||
@@ -1,36 +1,155 @@
|
||||
import { CheckCircle2, Loader2, XCircle } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import { CheckCircle2, Copy, Loader2, Wrench, XCircle } from 'lucide-react'
|
||||
import { type FC, useCallback, useMemo } from 'react'
|
||||
import {
|
||||
Message,
|
||||
MessageAction,
|
||||
MessageActions,
|
||||
MessageAttachment,
|
||||
MessageAttachments,
|
||||
MessageContent,
|
||||
MessageResponse,
|
||||
MessageToolbar,
|
||||
} from '@/components/ai-elements/message'
|
||||
import {
|
||||
Reasoning,
|
||||
ReasoningContent,
|
||||
ReasoningTrigger,
|
||||
} from '@/components/ai-elements/reasoning'
|
||||
import {
|
||||
Task,
|
||||
TaskContent,
|
||||
TaskItem,
|
||||
TaskTrigger,
|
||||
} from '@/components/ai-elements/task'
|
||||
import { cn } from '@/lib/utils'
|
||||
import type { ClawChatMessage as ClawChatMessageType } from './claw-chat-types'
|
||||
import type {
|
||||
ClawChatMessagePart,
|
||||
ClawChatMessage as ClawChatMessageType,
|
||||
} from './claw-chat-types'
|
||||
|
||||
function formatCost(usd: number): string {
|
||||
if (usd < 0.005) return `$${usd.toFixed(4)}`
|
||||
return `$${usd.toFixed(2)}`
|
||||
}
|
||||
|
||||
type ToolCallPart = Extract<ClawChatMessagePart, { type: 'tool-call' }>
|
||||
type AttachmentPart = Extract<ClawChatMessagePart, { type: 'attachment' }>
|
||||
|
||||
interface RenderEntry {
|
||||
kind: 'text' | 'reasoning' | 'meta' | 'task' | 'attachments'
|
||||
partIndex: number
|
||||
part?: ClawChatMessagePart
|
||||
tools?: ToolCallPart[]
|
||||
attachments?: AttachmentPart[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a render plan that groups all tool-call parts into a single Task
|
||||
* collapsible and all attachment parts into a single attachment strip at
|
||||
* their respective first-appearance positions. Other parts render in place.
|
||||
*/
|
||||
function buildRenderEntries(parts: ClawChatMessagePart[]): RenderEntry[] {
|
||||
const entries: RenderEntry[] = []
|
||||
const tools: ToolCallPart[] = []
|
||||
const attachments: AttachmentPart[] = []
|
||||
let taskInserted = false
|
||||
let attachmentsInserted = false
|
||||
|
||||
parts.forEach((part, partIndex) => {
|
||||
if (part.type === 'tool-call') {
|
||||
tools.push(part)
|
||||
if (!taskInserted) {
|
||||
entries.push({ kind: 'task', partIndex, tools })
|
||||
taskInserted = true
|
||||
}
|
||||
} else if (part.type === 'attachment') {
|
||||
attachments.push(part)
|
||||
if (!attachmentsInserted) {
|
||||
entries.push({ kind: 'attachments', partIndex, attachments })
|
||||
attachmentsInserted = true
|
||||
}
|
||||
} else if (part.type === 'text') {
|
||||
entries.push({ kind: 'text', partIndex, part })
|
||||
} else if (part.type === 'reasoning') {
|
||||
entries.push({ kind: 'reasoning', partIndex, part })
|
||||
} else if (part.type === 'meta') {
|
||||
entries.push({ kind: 'meta', partIndex, part })
|
||||
}
|
||||
})
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
function ToolStatusIcon({ status }: { status: ToolCallPart['status'] }) {
|
||||
if (status === 'running' || status === 'pending') {
|
||||
return (
|
||||
<Loader2 className="size-3.5 shrink-0 animate-spin text-muted-foreground" />
|
||||
)
|
||||
}
|
||||
if (status === 'completed') {
|
||||
return <CheckCircle2 className="size-3.5 shrink-0 text-green-500" />
|
||||
}
|
||||
return <XCircle className="size-3.5 shrink-0 text-destructive" />
|
||||
}
|
||||
|
||||
interface ClawChatMessageProps {
|
||||
message: ClawChatMessageType
|
||||
}
|
||||
|
||||
export const ClawChatMessage: FC<ClawChatMessageProps> = ({ message }) => (
|
||||
<Message
|
||||
from={message.role}
|
||||
className="max-w-full group-[.is-user]:max-w-[80%]"
|
||||
>
|
||||
<MessageContent className="max-w-full overflow-hidden group-[.is-assistant]:w-full group-[.is-user]:max-w-full">
|
||||
{message.parts.map((part, index) => {
|
||||
const key = `${message.id}-part-${index}`
|
||||
export const ClawChatMessage: FC<ClawChatMessageProps> = ({ message }) => {
|
||||
const messageText = message.parts
|
||||
.filter((p) => p.type === 'text')
|
||||
.map((p) => p.text)
|
||||
.join('\n')
|
||||
|
||||
switch (part.type) {
|
||||
case 'text':
|
||||
const handleCopy = useCallback(() => {
|
||||
if (messageText) navigator.clipboard.writeText(messageText)
|
||||
}, [messageText])
|
||||
|
||||
const entries = useMemo(
|
||||
() => buildRenderEntries(message.parts),
|
||||
[message.parts],
|
||||
)
|
||||
|
||||
return (
|
||||
<Message
|
||||
from={message.role}
|
||||
className="max-w-full group-[.is-user]:max-w-[80%]"
|
||||
>
|
||||
<MessageContent className="max-w-full overflow-hidden group-[.is-assistant]:w-full group-[.is-user]:max-w-full">
|
||||
{entries.map((entry) => {
|
||||
const key = `${message.id}-entry-${entry.partIndex}`
|
||||
|
||||
if (entry.kind === 'attachments' && entry.attachments) {
|
||||
return (
|
||||
<MessageAttachments key={key}>
|
||||
{entry.attachments.map((attachment, idx) => (
|
||||
<MessageAttachment
|
||||
// biome-ignore lint/suspicious/noArrayIndexKey: attachment order is stable within a finalized message
|
||||
key={`${attachment.kind}-${idx}`}
|
||||
data={{
|
||||
type: 'file',
|
||||
url: attachment.dataUrl ?? '',
|
||||
mediaType: attachment.mediaType,
|
||||
filename: attachment.name,
|
||||
}}
|
||||
/>
|
||||
))}
|
||||
</MessageAttachments>
|
||||
)
|
||||
}
|
||||
|
||||
if (entry.kind === 'text' && entry.part?.type === 'text') {
|
||||
return (
|
||||
<MessageResponse
|
||||
key={key}
|
||||
// Historical messages are finalized — render immediately.
|
||||
// Streamdown's default "streaming" mode uses an idle-callback
|
||||
// debounce (300ms / 500ms idle) that paints empty content
|
||||
// first, which made history flash blank tool collapsibles
|
||||
// before text on every load.
|
||||
mode="static"
|
||||
parseIncompleteMarkdown={false}
|
||||
className={cn(
|
||||
'max-w-full overflow-hidden break-words',
|
||||
'[&_[data-streamdown="code-block"]]:!w-full [&_[data-streamdown="code-block"]]:!max-w-full [&_[data-streamdown="code-block"]]:overflow-x-auto',
|
||||
@@ -38,53 +157,92 @@ export const ClawChatMessage: FC<ClawChatMessageProps> = ({ message }) => (
|
||||
'[&_table]:w-max [&_table]:min-w-full',
|
||||
)}
|
||||
>
|
||||
{part.text}
|
||||
{entry.part.text}
|
||||
</MessageResponse>
|
||||
)
|
||||
}
|
||||
|
||||
case 'reasoning':
|
||||
if (entry.kind === 'reasoning' && entry.part?.type === 'reasoning') {
|
||||
return (
|
||||
<Reasoning key={key} className="w-full" defaultOpen={false}>
|
||||
<Reasoning
|
||||
key={key}
|
||||
className="w-full"
|
||||
defaultOpen={false}
|
||||
duration={entry.part.duration}
|
||||
>
|
||||
<ReasoningTrigger />
|
||||
<ReasoningContent>{part.text}</ReasoningContent>
|
||||
<ReasoningContent>{entry.part.text}</ReasoningContent>
|
||||
</Reasoning>
|
||||
)
|
||||
}
|
||||
|
||||
case 'tool-call':
|
||||
return (
|
||||
<div
|
||||
key={key}
|
||||
className="flex items-center gap-2 rounded-md border px-3 py-2 text-sm"
|
||||
>
|
||||
{part.status === 'running' || part.status === 'pending' ? (
|
||||
<Loader2 className="size-3.5 animate-spin text-muted-foreground" />
|
||||
) : null}
|
||||
{part.status === 'completed' ? (
|
||||
<CheckCircle2 className="size-3.5 text-green-500" />
|
||||
) : null}
|
||||
{part.status === 'failed' ? (
|
||||
<XCircle className="size-3.5 text-destructive" />
|
||||
) : null}
|
||||
<span className="font-mono text-xs">{part.name}</span>
|
||||
{part.error ? (
|
||||
<span className="ml-auto text-destructive text-xs">
|
||||
{part.error}
|
||||
</span>
|
||||
) : null}
|
||||
</div>
|
||||
)
|
||||
|
||||
case 'meta':
|
||||
if (entry.kind === 'meta' && entry.part?.type === 'meta') {
|
||||
return (
|
||||
<div key={key} className="text-muted-foreground text-xs">
|
||||
{part.label}: {part.value}
|
||||
{entry.part.label}: {entry.part.value}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
default:
|
||||
return null
|
||||
}
|
||||
})}
|
||||
</MessageContent>
|
||||
</Message>
|
||||
)
|
||||
if (entry.kind === 'task' && entry.tools) {
|
||||
const tools = entry.tools
|
||||
const errorCount = tools.filter((t) => t.status === 'failed').length
|
||||
const taskTitle = `Agent activity (${tools.length} ${tools.length === 1 ? 'action' : 'actions'}${errorCount > 0 ? `, ${errorCount} failed` : ''})`
|
||||
|
||||
return (
|
||||
<Task key={key} defaultOpen={false}>
|
||||
<TaskTrigger title={taskTitle} TriggerIcon={Wrench} />
|
||||
<TaskContent>
|
||||
{tools.map((tool, idx) => (
|
||||
<TaskItem
|
||||
// biome-ignore lint/suspicious/noArrayIndexKey: tool order is stable within a finalized historical message
|
||||
key={`${tool.name}-${tool.status}-${idx}`}
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
<ToolStatusIcon status={tool.status} />
|
||||
<span className="text-foreground text-xs">
|
||||
{tool.label}
|
||||
</span>
|
||||
{tool.subject ? (
|
||||
<span className="ml-1.5 truncate text-muted-foreground/70 text-xs">
|
||||
· {tool.subject}
|
||||
</span>
|
||||
) : null}
|
||||
{tool.error ? (
|
||||
<span className="ml-2 truncate text-destructive text-xs">
|
||||
{tool.error}
|
||||
</span>
|
||||
) : null}
|
||||
{tool.durationMs != null ? (
|
||||
<span className="ml-auto text-muted-foreground/60 text-xs tabular-nums">
|
||||
{(tool.durationMs / 1000).toFixed(1)}s
|
||||
</span>
|
||||
) : null}
|
||||
</TaskItem>
|
||||
))}
|
||||
</TaskContent>
|
||||
</Task>
|
||||
)
|
||||
}
|
||||
|
||||
return null
|
||||
})}
|
||||
|
||||
{message.role === 'assistant' && messageText ? (
|
||||
<MessageToolbar>
|
||||
<MessageActions>
|
||||
<MessageAction tooltip="Copy" onClick={handleCopy}>
|
||||
<Copy className="size-3.5" />
|
||||
</MessageAction>
|
||||
</MessageActions>
|
||||
{message.costUsd ? (
|
||||
<span className="text-[11px] text-muted-foreground/50 tabular-nums">
|
||||
{formatCost(message.costUsd)}
|
||||
</span>
|
||||
) : null}
|
||||
</MessageToolbar>
|
||||
) : null}
|
||||
</MessageContent>
|
||||
</Message>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,14 +1,20 @@
|
||||
import {
|
||||
AlertTriangle,
|
||||
ArrowRight,
|
||||
Bot,
|
||||
ChevronDown,
|
||||
FileText,
|
||||
Folder,
|
||||
Layers,
|
||||
Loader2,
|
||||
Mic,
|
||||
Paperclip,
|
||||
RefreshCw,
|
||||
Square,
|
||||
X,
|
||||
} from 'lucide-react'
|
||||
import {
|
||||
type DragEvent,
|
||||
type FC,
|
||||
type ReactNode,
|
||||
useEffect,
|
||||
@@ -24,6 +30,7 @@ import { Textarea } from '@/components/ui/textarea'
|
||||
import type { AgentEntry } from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import { McpServerIcon } from '@/entrypoints/app/connect-mcp/McpServerIcon'
|
||||
import { useGetUserMCPIntegrations } from '@/entrypoints/app/connect-mcp/useGetUserMCPIntegrations'
|
||||
import { type StagedAttachment, stageAttachments } from '@/lib/attachments'
|
||||
import { Feature } from '@/lib/browseros/capabilities'
|
||||
import { useCapabilities } from '@/lib/browseros/useCapabilities'
|
||||
import { useMcpServers } from '@/lib/mcp/mcpServerStorage'
|
||||
@@ -31,18 +38,33 @@ import { cn } from '@/lib/utils'
|
||||
import { useVoiceInput } from '@/lib/voice/useVoiceInput'
|
||||
import { useWorkspace } from '@/lib/workspace/use-workspace'
|
||||
import { AgentSelector } from './AgentSelector'
|
||||
import type { OutboundMessage } from './useOutboundQueue'
|
||||
|
||||
export interface ConversationInputSendInput {
|
||||
text: string
|
||||
attachments: StagedAttachment[]
|
||||
}
|
||||
|
||||
interface ConversationInputProps {
|
||||
agents: AgentEntry[]
|
||||
selectedAgentId: string | null
|
||||
onSelectAgent: (agent: AgentEntry) => void
|
||||
onSend: (text: string) => void
|
||||
onSend: (input: ConversationInputSendInput) => void
|
||||
onCreateAgent?: () => void
|
||||
streaming: boolean
|
||||
disabled?: boolean
|
||||
status?: string
|
||||
placeholder?: string
|
||||
variant?: 'home' | 'conversation'
|
||||
// Outbound queue: when present, the composer renders the queue strip
|
||||
// above the textarea and lets the user keep sending while a previous
|
||||
// turn is in flight. Optional so non-conversation variants (the home
|
||||
// page) can opt out — the queue only makes sense in the conversation
|
||||
// page where each enqueued message will eventually be delivered to the
|
||||
// active agent.
|
||||
outboundQueue?: OutboundMessage[]
|
||||
onCancelQueued?: (id: string) => void
|
||||
onRetryQueued?: (id: string) => void
|
||||
}
|
||||
|
||||
function InputActionButton({
|
||||
@@ -131,6 +153,8 @@ function ContextControls({
|
||||
onToggleTab,
|
||||
showAgentSelector,
|
||||
status,
|
||||
onAttachClick,
|
||||
attachDisabled,
|
||||
}: {
|
||||
agents: AgentEntry[]
|
||||
onCreateAgent?: () => void
|
||||
@@ -140,6 +164,8 @@ function ContextControls({
|
||||
onToggleTab: (tab: chrome.tabs.Tab) => void
|
||||
showAgentSelector: boolean
|
||||
status?: string
|
||||
onAttachClick: () => void
|
||||
attachDisabled: boolean
|
||||
}) {
|
||||
const { supports } = useCapabilities()
|
||||
const { selectedFolder } = useWorkspace()
|
||||
@@ -199,6 +225,20 @@ function ContextControls({
|
||||
<span>Tabs</span>
|
||||
</Button>
|
||||
</TabPickerPopover>
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
onClick={onAttachClick}
|
||||
disabled={attachDisabled}
|
||||
title="Attach files"
|
||||
className={cn(
|
||||
'flex items-center gap-2 rounded-lg px-3 py-1.5 font-medium text-sm transition-all',
|
||||
'bg-transparent text-muted-foreground hover:bg-accent hover:text-accent-foreground',
|
||||
)}
|
||||
>
|
||||
<Paperclip className="h-4 w-4" />
|
||||
<span>Attach</span>
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{supports(Feature.MANAGED_MCP_SUPPORT) ? (
|
||||
@@ -267,10 +307,18 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
status,
|
||||
placeholder,
|
||||
variant = 'conversation',
|
||||
outboundQueue,
|
||||
onCancelQueued,
|
||||
onRetryQueued,
|
||||
}) => {
|
||||
const [input, setInput] = useState('')
|
||||
const [selectedTabs, setSelectedTabs] = useState<chrome.tabs.Tab[]>([])
|
||||
const [isExpandedDraft, setIsExpandedDraft] = useState(false)
|
||||
const [attachments, setAttachments] = useState<StagedAttachment[]>([])
|
||||
const [attachmentError, setAttachmentError] = useState<string | null>(null)
|
||||
const [isStaging, setIsStaging] = useState(false)
|
||||
const [isDragOver, setIsDragOver] = useState(false)
|
||||
const fileInputRef = useRef<HTMLInputElement>(null)
|
||||
const voice = useVoiceInput()
|
||||
const textareaRef = useRef<HTMLTextAreaElement>(null)
|
||||
const selectedAgent = agents.find(
|
||||
@@ -278,6 +326,28 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
)
|
||||
const isConversation = variant === 'conversation'
|
||||
|
||||
const stageFiles = async (files: File[]) => {
|
||||
if (files.length === 0) return
|
||||
setIsStaging(true)
|
||||
setAttachmentError(null)
|
||||
try {
|
||||
const result = await stageAttachments(files, attachments.length)
|
||||
if (result.staged.length > 0) {
|
||||
setAttachments((prev) => [...prev, ...result.staged])
|
||||
}
|
||||
if (result.errors.length > 0) {
|
||||
setAttachmentError(result.errors.map((e) => e.message).join(' \u2022 '))
|
||||
}
|
||||
} finally {
|
||||
setIsStaging(false)
|
||||
}
|
||||
}
|
||||
|
||||
const removeAttachment = (id: string) => {
|
||||
setAttachments((prev) => prev.filter((a) => a.id !== id))
|
||||
setAttachmentError(null)
|
||||
}
|
||||
|
||||
useLayoutEffect(() => {
|
||||
const element = textareaRef.current
|
||||
if (!element) return
|
||||
@@ -309,11 +379,71 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
})
|
||||
}
|
||||
|
||||
const hasContent = input.trim().length > 0 || attachments.length > 0
|
||||
const queueEnabled = outboundQueue !== undefined
|
||||
|
||||
const handleSend = () => {
|
||||
const text = input.trim()
|
||||
if (!text || streaming || disabled) return
|
||||
onSend(text)
|
||||
// The outbound queue accepts new messages while streaming; legacy
|
||||
// direct-send callers (e.g., the home composer) keep the original
|
||||
// streaming-blocks-send semantic.
|
||||
if (disabled || isStaging) return
|
||||
if (!queueEnabled && streaming) return
|
||||
if (!text && attachments.length === 0) return
|
||||
onSend({ text, attachments })
|
||||
setInput('')
|
||||
setAttachments([])
|
||||
setAttachmentError(null)
|
||||
}
|
||||
|
||||
const handlePaste = (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
|
||||
const items = event.clipboardData?.items
|
||||
if (!items) return
|
||||
const files: File[] = []
|
||||
for (const item of items) {
|
||||
if (item.kind === 'file') {
|
||||
const file = item.getAsFile()
|
||||
if (file) files.push(file)
|
||||
}
|
||||
}
|
||||
if (files.length > 0) {
|
||||
event.preventDefault()
|
||||
void stageFiles(files)
|
||||
}
|
||||
}
|
||||
|
||||
const handleDrop = (event: DragEvent<HTMLDivElement>) => {
|
||||
event.preventDefault()
|
||||
setIsDragOver(false)
|
||||
const files = Array.from(event.dataTransfer?.files ?? [])
|
||||
if (files.length > 0) {
|
||||
void stageFiles(files)
|
||||
}
|
||||
}
|
||||
|
||||
const handleDragOver = (event: DragEvent<HTMLDivElement>) => {
|
||||
if (!event.dataTransfer?.types.includes('Files')) return
|
||||
event.preventDefault()
|
||||
setIsDragOver(true)
|
||||
}
|
||||
|
||||
const handleDragLeave = (event: DragEvent<HTMLDivElement>) => {
|
||||
if (event.currentTarget.contains(event.relatedTarget as Node | null)) {
|
||||
return
|
||||
}
|
||||
setIsDragOver(false)
|
||||
}
|
||||
|
||||
const openFilePicker = () => {
|
||||
fileInputRef.current?.click()
|
||||
}
|
||||
|
||||
const handleFileInputChange = (
|
||||
event: React.ChangeEvent<HTMLInputElement>,
|
||||
) => {
|
||||
const files = Array.from(event.target.files ?? [])
|
||||
event.target.value = ''
|
||||
if (files.length > 0) void stageFiles(files)
|
||||
}
|
||||
|
||||
const shell = variant === 'home' ? HomeShell : ConversationShell
|
||||
@@ -321,82 +451,313 @@ export const ConversationInput: FC<ConversationInputProps> = ({
|
||||
|
||||
return (
|
||||
<Shell>
|
||||
<div
|
||||
className={cn(
|
||||
'flex gap-3',
|
||||
variant === 'home' ? 'px-4 py-3' : 'px-4 py-3',
|
||||
isExpandedDraft ? 'items-end' : 'items-center',
|
||||
)}
|
||||
<section
|
||||
// Drag/drop on a region isn't a click affordance — wrap the
|
||||
// composer in a labeled <section> so the a11y rule is satisfied
|
||||
// without misrepresenting the surface as interactive.
|
||||
aria-label="Message composer"
|
||||
className={cn('relative', isDragOver && 'ring-2 ring-primary/60')}
|
||||
onDragOver={handleDragOver}
|
||||
onDragLeave={handleDragLeave}
|
||||
onDrop={handleDrop}
|
||||
>
|
||||
<BotInputIcon variant={variant} />
|
||||
<div className="flex-1">
|
||||
<Textarea
|
||||
ref={textareaRef}
|
||||
value={input}
|
||||
onChange={(event) => setInput(event.currentTarget.value)}
|
||||
onKeyDown={(event) => {
|
||||
if (event.key === 'Enter' && !event.shiftKey) {
|
||||
event.preventDefault()
|
||||
handleSend()
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type="file"
|
||||
multiple
|
||||
accept="image/png,image/jpeg,image/webp,image/gif,text/*,application/json"
|
||||
className="hidden"
|
||||
onChange={handleFileInputChange}
|
||||
/>
|
||||
{attachments.length > 0 || attachmentError ? (
|
||||
<AttachmentStrip
|
||||
attachments={attachments}
|
||||
onRemove={removeAttachment}
|
||||
error={attachmentError}
|
||||
/>
|
||||
) : null}
|
||||
{queueEnabled && outboundQueue && outboundQueue.length > 0 ? (
|
||||
<OutboundQueueStrip
|
||||
messages={outboundQueue}
|
||||
onCancel={onCancelQueued}
|
||||
onRetry={onRetryQueued}
|
||||
/>
|
||||
) : null}
|
||||
<div
|
||||
className={cn(
|
||||
'flex gap-3',
|
||||
variant === 'home' ? 'px-4 py-3' : 'px-4 py-3',
|
||||
isExpandedDraft ? 'items-end' : 'items-center',
|
||||
)}
|
||||
>
|
||||
<BotInputIcon variant={variant} />
|
||||
<div className="flex-1">
|
||||
<Textarea
|
||||
ref={textareaRef}
|
||||
value={input}
|
||||
onChange={(event) => setInput(event.currentTarget.value)}
|
||||
onKeyDown={(event) => {
|
||||
if (event.key === 'Enter' && !event.shiftKey) {
|
||||
event.preventDefault()
|
||||
handleSend()
|
||||
}
|
||||
}}
|
||||
onPaste={handlePaste}
|
||||
rows={1}
|
||||
placeholder={
|
||||
voice.isTranscribing
|
||||
? 'Transcribing...'
|
||||
: (placeholder ??
|
||||
`Message ${selectedAgent?.name ?? 'agent'}...`)
|
||||
}
|
||||
disabled={disabled || voice.isTranscribing}
|
||||
className={cn(
|
||||
'resize-none border-none bg-transparent px-0 text-[15px] shadow-none focus-visible:ring-0',
|
||||
'[field-sizing:fixed]',
|
||||
variant === 'home'
|
||||
? 'min-h-[40px] py-2 leading-6'
|
||||
: 'min-h-[40px] py-2 leading-6',
|
||||
'placeholder:text-muted-foreground/80',
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
<VoiceButton
|
||||
isRecording={voice.isRecording}
|
||||
isTranscribing={voice.isTranscribing}
|
||||
onStart={() => {
|
||||
void voice.startRecording()
|
||||
}}
|
||||
rows={1}
|
||||
placeholder={
|
||||
voice.isTranscribing
|
||||
? 'Transcribing...'
|
||||
: (placeholder ??
|
||||
`Message ${selectedAgent?.name ?? 'agent'}...`)
|
||||
onStop={() => {
|
||||
void voice.stopRecording()
|
||||
}}
|
||||
/>
|
||||
<InputActionButton
|
||||
disabled={
|
||||
!hasContent ||
|
||||
isStaging ||
|
||||
!!disabled ||
|
||||
voice.isRecording ||
|
||||
voice.isTranscribing ||
|
||||
// Only block on `streaming` for the legacy direct-send path
|
||||
// (no queue). With the queue active the press always
|
||||
// succeeds — it just enqueues instead of dispatching.
|
||||
(!queueEnabled && streaming)
|
||||
}
|
||||
disabled={disabled || voice.isTranscribing}
|
||||
className={cn(
|
||||
'resize-none border-none bg-transparent px-0 text-[15px] shadow-none focus-visible:ring-0',
|
||||
'[field-sizing:fixed]',
|
||||
variant === 'home'
|
||||
? 'min-h-[40px] py-2 leading-6'
|
||||
: 'min-h-[40px] py-2 leading-6',
|
||||
'placeholder:text-muted-foreground/80',
|
||||
)}
|
||||
onClick={handleSend}
|
||||
// Spinner stays the user-facing "agent is busy" hint; with the
|
||||
// queue active we still spin while a turn is in flight.
|
||||
streaming={streaming}
|
||||
/>
|
||||
</div>
|
||||
<VoiceButton
|
||||
isRecording={voice.isRecording}
|
||||
isTranscribing={voice.isTranscribing}
|
||||
onStart={() => {
|
||||
void voice.startRecording()
|
||||
}}
|
||||
onStop={() => {
|
||||
void voice.stopRecording()
|
||||
}}
|
||||
{voice.error ? (
|
||||
<div className="px-5 pb-2 text-destructive text-xs">
|
||||
{voice.error}
|
||||
</div>
|
||||
) : null}
|
||||
<ContextControls
|
||||
agents={agents}
|
||||
onCreateAgent={onCreateAgent}
|
||||
onSelectAgent={onSelectAgent}
|
||||
selectedAgentId={selectedAgentId}
|
||||
selectedTabs={selectedTabs}
|
||||
onToggleTab={toggleTab}
|
||||
showAgentSelector={variant === 'home'}
|
||||
status={status}
|
||||
onAttachClick={openFilePicker}
|
||||
attachDisabled={attachments.length >= 10 || isStaging || !!disabled}
|
||||
/>
|
||||
<InputActionButton
|
||||
disabled={
|
||||
!input.trim() ||
|
||||
streaming ||
|
||||
!!disabled ||
|
||||
voice.isRecording ||
|
||||
voice.isTranscribing
|
||||
}
|
||||
onClick={handleSend}
|
||||
streaming={streaming}
|
||||
/>
|
||||
</div>
|
||||
{voice.error ? (
|
||||
<div className="px-5 pb-2 text-destructive text-xs">{voice.error}</div>
|
||||
) : null}
|
||||
<ContextControls
|
||||
agents={agents}
|
||||
onCreateAgent={onCreateAgent}
|
||||
onSelectAgent={onSelectAgent}
|
||||
selectedAgentId={selectedAgentId}
|
||||
selectedTabs={selectedTabs}
|
||||
onToggleTab={toggleTab}
|
||||
showAgentSelector={variant === 'home'}
|
||||
status={status}
|
||||
/>
|
||||
{isDragOver ? (
|
||||
<div className="pointer-events-none absolute inset-0 flex items-center justify-center rounded-[inherit] bg-background/80 font-medium text-foreground text-sm backdrop-blur-sm">
|
||||
Drop files to attach
|
||||
</div>
|
||||
) : null}
|
||||
</section>
|
||||
</Shell>
|
||||
)
|
||||
}
|
||||
|
||||
function OutboundQueueStrip({
|
||||
messages,
|
||||
onCancel,
|
||||
onRetry,
|
||||
}: {
|
||||
messages: OutboundMessage[]
|
||||
onCancel?: (id: string) => void
|
||||
onRetry?: (id: string) => void
|
||||
}) {
|
||||
return (
|
||||
<div className="border-border/40 border-b px-4 pt-3 pb-2">
|
||||
<ul className="flex flex-col gap-1">
|
||||
{messages.map((message) => (
|
||||
<OutboundQueueItem
|
||||
key={message.id}
|
||||
message={message}
|
||||
onCancel={onCancel}
|
||||
onRetry={onRetry}
|
||||
/>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function OutboundQueueItem({
|
||||
message,
|
||||
onCancel,
|
||||
onRetry,
|
||||
}: {
|
||||
message: OutboundMessage
|
||||
onCancel?: (id: string) => void
|
||||
onRetry?: (id: string) => void
|
||||
}) {
|
||||
const preview = message.text.trim() || '(attachments only)'
|
||||
return (
|
||||
<li className="flex items-center gap-2 rounded-md px-2 py-1 text-xs">
|
||||
<OutboundQueueStatusIcon status={message.status} />
|
||||
<span className="min-w-0 flex-1 truncate text-muted-foreground">
|
||||
{preview}
|
||||
</span>
|
||||
{message.attachmentPreviews.length > 0 ? (
|
||||
<span className="inline-flex items-center gap-1 text-muted-foreground/70">
|
||||
<Paperclip className="size-3" />
|
||||
<span className="tabular-nums">
|
||||
{message.attachmentPreviews.length}
|
||||
</span>
|
||||
</span>
|
||||
) : null}
|
||||
{message.status === 'queued' && onCancel ? (
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => onCancel(message.id)}
|
||||
className="ml-1 inline-flex size-5 items-center justify-center rounded-full text-muted-foreground hover:bg-accent hover:text-foreground"
|
||||
aria-label="Cancel queued message"
|
||||
title="Cancel"
|
||||
>
|
||||
<X className="size-3" />
|
||||
</button>
|
||||
) : null}
|
||||
{message.status === 'failed' ? (
|
||||
<span className="ml-1 inline-flex items-center gap-2 text-destructive">
|
||||
<span className="max-w-[160px] truncate" title={message.error}>
|
||||
{message.error ?? 'Failed'}
|
||||
</span>
|
||||
{onRetry ? (
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => onRetry(message.id)}
|
||||
className="inline-flex size-5 items-center justify-center rounded-full hover:bg-accent hover:text-foreground"
|
||||
aria-label="Retry failed message"
|
||||
title="Retry"
|
||||
>
|
||||
<RefreshCw className="size-3" />
|
||||
</button>
|
||||
) : null}
|
||||
{onCancel ? (
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => onCancel(message.id)}
|
||||
className="inline-flex size-5 items-center justify-center rounded-full hover:bg-accent hover:text-foreground"
|
||||
aria-label="Discard failed message"
|
||||
title="Discard"
|
||||
>
|
||||
<X className="size-3" />
|
||||
</button>
|
||||
) : null}
|
||||
</span>
|
||||
) : null}
|
||||
</li>
|
||||
)
|
||||
}
|
||||
|
||||
function OutboundQueueStatusIcon({
|
||||
status,
|
||||
}: {
|
||||
status: OutboundMessage['status']
|
||||
}) {
|
||||
if (status === 'sending') {
|
||||
return (
|
||||
<Loader2 className="size-3.5 shrink-0 animate-spin text-muted-foreground" />
|
||||
)
|
||||
}
|
||||
if (status === 'failed') {
|
||||
return <AlertTriangle className="size-3.5 shrink-0 text-destructive" />
|
||||
}
|
||||
return (
|
||||
<span className="inline-block size-2 shrink-0 rounded-full bg-muted-foreground/40" />
|
||||
)
|
||||
}
|
||||
|
||||
function AttachmentStrip({
|
||||
attachments,
|
||||
onRemove,
|
||||
error,
|
||||
}: {
|
||||
attachments: StagedAttachment[]
|
||||
onRemove: (id: string) => void
|
||||
error: string | null
|
||||
}) {
|
||||
return (
|
||||
<div className="border-border/40 border-b px-4 pt-3 pb-2">
|
||||
{attachments.length > 0 ? (
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{attachments.map((attachment) => (
|
||||
<AttachmentChip
|
||||
key={attachment.id}
|
||||
attachment={attachment}
|
||||
onRemove={() => onRemove(attachment.id)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
) : null}
|
||||
{error ? (
|
||||
<div className="mt-2 text-destructive text-xs">{error}</div>
|
||||
) : null}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function AttachmentChip({
|
||||
attachment,
|
||||
onRemove,
|
||||
}: {
|
||||
attachment: StagedAttachment
|
||||
onRemove: () => void
|
||||
}) {
|
||||
if (attachment.kind === 'image' && attachment.dataUrl) {
|
||||
return (
|
||||
<div className="group relative size-16 overflow-hidden rounded-md border border-border/60">
|
||||
<img
|
||||
src={attachment.dataUrl}
|
||||
alt={attachment.name}
|
||||
className="size-full object-cover"
|
||||
/>
|
||||
<button
|
||||
type="button"
|
||||
onClick={onRemove}
|
||||
className="absolute top-1 right-1 inline-flex size-5 items-center justify-center rounded-full bg-background/80 text-muted-foreground opacity-0 transition-opacity hover:text-foreground group-hover:opacity-100"
|
||||
aria-label={`Remove ${attachment.name}`}
|
||||
>
|
||||
<X className="size-3" />
|
||||
</button>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
return (
|
||||
<div className="group flex max-w-[220px] items-center gap-2 rounded-md border border-border/60 bg-background/60 px-2 py-1.5">
|
||||
<FileText className="size-4 shrink-0 text-muted-foreground" />
|
||||
<span className="truncate text-xs">{attachment.name}</span>
|
||||
<button
|
||||
type="button"
|
||||
onClick={onRemove}
|
||||
className="ml-1 inline-flex size-4 items-center justify-center text-muted-foreground hover:text-foreground"
|
||||
aria-label={`Remove ${attachment.name}`}
|
||||
>
|
||||
<X className="size-3" />
|
||||
</button>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function BotInputIcon({ variant }: { variant: 'home' | 'conversation' }) {
|
||||
return (
|
||||
<div
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import { Bot, CheckCircle2, Loader2, XCircle } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import { Bot, CheckCircle2, Loader2, Wrench, XCircle } from 'lucide-react'
|
||||
import { type FC, useMemo } from 'react'
|
||||
import {
|
||||
Message,
|
||||
MessageAttachment,
|
||||
MessageAttachments,
|
||||
MessageContent,
|
||||
MessageResponse,
|
||||
} from '@/components/ai-elements/message'
|
||||
@@ -10,96 +12,191 @@ import {
|
||||
ReasoningContent,
|
||||
ReasoningTrigger,
|
||||
} from '@/components/ai-elements/reasoning'
|
||||
import type { AgentConversationTurn } from '@/lib/agent-conversations/types'
|
||||
import {
|
||||
Task,
|
||||
TaskContent,
|
||||
TaskItem,
|
||||
TaskTrigger,
|
||||
} from '@/components/ai-elements/task'
|
||||
import type {
|
||||
AgentConversationTurn,
|
||||
ToolEntry,
|
||||
} from '@/lib/agent-conversations/types'
|
||||
|
||||
interface ConversationMessageProps {
|
||||
turn: AgentConversationTurn
|
||||
streaming: boolean
|
||||
}
|
||||
|
||||
interface RenderEntry {
|
||||
kind: 'thinking' | 'text' | 'task'
|
||||
partIndex: number
|
||||
text?: string
|
||||
done?: boolean
|
||||
tools?: ToolEntry[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the render plan for an assistant turn:
|
||||
* - thinking and text parts render in place
|
||||
* - all tool-batch parts collapse into a single Task entry at their first
|
||||
* appearance position, with tools listed in arrival order
|
||||
*/
|
||||
function buildRenderEntries(turn: AgentConversationTurn): RenderEntry[] {
|
||||
const entries: RenderEntry[] = []
|
||||
const aggregatedTools: ToolEntry[] = []
|
||||
let taskInserted = false
|
||||
|
||||
turn.parts.forEach((part, partIndex) => {
|
||||
if (part.kind === 'thinking') {
|
||||
entries.push({
|
||||
kind: 'thinking',
|
||||
partIndex,
|
||||
text: part.text,
|
||||
done: part.done,
|
||||
})
|
||||
} else if (part.kind === 'text') {
|
||||
entries.push({ kind: 'text', partIndex, text: part.text })
|
||||
} else if (part.kind === 'tool-batch') {
|
||||
aggregatedTools.push(...part.tools)
|
||||
if (!taskInserted) {
|
||||
entries.push({
|
||||
kind: 'task',
|
||||
partIndex,
|
||||
tools: aggregatedTools,
|
||||
})
|
||||
taskInserted = true
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
function ToolStatusIcon({ status }: { status: ToolEntry['status'] }) {
|
||||
if (status === 'running') {
|
||||
return (
|
||||
<Loader2 className="size-3.5 shrink-0 animate-spin text-muted-foreground" />
|
||||
)
|
||||
}
|
||||
if (status === 'completed') {
|
||||
return <CheckCircle2 className="size-3.5 shrink-0 text-green-500" />
|
||||
}
|
||||
return <XCircle className="size-3.5 shrink-0 text-destructive" />
|
||||
}
|
||||
|
||||
export const ConversationMessage: FC<ConversationMessageProps> = ({
|
||||
turn,
|
||||
streaming,
|
||||
}) => (
|
||||
<div className="space-y-3">
|
||||
<Message from="user">
|
||||
<MessageContent>
|
||||
<pre className="whitespace-pre-wrap font-sans text-sm">
|
||||
{turn.userText}
|
||||
</pre>
|
||||
</MessageContent>
|
||||
</Message>
|
||||
}) => {
|
||||
const entries = useMemo(() => buildRenderEntries(turn), [turn])
|
||||
|
||||
{turn.parts.length > 0 && (
|
||||
<Message from="assistant">
|
||||
return (
|
||||
<div className="space-y-3">
|
||||
<Message from="user">
|
||||
<MessageContent>
|
||||
{turn.parts.map((part, i) => {
|
||||
const key = `${turn.id}-part-${i}`
|
||||
{turn.userAttachments && turn.userAttachments.length > 0 && (
|
||||
<MessageAttachments>
|
||||
{turn.userAttachments.map((attachment) => (
|
||||
<MessageAttachment
|
||||
key={attachment.id}
|
||||
data={{
|
||||
type: 'file',
|
||||
url: attachment.dataUrl ?? '',
|
||||
mediaType: attachment.mediaType,
|
||||
filename: attachment.name,
|
||||
}}
|
||||
/>
|
||||
))}
|
||||
</MessageAttachments>
|
||||
)}
|
||||
{turn.userText && (
|
||||
<pre className="whitespace-pre-wrap font-sans text-sm">
|
||||
{turn.userText}
|
||||
</pre>
|
||||
)}
|
||||
</MessageContent>
|
||||
</Message>
|
||||
|
||||
switch (part.kind) {
|
||||
case 'thinking':
|
||||
{entries.length > 0 && (
|
||||
<Message from="assistant">
|
||||
<MessageContent>
|
||||
{entries.map((entry) => {
|
||||
const key = `${turn.id}-entry-${entry.partIndex}`
|
||||
|
||||
if (entry.kind === 'thinking') {
|
||||
return (
|
||||
<Reasoning
|
||||
key={key}
|
||||
className="w-full"
|
||||
isStreaming={!part.done}
|
||||
defaultOpen={!part.done}
|
||||
isStreaming={!entry.done}
|
||||
defaultOpen={!entry.done}
|
||||
>
|
||||
<ReasoningTrigger />
|
||||
<ReasoningContent>{part.text}</ReasoningContent>
|
||||
<ReasoningContent>{entry.text ?? ''}</ReasoningContent>
|
||||
</Reasoning>
|
||||
)
|
||||
}
|
||||
|
||||
case 'tool-batch':
|
||||
if (entry.kind === 'text') {
|
||||
return (
|
||||
<div key={key} className="w-full space-y-1">
|
||||
{part.tools.map((tool) => (
|
||||
<div
|
||||
<MessageResponse key={key}>
|
||||
{entry.text ?? ''}
|
||||
</MessageResponse>
|
||||
)
|
||||
}
|
||||
|
||||
const tools = entry.tools ?? []
|
||||
const allDone = tools.every((t) => t.status !== 'running')
|
||||
const taskTitle = allDone
|
||||
? `Agent activity (${tools.length} ${tools.length === 1 ? 'action' : 'actions'})`
|
||||
: `Working… (${tools.length} ${tools.length === 1 ? 'action' : 'actions'})`
|
||||
|
||||
return (
|
||||
<Task key={key} defaultOpen={!turn.done}>
|
||||
<TaskTrigger title={taskTitle} TriggerIcon={Wrench} />
|
||||
<TaskContent>
|
||||
{tools.map((tool) => (
|
||||
<TaskItem
|
||||
key={tool.id}
|
||||
className="flex items-center gap-2 rounded-md border px-3 py-2 text-sm"
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
{tool.status === 'running' && (
|
||||
<Loader2 className="size-3.5 animate-spin text-muted-foreground" />
|
||||
)}
|
||||
{tool.status === 'completed' && (
|
||||
<CheckCircle2 className="size-3.5 text-green-500" />
|
||||
)}
|
||||
{tool.status === 'error' && (
|
||||
<XCircle className="size-3.5 text-destructive" />
|
||||
)}
|
||||
<span className="font-mono text-xs">{tool.name}</span>
|
||||
<ToolStatusIcon status={tool.status} />
|
||||
<span className="text-foreground text-xs">
|
||||
{tool.label}
|
||||
</span>
|
||||
{tool.subject ? (
|
||||
<span className="ml-1.5 truncate text-muted-foreground/70 text-xs">
|
||||
· {tool.subject}
|
||||
</span>
|
||||
) : null}
|
||||
{tool.durationMs != null && (
|
||||
<span className="ml-auto text-muted-foreground text-xs">
|
||||
<span className="ml-auto text-muted-foreground/60 text-xs tabular-nums">
|
||||
{(tool.durationMs / 1000).toFixed(1)}s
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
</TaskItem>
|
||||
))}
|
||||
</div>
|
||||
)
|
||||
</TaskContent>
|
||||
</Task>
|
||||
)
|
||||
})}
|
||||
</MessageContent>
|
||||
</Message>
|
||||
)}
|
||||
|
||||
case 'text':
|
||||
return <MessageResponse key={key}>{part.text}</MessageResponse>
|
||||
|
||||
default:
|
||||
return null
|
||||
}
|
||||
})}
|
||||
</MessageContent>
|
||||
</Message>
|
||||
)}
|
||||
|
||||
{!turn.done && turn.parts.length === 0 && streaming && (
|
||||
<div className="flex gap-2">
|
||||
<div className="flex size-7 shrink-0 items-center justify-center rounded-full bg-[var(--accent-orange)] text-white">
|
||||
<Bot className="size-3.5" />
|
||||
{!turn.done && turn.parts.length === 0 && streaming && (
|
||||
<div className="flex gap-2">
|
||||
<div className="flex size-7 shrink-0 items-center justify-center rounded-full bg-[var(--accent-orange)] text-white">
|
||||
<Bot className="size-3.5" />
|
||||
</div>
|
||||
<div className="flex items-center gap-1 rounded-xl rounded-tl-none border border-border/50 bg-card px-3 py-2.5 shadow-sm">
|
||||
<span className="size-1.5 animate-bounce rounded-full bg-[var(--accent-orange)] [animation-delay:-0.3s]" />
|
||||
<span className="size-1.5 animate-bounce rounded-full bg-[var(--accent-orange)] [animation-delay:-0.15s]" />
|
||||
<span className="size-1.5 animate-bounce rounded-full bg-[var(--accent-orange)]" />
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex items-center gap-1 rounded-xl rounded-tl-none border border-border/50 bg-card px-3 py-2.5 shadow-sm">
|
||||
<span className="size-1.5 animate-bounce rounded-full bg-[var(--accent-orange)] [animation-delay:-0.3s]" />
|
||||
<span className="size-1.5 animate-bounce rounded-full bg-[var(--accent-orange)] [animation-delay:-0.15s]" />
|
||||
<span className="size-1.5 animate-bounce rounded-full bg-[var(--accent-orange)]" />
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -17,11 +17,31 @@ export interface BrowserOSOpenClawSession {
|
||||
modelProvider?: string
|
||||
}
|
||||
|
||||
export interface AgentSessionResponse {
|
||||
agentId: string
|
||||
exists: boolean
|
||||
sessionKey: string | null
|
||||
session: BrowserOSOpenClawSession | null
|
||||
export interface BrowserOSChatHistoryToolCall {
|
||||
toolCallId?: string
|
||||
toolName: string
|
||||
label: string
|
||||
subject?: string
|
||||
status: 'completed' | 'failed'
|
||||
input?: Record<string, unknown>
|
||||
output?: string
|
||||
error?: string
|
||||
durationMs?: number
|
||||
}
|
||||
|
||||
export interface BrowserOSChatHistoryReasoning {
|
||||
text: string
|
||||
durationMs?: number
|
||||
}
|
||||
|
||||
export interface BrowserOSChatHistoryAttachment {
|
||||
kind: 'image' | 'file'
|
||||
mediaType: string
|
||||
// Images carry a `data:` URL so we can render directly without any
|
||||
// additional fetch; files (text/PDF) currently round-trip via inline
|
||||
// text in the message body and do not populate this field in v1.
|
||||
dataUrl?: string
|
||||
name?: string
|
||||
}
|
||||
|
||||
export interface BrowserOSChatHistoryItem {
|
||||
@@ -32,6 +52,12 @@ export interface BrowserOSChatHistoryItem {
|
||||
messageSeq: number
|
||||
sessionKey: string
|
||||
source: ClawChatSource
|
||||
costUsd?: number
|
||||
tokensIn?: number
|
||||
tokensOut?: number
|
||||
toolCalls?: BrowserOSChatHistoryToolCall[]
|
||||
reasoning?: BrowserOSChatHistoryReasoning
|
||||
attachments?: BrowserOSChatHistoryAttachment[]
|
||||
}
|
||||
|
||||
export interface AgentHistoryPageResponse {
|
||||
@@ -58,10 +84,20 @@ export type ClawChatMessagePart =
|
||||
| {
|
||||
type: 'tool-call'
|
||||
name: string
|
||||
label: string
|
||||
subject?: string
|
||||
status: 'pending' | 'running' | 'completed' | 'failed'
|
||||
input?: unknown
|
||||
output?: unknown
|
||||
error?: string
|
||||
durationMs?: number
|
||||
}
|
||||
| {
|
||||
type: 'attachment'
|
||||
kind: 'image' | 'file'
|
||||
mediaType: string
|
||||
dataUrl?: string
|
||||
name?: string
|
||||
}
|
||||
| { type: 'meta'; label: string; value: string }
|
||||
|
||||
@@ -74,11 +110,70 @@ export interface ClawChatMessage {
|
||||
messageSeq?: number
|
||||
status?: ClawChatMessageStatus
|
||||
parts: ClawChatMessagePart[]
|
||||
costUsd?: number
|
||||
tokensIn?: number
|
||||
tokensOut?: number
|
||||
}
|
||||
|
||||
export function mapHistoryItemToClawMessage(
|
||||
item: BrowserOSChatHistoryItem,
|
||||
): ClawChatMessage {
|
||||
const parts: ClawChatMessagePart[] = []
|
||||
|
||||
// Attachments first — they belong above the text in user messages and
|
||||
// never appear on assistant messages today (assistant images come back
|
||||
// through tool results, which render via the Task collapsible).
|
||||
if (item.attachments && item.attachments.length > 0) {
|
||||
for (const attachment of item.attachments) {
|
||||
parts.push({
|
||||
type: 'attachment',
|
||||
kind: attachment.kind,
|
||||
mediaType: attachment.mediaType,
|
||||
dataUrl: attachment.dataUrl,
|
||||
name: attachment.name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Reasoning, then tool calls, then text — the chronological order the
|
||||
// agent produced them (think → act → answer).
|
||||
if (item.reasoning && item.reasoning.text.trim().length > 0) {
|
||||
// 0ms means thinking and the final answer were emitted in the same JSONL
|
||||
// line (no tool calls between them) — there's no real elapsed wall-clock,
|
||||
// so fall through to the "Thinking" trigger instead of "Thought for 0
|
||||
// seconds" / streaming shimmer. Real multi-line turns floor at 1s.
|
||||
const durationMs = item.reasoning.durationMs ?? 0
|
||||
const duration =
|
||||
durationMs > 0 ? Math.max(1, Math.round(durationMs / 1000)) : undefined
|
||||
parts.push({
|
||||
type: 'reasoning',
|
||||
text: item.reasoning.text,
|
||||
duration,
|
||||
})
|
||||
}
|
||||
|
||||
if (item.toolCalls && item.toolCalls.length > 0) {
|
||||
for (const tc of item.toolCalls) {
|
||||
parts.push({
|
||||
type: 'tool-call',
|
||||
name: tc.toolName,
|
||||
label: tc.label,
|
||||
subject: tc.subject,
|
||||
status: tc.status,
|
||||
input: tc.input,
|
||||
output: tc.output,
|
||||
error: tc.error,
|
||||
durationMs: tc.durationMs,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Only emit a text part when there's actual content. User messages with
|
||||
// only attachments and no caption shouldn't render an empty bubble.
|
||||
if (item.text.trim().length > 0) {
|
||||
parts.push({ type: 'text', text: item.text })
|
||||
}
|
||||
|
||||
return {
|
||||
id: item.id,
|
||||
role: item.role,
|
||||
@@ -87,7 +182,10 @@ export function mapHistoryItemToClawMessage(
|
||||
source: item.source,
|
||||
messageSeq: item.messageSeq,
|
||||
status: 'historical',
|
||||
parts: [{ type: 'text', text: item.text }],
|
||||
parts,
|
||||
costUsd: item.costUsd,
|
||||
tokensIn: item.tokensIn,
|
||||
tokensOut: item.tokensOut,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,69 +1,50 @@
|
||||
import { useEffect, useState } from 'react'
|
||||
import {
|
||||
type AgentEntry,
|
||||
getModelDisplayName,
|
||||
type OpenClawStatus,
|
||||
} from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import { getLatestConversation } from '@/lib/agent-conversations/storage'
|
||||
import type { AgentCardData } from '@/lib/agent-conversations/types'
|
||||
import type { AgentOverview } from './useAgentDashboard'
|
||||
|
||||
function getAgentStatusTone(
|
||||
status: OpenClawStatus['status'] | undefined,
|
||||
function resolveAgentStatus(
|
||||
gatewayStatus: OpenClawStatus['status'] | undefined,
|
||||
liveStatus: AgentOverview['status'] | undefined,
|
||||
): AgentCardData['status'] {
|
||||
if (status === 'error') return 'error'
|
||||
if (status === 'starting') return 'working'
|
||||
// Gateway-level errors take precedence
|
||||
if (gatewayStatus === 'error') return 'error'
|
||||
if (gatewayStatus === 'starting') return 'working'
|
||||
|
||||
// Per-agent live status from the WS observer
|
||||
if (liveStatus === 'working') return 'working'
|
||||
if (liveStatus === 'error') return 'error'
|
||||
|
||||
return 'idle'
|
||||
}
|
||||
|
||||
async function getAgentCardData(
|
||||
agent: AgentEntry,
|
||||
status: OpenClawStatus['status'] | undefined,
|
||||
): Promise<AgentCardData> {
|
||||
const conversation = await getLatestConversation(agent.agentId)
|
||||
const lastTurn = conversation?.turns[conversation.turns.length - 1]
|
||||
const lastTextPart = lastTurn?.parts.findLast((part) => part.kind === 'text')
|
||||
|
||||
return {
|
||||
agentId: agent.agentId,
|
||||
name: agent.name,
|
||||
model: getModelDisplayName(agent.model),
|
||||
status: getAgentStatusTone(status),
|
||||
lastMessage:
|
||||
lastTextPart?.kind === 'text'
|
||||
? lastTextPart.text.slice(0, 120)
|
||||
: undefined,
|
||||
lastMessageTimestamp: lastTurn?.timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
export function useAgentCardData(
|
||||
/**
|
||||
* Build agent card display data by merging the raw agent entries from
|
||||
* the gateway with enriched overview data from the dashboard API.
|
||||
*
|
||||
* Pure function — no hooks, no IndexedDB, no async.
|
||||
*/
|
||||
export function buildAgentCardData(
|
||||
agents: AgentEntry[],
|
||||
status: OpenClawStatus['status'] | undefined,
|
||||
) {
|
||||
const [cardData, setCardData] = useState<AgentCardData[]>([])
|
||||
dashboard: AgentOverview[] | undefined,
|
||||
): AgentCardData[] {
|
||||
return agents.map((agent) => {
|
||||
const overview = dashboard?.find((d) => d.agentId === agent.agentId)
|
||||
|
||||
useEffect(() => {
|
||||
let active = true
|
||||
|
||||
const loadCardData = async () => {
|
||||
const nextCardData = await Promise.all(
|
||||
agents.map((agent) => getAgentCardData(agent, status)),
|
||||
)
|
||||
if (active) {
|
||||
setCardData(nextCardData)
|
||||
}
|
||||
return {
|
||||
agentId: agent.agentId,
|
||||
name: agent.name,
|
||||
model: getModelDisplayName(agent.model),
|
||||
status: resolveAgentStatus(status, overview?.status),
|
||||
lastMessage: overview?.latestMessage?.slice(0, 200) ?? undefined,
|
||||
lastMessageTimestamp: overview?.latestMessageAt ?? undefined,
|
||||
activitySummary: overview?.activitySummary ?? undefined,
|
||||
currentTool: overview?.currentTool ?? undefined,
|
||||
costUsd: overview?.totalCostUsd ?? undefined,
|
||||
}
|
||||
|
||||
if (agents.length > 0) {
|
||||
void loadCardData()
|
||||
} else {
|
||||
setCardData([])
|
||||
}
|
||||
|
||||
return () => {
|
||||
active = false
|
||||
}
|
||||
}, [agents, status])
|
||||
|
||||
return cardData
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,8 +7,20 @@ import {
|
||||
import type {
|
||||
AgentConversationTurn,
|
||||
AssistantPart,
|
||||
UserAttachmentPreview,
|
||||
} from '@/lib/agent-conversations/types'
|
||||
import type { ServerAttachmentPayload } from '@/lib/attachments'
|
||||
import { consumeSSEStream } from '@/lib/sse'
|
||||
import { buildToolLabel } from '@/lib/tool-labels'
|
||||
|
||||
export interface SendInput {
|
||||
text: string
|
||||
attachments?: ServerAttachmentPayload[]
|
||||
// Optional preview metadata used to render the optimistic user turn.
|
||||
// Built by the composer at staging time; the server only sees the
|
||||
// payload array.
|
||||
attachmentPreviews?: UserAttachmentPreview[]
|
||||
}
|
||||
|
||||
interface UseAgentConversationOptions {
|
||||
sessionKey?: string | null
|
||||
@@ -92,9 +104,14 @@ export function useAgentConversation(
|
||||
}
|
||||
|
||||
case 'tool-start': {
|
||||
const rawName = (event.data.toolName as string) ?? 'unknown'
|
||||
const args = event.data.args as Record<string, unknown> | undefined
|
||||
const { label, subject } = buildToolLabel(rawName, args)
|
||||
const tool = {
|
||||
id: (event.data.toolCallId as string) ?? crypto.randomUUID(),
|
||||
name: (event.data.toolName as string) ?? 'unknown',
|
||||
name: rawName,
|
||||
label,
|
||||
subject,
|
||||
status: 'running' as const,
|
||||
}
|
||||
updateCurrentTurnParts((parts) => {
|
||||
@@ -165,12 +182,22 @@ export function useAgentConversation(
|
||||
}
|
||||
}
|
||||
|
||||
const send = async (text: string) => {
|
||||
if (!text.trim() || streaming) return
|
||||
const send = async (input: string | SendInput) => {
|
||||
const normalized: SendInput =
|
||||
typeof input === 'string' ? { text: input } : input
|
||||
const trimmed = normalized.text.trim()
|
||||
const attachments = normalized.attachments ?? []
|
||||
if (streaming) return
|
||||
if (!trimmed && attachments.length === 0) return
|
||||
|
||||
const turn: AgentConversationTurn = {
|
||||
id: crypto.randomUUID(),
|
||||
userText: text.trim(),
|
||||
userText: trimmed,
|
||||
userAttachments:
|
||||
normalized.attachmentPreviews &&
|
||||
normalized.attachmentPreviews.length > 0
|
||||
? normalized.attachmentPreviews
|
||||
: undefined,
|
||||
parts: [],
|
||||
done: false,
|
||||
timestamp: Date.now(),
|
||||
@@ -185,10 +212,11 @@ export function useAgentConversation(
|
||||
try {
|
||||
const response = await chatWithAgent(
|
||||
agentId,
|
||||
text.trim(),
|
||||
trimmed,
|
||||
sessionKeyRef.current || undefined,
|
||||
historyRef.current,
|
||||
abortController.signal,
|
||||
attachments,
|
||||
)
|
||||
const responseSessionKey = response.headers.get('X-Session-Key')
|
||||
if (responseSessionKey) {
|
||||
|
||||
@@ -0,0 +1,95 @@
|
||||
import { useQuery, useQueryClient } from '@tanstack/react-query'
|
||||
import { useEffect } from 'react'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
|
||||
export interface AgentOverview {
|
||||
agentId: string
|
||||
status: 'working' | 'idle' | 'error' | 'unknown'
|
||||
latestMessage: string | null
|
||||
latestMessageAt: number | null
|
||||
activitySummary: string | null
|
||||
currentTool: string | null
|
||||
totalCostUsd: number
|
||||
sessionCount: number
|
||||
}
|
||||
|
||||
export interface DashboardResponse {
|
||||
agents: AgentOverview[]
|
||||
summary: {
|
||||
totalAgents: number
|
||||
totalCostUsd: number
|
||||
}
|
||||
}
|
||||
|
||||
interface StatusEvent {
|
||||
agentId: string
|
||||
status: AgentOverview['status']
|
||||
currentTool: string | null
|
||||
error: string | null
|
||||
timestamp: number
|
||||
}
|
||||
|
||||
const DASHBOARD_QUERY_KEY = ['claw', 'dashboard']
|
||||
|
||||
export function useAgentDashboard(enabled: boolean) {
|
||||
const { baseUrl, isLoading: urlLoading } = useAgentServerUrl()
|
||||
const queryClient = useQueryClient()
|
||||
const ready = enabled && Boolean(baseUrl) && !urlLoading
|
||||
|
||||
// Initial data load + periodic refresh as fallback
|
||||
const query = useQuery<DashboardResponse>({
|
||||
queryKey: [...DASHBOARD_QUERY_KEY, baseUrl],
|
||||
queryFn: async () => {
|
||||
const url = new URL('/claw/dashboard', baseUrl as string)
|
||||
const response = await fetch(url.toString())
|
||||
if (!response.ok) throw new Error('Failed to fetch dashboard')
|
||||
return response.json()
|
||||
},
|
||||
enabled: ready,
|
||||
})
|
||||
|
||||
// SSE subscription for real-time status patches
|
||||
useEffect(() => {
|
||||
if (!ready || !baseUrl) return
|
||||
|
||||
const streamUrl = new URL('/claw/dashboard/stream', baseUrl)
|
||||
const eventSource = new EventSource(streamUrl.toString())
|
||||
|
||||
eventSource.addEventListener('snapshot', (event) => {
|
||||
try {
|
||||
const dashboard = JSON.parse(event.data) as DashboardResponse
|
||||
queryClient.setQueryData([...DASHBOARD_QUERY_KEY, baseUrl], dashboard)
|
||||
} catch {}
|
||||
})
|
||||
|
||||
eventSource.addEventListener('status', (event) => {
|
||||
try {
|
||||
const status = JSON.parse(event.data) as StatusEvent
|
||||
queryClient.setQueryData<DashboardResponse>(
|
||||
[...DASHBOARD_QUERY_KEY, baseUrl],
|
||||
(prev) => {
|
||||
if (!prev) return prev
|
||||
return {
|
||||
...prev,
|
||||
agents: prev.agents.map((agent) =>
|
||||
agent.agentId === status.agentId
|
||||
? {
|
||||
...agent,
|
||||
status: status.status,
|
||||
currentTool: status.currentTool,
|
||||
}
|
||||
: agent,
|
||||
),
|
||||
}
|
||||
},
|
||||
)
|
||||
} catch {}
|
||||
})
|
||||
|
||||
return () => {
|
||||
eventSource.close()
|
||||
}
|
||||
}, [ready, baseUrl, queryClient])
|
||||
|
||||
return query
|
||||
}
|
||||
@@ -1,14 +1,8 @@
|
||||
import { useInfiniteQuery, useQuery } from '@tanstack/react-query'
|
||||
import { useInfiniteQuery } from '@tanstack/react-query'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
import type {
|
||||
AgentHistoryPageResponse,
|
||||
AgentSessionResponse,
|
||||
} from './claw-chat-types'
|
||||
import type { AgentHistoryPageResponse } from './claw-chat-types'
|
||||
|
||||
export const CLAW_CHAT_QUERY_KEYS = {
|
||||
session: 'claw-agent-session',
|
||||
history: 'claw-agent-history',
|
||||
} as const
|
||||
const HISTORY_QUERY_KEY = 'claw-agent-history'
|
||||
|
||||
async function fetchClawJson<T>(url: string): Promise<T> {
|
||||
const response = await fetch(url)
|
||||
@@ -29,38 +23,17 @@ function buildClawUrl(baseUrl: string, path: string): URL {
|
||||
return new URL(`/claw${path}`, baseUrl)
|
||||
}
|
||||
|
||||
export function useClawAgentSession(agentId: string) {
|
||||
const {
|
||||
baseUrl,
|
||||
isLoading: urlLoading,
|
||||
error: urlError,
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useQuery<AgentSessionResponse, Error>({
|
||||
queryKey: [CLAW_CHAT_QUERY_KEYS.session, baseUrl, agentId],
|
||||
queryFn: () => {
|
||||
const url = buildClawUrl(baseUrl as string, `/agents/${agentId}/session`)
|
||||
return fetchClawJson<AgentSessionResponse>(url.toString())
|
||||
},
|
||||
enabled: Boolean(baseUrl) && !urlLoading && Boolean(agentId),
|
||||
})
|
||||
|
||||
return {
|
||||
...query,
|
||||
error: query.error ?? urlError,
|
||||
isLoading: query.isLoading || urlLoading,
|
||||
}
|
||||
}
|
||||
|
||||
export function useClawChatHistory({
|
||||
agentId,
|
||||
sessionKey,
|
||||
enabled,
|
||||
enabled = true,
|
||||
limit = 50,
|
||||
}: {
|
||||
agentId: string
|
||||
// null lets the server resolve the most recent user-chat session for the
|
||||
// agent — avoids an extra /session round-trip and the race that came with it.
|
||||
sessionKey: string | null
|
||||
enabled: boolean
|
||||
enabled?: boolean
|
||||
limit?: number
|
||||
}) {
|
||||
const {
|
||||
@@ -70,9 +43,9 @@ export function useClawChatHistory({
|
||||
} = useAgentServerUrl()
|
||||
|
||||
const query = useInfiniteQuery<AgentHistoryPageResponse, Error>({
|
||||
queryKey: [CLAW_CHAT_QUERY_KEYS.history, baseUrl, agentId, sessionKey],
|
||||
queryKey: [HISTORY_QUERY_KEY, baseUrl, agentId, sessionKey],
|
||||
initialPageParam: undefined as string | undefined,
|
||||
queryFn: ({ pageParam }) => {
|
||||
queryFn: async ({ pageParam }) => {
|
||||
const url = buildClawUrl(baseUrl as string, `/agents/${agentId}/history`)
|
||||
url.searchParams.set('limit', String(limit))
|
||||
|
||||
@@ -87,12 +60,7 @@ export function useClawChatHistory({
|
||||
},
|
||||
getNextPageParam: (lastPage) =>
|
||||
lastPage.page.hasMore ? lastPage.page.cursor : undefined,
|
||||
enabled:
|
||||
enabled &&
|
||||
Boolean(baseUrl) &&
|
||||
!urlLoading &&
|
||||
Boolean(agentId) &&
|
||||
Boolean(sessionKey),
|
||||
enabled: enabled && Boolean(baseUrl) && !urlLoading && Boolean(agentId),
|
||||
})
|
||||
|
||||
return {
|
||||
|
||||
@@ -0,0 +1,270 @@
|
||||
import { useCallback, useEffect, useRef, useState } from 'react'
|
||||
import type { OpenClawChatHistoryMessage } from '@/entrypoints/app/agents/useOpenClaw'
|
||||
import type { UserAttachmentPreview } from '@/lib/agent-conversations/types'
|
||||
import type { ServerAttachmentPayload } from '@/lib/attachments'
|
||||
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
|
||||
|
||||
export type OutboundMessageStatus = 'queued' | 'sending' | 'failed'
|
||||
|
||||
export interface OutboundMessage {
|
||||
id: string
|
||||
text: string
|
||||
attachments: ServerAttachmentPayload[]
|
||||
attachmentPreviews: UserAttachmentPreview[]
|
||||
status: OutboundMessageStatus
|
||||
error?: string
|
||||
createdAt: number
|
||||
}
|
||||
|
||||
export interface OutboundQueueEnqueueInput {
|
||||
text: string
|
||||
attachments?: ServerAttachmentPayload[]
|
||||
attachmentPreviews?: UserAttachmentPreview[]
|
||||
history?: OpenClawChatHistoryMessage[]
|
||||
}
|
||||
|
||||
export interface OutboundQueueApi {
|
||||
queue: OutboundMessage[]
|
||||
enqueue(input: OutboundQueueEnqueueInput): void
|
||||
cancel(id: string): void
|
||||
retry(id: string): void
|
||||
}
|
||||
|
||||
interface UseOutboundQueueOptions {
|
||||
agentId: string | null | undefined
|
||||
sessionKey?: string | null
|
||||
}
|
||||
|
||||
interface ServerQueuedItem {
|
||||
id: string
|
||||
status: 'queued' | 'dispatching' | 'failed'
|
||||
message: string
|
||||
attachmentsPreview: Array<{
|
||||
kind: 'image' | 'file'
|
||||
mediaType: string
|
||||
name?: string
|
||||
}>
|
||||
error?: string
|
||||
createdAt: number
|
||||
}
|
||||
|
||||
function makeId(): string {
|
||||
if (typeof crypto !== 'undefined' && crypto.randomUUID) {
|
||||
return crypto.randomUUID()
|
||||
}
|
||||
return `${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 10)}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Server-backed outbound message queue. The browser is purely a
|
||||
* projection of server state — closing the tab is safe because the queue
|
||||
* keeps draining server-side via the OutboundQueueService.
|
||||
*
|
||||
* Single id-keyed list: the client generates the queue id and hands it
|
||||
* to the server in the POST body, so the optimistic row and the SSE
|
||||
* snapshot reconcile on the same key from frame zero — there is no
|
||||
* window in which the message renders twice.
|
||||
*/
|
||||
export function useOutboundQueue(
|
||||
options: UseOutboundQueueOptions,
|
||||
): OutboundQueueApi {
|
||||
const { agentId, sessionKey } = options
|
||||
const { baseUrl } = useAgentServerUrl()
|
||||
const sessionKeyRef = useRef<string | null | undefined>(sessionKey)
|
||||
sessionKeyRef.current = sessionKey
|
||||
|
||||
const [items, setItems] = useState<OutboundMessage[]>([])
|
||||
// Track which ids the server has confirmed seeing in any SSE snapshot.
|
||||
// We use this to know whether a missing-from-snapshot id is "drained
|
||||
// by the server" (drop it) or "still in flight client-side" (keep
|
||||
// showing the optimistic row).
|
||||
const everSeenByServerRef = useRef<Set<string>>(new Set())
|
||||
// Local-only attachment previews, keyed by queue id. Data URLs never
|
||||
// leave the browser — the SSE feed only carries metadata, so we hold
|
||||
// them here so the chip strip keeps rendering after server takeover.
|
||||
const previewMapRef = useRef<Map<string, UserAttachmentPreview[]>>(new Map())
|
||||
|
||||
useEffect(() => {
|
||||
if (!baseUrl || !agentId) {
|
||||
setItems([])
|
||||
everSeenByServerRef.current = new Set()
|
||||
previewMapRef.current = new Map()
|
||||
return
|
||||
}
|
||||
let cancelled = false
|
||||
const url = `${baseUrl}/claw/agents/${encodeURIComponent(agentId)}/queue/stream`
|
||||
const source = new EventSource(url)
|
||||
source.onmessage = (event) => {
|
||||
if (cancelled) return
|
||||
try {
|
||||
const parsed = JSON.parse(event.data) as { items: ServerQueuedItem[] }
|
||||
const snapshotIds = new Set(parsed.items.map((item) => item.id))
|
||||
for (const id of snapshotIds) everSeenByServerRef.current.add(id)
|
||||
|
||||
setItems((prev) => {
|
||||
const next: OutboundMessage[] = parsed.items.map((item) => ({
|
||||
id: item.id,
|
||||
text: item.message,
|
||||
attachments: [],
|
||||
attachmentPreviews: previewMapRef.current.get(item.id) ?? [],
|
||||
status: serverStatusToClient(item.status),
|
||||
error: item.error,
|
||||
createdAt: item.createdAt,
|
||||
}))
|
||||
// Carry forward any optimistic / failed entries the server
|
||||
// doesn't know about yet (POST in flight) or has finished
|
||||
// dispatching but the client wants to keep visible (failed).
|
||||
const carried = prev.filter((local) => {
|
||||
if (snapshotIds.has(local.id)) return false
|
||||
if (everSeenByServerRef.current.has(local.id)) {
|
||||
// Server saw it before and it's gone now — drained.
|
||||
previewMapRef.current.delete(local.id)
|
||||
return false
|
||||
}
|
||||
return local.status !== 'failed' || Boolean(local.error)
|
||||
})
|
||||
return [...carried, ...next]
|
||||
})
|
||||
} catch {
|
||||
// Malformed event — ignore; next snapshot will recover.
|
||||
}
|
||||
}
|
||||
source.onerror = () => {
|
||||
// Auto-reconnects; nothing to do here.
|
||||
}
|
||||
return () => {
|
||||
cancelled = true
|
||||
source.close()
|
||||
}
|
||||
}, [baseUrl, agentId])
|
||||
|
||||
const enqueue = useCallback(
|
||||
(input: OutboundQueueEnqueueInput) => {
|
||||
if (!baseUrl || !agentId) return
|
||||
const trimmed = input.text.trim()
|
||||
const attachments = input.attachments ?? []
|
||||
if (!trimmed && attachments.length === 0) return
|
||||
|
||||
const id = makeId()
|
||||
const previews = input.attachmentPreviews ?? []
|
||||
previewMapRef.current.set(id, previews)
|
||||
setItems((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id,
|
||||
text: trimmed,
|
||||
attachments,
|
||||
attachmentPreviews: previews,
|
||||
status: 'queued',
|
||||
createdAt: Date.now(),
|
||||
},
|
||||
])
|
||||
|
||||
void (async () => {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${baseUrl}/claw/agents/${encodeURIComponent(agentId)}/queue`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
id,
|
||||
message: trimmed,
|
||||
attachments: attachments.length > 0 ? attachments : undefined,
|
||||
sessionKey: sessionKeyRef.current ?? undefined,
|
||||
history: input.history,
|
||||
}),
|
||||
},
|
||||
)
|
||||
if (!response.ok) {
|
||||
const text = await response.text().catch(() => '')
|
||||
previewMapRef.current.delete(id)
|
||||
setItems((prev) =>
|
||||
prev.map((item) =>
|
||||
item.id === id
|
||||
? {
|
||||
...item,
|
||||
status: 'failed',
|
||||
error:
|
||||
text || `Failed to enqueue (status ${response.status})`,
|
||||
}
|
||||
: item,
|
||||
),
|
||||
)
|
||||
}
|
||||
} catch (err) {
|
||||
// Only mark as failed if the SSE snapshot hasn't already
|
||||
// taken ownership of the entry (i.e. the request actually
|
||||
// reached the server).
|
||||
if (everSeenByServerRef.current.has(id)) return
|
||||
previewMapRef.current.delete(id)
|
||||
setItems((prev) =>
|
||||
prev.map((item) =>
|
||||
item.id === id
|
||||
? {
|
||||
...item,
|
||||
status: 'failed',
|
||||
error:
|
||||
err instanceof Error
|
||||
? err.message
|
||||
: 'Failed to enqueue message',
|
||||
}
|
||||
: item,
|
||||
),
|
||||
)
|
||||
}
|
||||
})()
|
||||
},
|
||||
[baseUrl, agentId],
|
||||
)
|
||||
|
||||
const cancel = useCallback(
|
||||
(id: string) => {
|
||||
// If the server has never seen this id, just drop it locally.
|
||||
if (!everSeenByServerRef.current.has(id)) {
|
||||
previewMapRef.current.delete(id)
|
||||
setItems((prev) => prev.filter((item) => item.id !== id))
|
||||
return
|
||||
}
|
||||
if (!baseUrl || !agentId) return
|
||||
void fetch(
|
||||
`${baseUrl}/claw/agents/${encodeURIComponent(agentId)}/queue/${encodeURIComponent(id)}`,
|
||||
{ method: 'DELETE' },
|
||||
).catch(() => {})
|
||||
},
|
||||
[baseUrl, agentId],
|
||||
)
|
||||
|
||||
const retry = useCallback(
|
||||
(id: string) => {
|
||||
if (!everSeenByServerRef.current.has(id)) {
|
||||
// Optimistic-only entry, never made it to the server. Reset
|
||||
// status so the user can press Send again.
|
||||
setItems((prev) =>
|
||||
prev.map((item) =>
|
||||
item.id === id
|
||||
? { ...item, status: 'queued', error: undefined }
|
||||
: item,
|
||||
),
|
||||
)
|
||||
return
|
||||
}
|
||||
if (!baseUrl || !agentId) return
|
||||
void fetch(
|
||||
`${baseUrl}/claw/agents/${encodeURIComponent(agentId)}/queue/${encodeURIComponent(id)}/retry`,
|
||||
{ method: 'POST' },
|
||||
).catch(() => {})
|
||||
},
|
||||
[baseUrl, agentId],
|
||||
)
|
||||
|
||||
return { queue: items, enqueue, cancel, retry }
|
||||
}
|
||||
|
||||
function serverStatusToClient(
|
||||
status: ServerQueuedItem['status'],
|
||||
): OutboundMessageStatus {
|
||||
if (status === 'dispatching') return 'sending'
|
||||
if (status === 'failed') return 'failed'
|
||||
return 'queued'
|
||||
}
|
||||
@@ -317,12 +317,18 @@ export async function chatWithAgent(
|
||||
sessionKey?: string,
|
||||
history: OpenClawChatHistoryMessage[] = [],
|
||||
signal?: AbortSignal,
|
||||
attachments?: ReadonlyArray<unknown>,
|
||||
): Promise<Response> {
|
||||
const baseUrl = await getAgentServerUrl()
|
||||
return fetch(`${baseUrl}/claw/agents/${agentId}/chat`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ message, sessionKey, history }),
|
||||
body: JSON.stringify({
|
||||
message,
|
||||
sessionKey,
|
||||
history,
|
||||
...(attachments && attachments.length > 0 ? { attachments } : {}),
|
||||
}),
|
||||
signal,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -12,6 +12,8 @@ export interface AssistantThinkingPart {
|
||||
export interface ToolEntry {
|
||||
id: string
|
||||
name: string
|
||||
label: string
|
||||
subject?: string
|
||||
status: 'running' | 'completed' | 'error'
|
||||
durationMs?: number
|
||||
}
|
||||
@@ -26,9 +28,24 @@ export type AssistantPart =
|
||||
| AssistantThinkingPart
|
||||
| AssistantToolBatchPart
|
||||
|
||||
/**
|
||||
* Attachments rendered alongside the user's text on the optimistic turn
|
||||
* — populated when the composer staged any images/files. The dataUrl is
|
||||
* the same one the server received; we keep it in memory only for the
|
||||
* lifetime of the live turn (history reload re-fetches via the JSONL).
|
||||
*/
|
||||
export interface UserAttachmentPreview {
|
||||
id: string
|
||||
kind: 'image' | 'file'
|
||||
mediaType: string
|
||||
name: string
|
||||
dataUrl?: string
|
||||
}
|
||||
|
||||
export interface AgentConversationTurn {
|
||||
id: string
|
||||
userText: string
|
||||
userAttachments?: UserAttachmentPreview[]
|
||||
parts: AssistantPart[]
|
||||
done: boolean
|
||||
timestamp: number
|
||||
@@ -50,4 +67,7 @@ export interface AgentCardData {
|
||||
status: 'idle' | 'working' | 'error'
|
||||
lastMessage?: string
|
||||
lastMessageTimestamp?: number
|
||||
activitySummary?: string
|
||||
currentTool?: string
|
||||
costUsd?: number
|
||||
}
|
||||
|
||||
369
packages/browseros-agent/apps/agent/lib/attachments.ts
Normal file
369
packages/browseros-agent/apps/agent/lib/attachments.ts
Normal file
@@ -0,0 +1,369 @@
|
||||
/**
|
||||
* Composer attachment helpers — validation, image compression, and the
|
||||
* client-side payload shape sent to /agents/:id/chat.
|
||||
*
|
||||
* Image attachments travel as `data:` URLs (base64) so the gateway, which
|
||||
* runs on 127.0.0.1 over Lima virtiofs, can ingest them as standard
|
||||
* OpenAI-style content blocks. Non-image text-shaped files are read into
|
||||
* memory and travel as their extracted text body — the server inlines
|
||||
* them as a fenced `<attachment>` block on the user message.
|
||||
*/
|
||||
|
||||
export const MAX_ATTACHMENTS_PER_MESSAGE = 10
|
||||
export const MAX_IMAGE_BYTES = 5 * 1024 * 1024 // 5 MB after compression
|
||||
export const MAX_FILE_TEXT_BYTES = 1 * 1024 * 1024 // 1 MB extracted text
|
||||
export const IMAGE_LONG_EDGE_CAP = 2048
|
||||
|
||||
export const ALLOWED_IMAGE_MEDIA_TYPES = [
|
||||
'image/png',
|
||||
'image/jpeg',
|
||||
'image/jpg',
|
||||
'image/webp',
|
||||
'image/gif',
|
||||
] as const
|
||||
|
||||
export const ALLOWED_FILE_MEDIA_TYPE_PREFIXES = [
|
||||
'text/',
|
||||
'application/json',
|
||||
] as const
|
||||
|
||||
export type ServerImageAttachment = {
|
||||
kind: 'image'
|
||||
mediaType: string
|
||||
dataUrl: string
|
||||
name?: string
|
||||
}
|
||||
|
||||
export type ServerFileAttachment = {
|
||||
kind: 'file'
|
||||
mediaType: string
|
||||
name: string
|
||||
text: string
|
||||
}
|
||||
|
||||
export type ServerAttachmentPayload =
|
||||
| ServerImageAttachment
|
||||
| ServerFileAttachment
|
||||
|
||||
/** UI-side representation: what the composer needs to render a chip. */
|
||||
export interface StagedAttachment {
|
||||
id: string
|
||||
kind: 'image' | 'file'
|
||||
mediaType: string
|
||||
name: string
|
||||
// Set for images so the chip thumbnail can render directly. For files
|
||||
// we don't need a preview yet, but the field exists for v2 PDF previews.
|
||||
dataUrl?: string
|
||||
// Pre-computed payload for the server. Built once at staging time so
|
||||
// re-renders don't re-encode large blobs.
|
||||
payload: ServerAttachmentPayload
|
||||
}
|
||||
|
||||
export type AttachmentValidationError =
|
||||
| { code: 'too_many'; message: string }
|
||||
| { code: 'unsupported_type'; message: string; mediaType: string }
|
||||
| { code: 'too_large'; message: string }
|
||||
| { code: 'read_failed'; message: string }
|
||||
|
||||
export type StageAttachmentResult =
|
||||
| { ok: true; attachment: StagedAttachment }
|
||||
| { ok: false; error: AttachmentValidationError }
|
||||
|
||||
function isImageMediaType(mediaType: string): boolean {
|
||||
return (ALLOWED_IMAGE_MEDIA_TYPES as readonly string[]).includes(mediaType)
|
||||
}
|
||||
|
||||
function isAllowedFileMediaType(mediaType: string): boolean {
|
||||
return ALLOWED_FILE_MEDIA_TYPE_PREFIXES.some((prefix) =>
|
||||
mediaType.startsWith(prefix),
|
||||
)
|
||||
}
|
||||
|
||||
/** Build a unique id without depending on `crypto.randomUUID` outside DOM. */
|
||||
function makeId(): string {
|
||||
if (typeof crypto !== 'undefined' && crypto.randomUUID) {
|
||||
return crypto.randomUUID()
|
||||
}
|
||||
return `att-${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 10)}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a `File` and produce the staged-attachment shape — validate type,
|
||||
* compress if it's a large image, and pre-build the server payload.
|
||||
*/
|
||||
export async function stageAttachment(
|
||||
file: File,
|
||||
): Promise<StageAttachmentResult> {
|
||||
const mediaType = file.type || 'application/octet-stream'
|
||||
|
||||
if (isImageMediaType(mediaType)) {
|
||||
try {
|
||||
const compressed = await compressImageIfNeeded(file)
|
||||
const dataUrl = await readAsDataUrl(compressed)
|
||||
// Rough byte ceiling — `data:image/png;base64,...` doubles size with
|
||||
// base64. Reject early so we never POST something the route will 400.
|
||||
if (dataUrl.length > MAX_IMAGE_BYTES * 2) {
|
||||
return {
|
||||
ok: false,
|
||||
error: {
|
||||
code: 'too_large',
|
||||
message: `Image "${file.name}" is too large (max ${humanBytes(
|
||||
MAX_IMAGE_BYTES,
|
||||
)}).`,
|
||||
},
|
||||
}
|
||||
}
|
||||
return {
|
||||
ok: true,
|
||||
attachment: {
|
||||
id: makeId(),
|
||||
kind: 'image',
|
||||
mediaType,
|
||||
name: file.name || 'image',
|
||||
dataUrl,
|
||||
payload: {
|
||||
kind: 'image',
|
||||
mediaType,
|
||||
dataUrl,
|
||||
name: file.name || undefined,
|
||||
},
|
||||
},
|
||||
}
|
||||
} catch (err) {
|
||||
return {
|
||||
ok: false,
|
||||
error: {
|
||||
code: 'read_failed',
|
||||
message:
|
||||
err instanceof Error
|
||||
? err.message
|
||||
: `Failed to read image "${file.name}".`,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (isAllowedFileMediaType(mediaType)) {
|
||||
let text: string
|
||||
try {
|
||||
text = await file.text()
|
||||
} catch (err) {
|
||||
return {
|
||||
ok: false,
|
||||
error: {
|
||||
code: 'read_failed',
|
||||
message:
|
||||
err instanceof Error
|
||||
? err.message
|
||||
: `Failed to read file "${file.name}".`,
|
||||
},
|
||||
}
|
||||
}
|
||||
if (text.length > MAX_FILE_TEXT_BYTES) {
|
||||
return {
|
||||
ok: false,
|
||||
error: {
|
||||
code: 'too_large',
|
||||
message: `File "${file.name}" is too large (max ${humanBytes(
|
||||
MAX_FILE_TEXT_BYTES,
|
||||
)}).`,
|
||||
},
|
||||
}
|
||||
}
|
||||
return {
|
||||
ok: true,
|
||||
attachment: {
|
||||
id: makeId(),
|
||||
kind: 'file',
|
||||
mediaType,
|
||||
name: file.name || 'attachment',
|
||||
payload: {
|
||||
kind: 'file',
|
||||
mediaType,
|
||||
name: file.name || 'attachment',
|
||||
text,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
ok: false,
|
||||
error: {
|
||||
code: 'unsupported_type',
|
||||
message: `Unsupported attachment type: ${mediaType || 'unknown'}`,
|
||||
mediaType,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Stage multiple files at once, enforcing the per-message cap. The result
|
||||
* partitions successful stages and any errors so the caller can show
|
||||
* granular toasts.
|
||||
*/
|
||||
export async function stageAttachments(
|
||||
files: File[],
|
||||
alreadyStaged: number,
|
||||
): Promise<{
|
||||
staged: StagedAttachment[]
|
||||
errors: AttachmentValidationError[]
|
||||
}> {
|
||||
const remainingSlots = Math.max(
|
||||
0,
|
||||
MAX_ATTACHMENTS_PER_MESSAGE - alreadyStaged,
|
||||
)
|
||||
const staged: StagedAttachment[] = []
|
||||
const errors: AttachmentValidationError[] = []
|
||||
|
||||
if (remainingSlots === 0 && files.length > 0) {
|
||||
errors.push({
|
||||
code: 'too_many',
|
||||
message: `At most ${MAX_ATTACHMENTS_PER_MESSAGE} attachments per message.`,
|
||||
})
|
||||
return { staged, errors }
|
||||
}
|
||||
|
||||
const overflow = files.length - remainingSlots
|
||||
if (overflow > 0) {
|
||||
errors.push({
|
||||
code: 'too_many',
|
||||
message: `Only the first ${remainingSlots} of ${files.length} files were attached (max ${MAX_ATTACHMENTS_PER_MESSAGE}).`,
|
||||
})
|
||||
}
|
||||
|
||||
for (const file of files.slice(0, remainingSlots)) {
|
||||
const result = await stageAttachment(file)
|
||||
if (result.ok) {
|
||||
staged.push(result.attachment)
|
||||
} else {
|
||||
errors.push(result.error)
|
||||
}
|
||||
}
|
||||
|
||||
return { staged, errors }
|
||||
}
|
||||
|
||||
/**
|
||||
* Resize images that are oversized to a sane long-edge cap. JPEG/WebP
|
||||
* source files are re-encoded to JPEG; PNGs/GIFs that are already small
|
||||
* are passed through untouched.
|
||||
*/
|
||||
export async function compressImageIfNeeded(file: File): Promise<Blob> {
|
||||
// Cheap path: small files don't need any transform.
|
||||
if (file.size <= 1.5 * 1024 * 1024) return file
|
||||
|
||||
const bitmap = await blobToImageBitmap(file)
|
||||
const { width, height } = bitmap
|
||||
const longEdge = Math.max(width, height)
|
||||
if (longEdge <= IMAGE_LONG_EDGE_CAP && file.size <= MAX_IMAGE_BYTES) {
|
||||
bitmap.close?.()
|
||||
return file
|
||||
}
|
||||
|
||||
const scale = Math.min(1, IMAGE_LONG_EDGE_CAP / longEdge)
|
||||
const targetWidth = Math.max(1, Math.round(width * scale))
|
||||
const targetHeight = Math.max(1, Math.round(height * scale))
|
||||
|
||||
const canvas =
|
||||
typeof OffscreenCanvas !== 'undefined'
|
||||
? new OffscreenCanvas(targetWidth, targetHeight)
|
||||
: Object.assign(document.createElement('canvas'), {
|
||||
width: targetWidth,
|
||||
height: targetHeight,
|
||||
})
|
||||
|
||||
const ctx = canvas.getContext('2d') as
|
||||
| CanvasRenderingContext2D
|
||||
| OffscreenCanvasRenderingContext2D
|
||||
| null
|
||||
if (!ctx) {
|
||||
bitmap.close?.()
|
||||
return file
|
||||
}
|
||||
ctx.drawImage(bitmap, 0, 0, targetWidth, targetHeight)
|
||||
bitmap.close?.()
|
||||
|
||||
const outputType = 'image/jpeg'
|
||||
if (canvas instanceof HTMLCanvasElement) {
|
||||
return await new Promise<Blob>((resolve, reject) => {
|
||||
canvas.toBlob(
|
||||
(blob) => {
|
||||
if (blob) resolve(blob)
|
||||
else reject(new Error('Image compression failed.'))
|
||||
},
|
||||
outputType,
|
||||
0.85,
|
||||
)
|
||||
})
|
||||
}
|
||||
return await (canvas as OffscreenCanvas).convertToBlob({
|
||||
type: outputType,
|
||||
quality: 0.85,
|
||||
})
|
||||
}
|
||||
|
||||
async function blobToImageBitmap(blob: Blob): Promise<ImageBitmap> {
|
||||
if (typeof createImageBitmap === 'function') {
|
||||
return createImageBitmap(blob)
|
||||
}
|
||||
// Fallback: load via an Image element and use the canvas decode path.
|
||||
const url = URL.createObjectURL(blob)
|
||||
try {
|
||||
const img = await new Promise<HTMLImageElement>((resolve, reject) => {
|
||||
const el = new Image()
|
||||
el.onload = () => resolve(el)
|
||||
el.onerror = () =>
|
||||
reject(new Error('Failed to decode image for compression.'))
|
||||
el.src = url
|
||||
})
|
||||
const canvas = document.createElement('canvas')
|
||||
canvas.width = img.naturalWidth
|
||||
canvas.height = img.naturalHeight
|
||||
const ctx = canvas.getContext('2d')
|
||||
if (!ctx) throw new Error('Canvas 2D context unavailable.')
|
||||
ctx.drawImage(img, 0, 0)
|
||||
const blobOut = await new Promise<Blob | null>((resolve) =>
|
||||
canvas.toBlob(resolve, 'image/png'),
|
||||
)
|
||||
if (!blobOut) throw new Error('Canvas toBlob returned null.')
|
||||
return await createImageBitmap(blobOut)
|
||||
} finally {
|
||||
URL.revokeObjectURL(url)
|
||||
}
|
||||
}
|
||||
|
||||
async function readAsDataUrl(blob: Blob): Promise<string> {
|
||||
if ('arrayBuffer' in blob && typeof FileReader === 'undefined') {
|
||||
const buffer = await blob.arrayBuffer()
|
||||
const base64 = arrayBufferToBase64(buffer)
|
||||
const type = blob.type || 'application/octet-stream'
|
||||
return `data:${type};base64,${base64}`
|
||||
}
|
||||
return await new Promise<string>((resolve, reject) => {
|
||||
const reader = new FileReader()
|
||||
reader.onload = () => resolve(reader.result as string)
|
||||
reader.onerror = () =>
|
||||
reject(reader.error ?? new Error('FileReader failed to read blob.'))
|
||||
reader.readAsDataURL(blob)
|
||||
})
|
||||
}
|
||||
|
||||
function arrayBufferToBase64(buffer: ArrayBuffer): string {
|
||||
const bytes = new Uint8Array(buffer)
|
||||
let binary = ''
|
||||
const chunkSize = 0x8000
|
||||
for (let i = 0; i < bytes.byteLength; i += chunkSize) {
|
||||
binary += String.fromCharCode.apply(
|
||||
null,
|
||||
Array.from(bytes.subarray(i, Math.min(i + chunkSize, bytes.byteLength))),
|
||||
)
|
||||
}
|
||||
return btoa(binary)
|
||||
}
|
||||
|
||||
function humanBytes(bytes: number): string {
|
||||
if (bytes >= 1024 * 1024) return `${(bytes / 1024 / 1024).toFixed(0)} MB`
|
||||
if (bytes >= 1024) return `${(bytes / 1024).toFixed(0)} KB`
|
||||
return `${bytes} B`
|
||||
}
|
||||
325
packages/browseros-agent/apps/agent/lib/tool-labels.ts
Normal file
325
packages/browseros-agent/apps/agent/lib/tool-labels.ts
Normal file
@@ -0,0 +1,325 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Maps raw tool names + arguments to human-readable activity labels for
|
||||
* the chat UI activity view. The MCP ToolRegistry is the source of truth
|
||||
* for tool *existence*; this file is the editorial layer that turns
|
||||
* snake_case identifiers into agent-speak verbs.
|
||||
*/
|
||||
|
||||
// Curated past-tense verb phrases keyed by canonical (prefix-stripped,
// snake_case) tool name. buildToolLabel consults this map first, falling
// back to humanizeToolName when a tool has no entry here.
const VERB_OVERRIDES: Record<string, string> = {
  // Navigation
  navigate_page: 'Navigated to',
  new_page: 'Opened tab',
  new_hidden_page: 'Opened tab',
  show_page: 'Showed tab',
  close_page: 'Closed tab',
  list_pages: 'Listed open tabs',
  get_active_page: 'Got active tab',
  move_page: 'Moved tab',
  group_tabs: 'Grouped tabs',

  // Page reading
  take_snapshot: 'Captured page snapshot',
  take_enhanced_snapshot: 'Captured detailed snapshot',
  get_page_content: 'Read page content',
  get_page_links: 'Extracted page links',
  get_dom: 'Read page DOM',
  search_dom: 'Searched page DOM',
  take_screenshot: 'Took screenshot',

  // Input
  click: 'Clicked',
  click_at: 'Clicked at coordinates',
  hover: 'Hovered',
  hover_at: 'Hovered at coordinates',
  type_at: 'Typed at coordinates',
  drag_at: 'Dragged',
  focus: 'Focused element',
  fill: 'Filled field',
  clear: 'Cleared field',
  check: 'Checked box',
  uncheck: 'Unchecked box',
  press_key: 'Pressed key',
  upload_file: 'Uploaded file',

  // Console / scripts
  evaluate_script: 'Ran script',
  get_console_logs: 'Read console logs',

  // History / bookmarks
  search_history: 'Searched history',
  get_recent_history: 'Read recent history',
  delete_history_url: 'Deleted history entry',
  delete_history_range: 'Deleted history range',
  get_bookmarks: 'Listed bookmarks',
  create_bookmark: 'Created bookmark',
  remove_bookmark: 'Removed bookmark',
  update_bookmark: 'Updated bookmark',
  move_bookmark: 'Moved bookmark',
  search_bookmarks: 'Searched bookmarks',

  // Filesystem (sandboxed)
  read_file: 'Read file',
  write_file: 'Wrote file',
  find_files: 'Searched files',

  // Memory
  read_soul: 'Read soul memory',
  read_core: 'Read core memory',
  write_memory: 'Wrote memory',
  search_memory: 'Searched memory',
  update_soul: 'Updated soul memory',
  update_core: 'Updated core memory',

  // Web
  web_search: 'Searched the web',
  web_fetch: 'Fetched URL',

  // Klavis / external apps (Strata)
  connector_mcp_servers: 'Listed connected apps',
  discover_server_categories_or_actions: 'Browsed available actions',
  get_category_actions: 'Listed actions',
  get_action_details: 'Looked up action',
  execute_action: 'Ran external action',
  search_documentation: 'Searched docs',
  handle_auth_failure: 'Handled auth issue',

  // Suggestions
  suggest_schedule: 'Suggested schedule',
  suggest_app_connection: 'Suggested app connect',

  // BrowserOS info
  browseros_info: 'Read BrowserOS info',

  // Windows
  list_windows: 'Listed windows',
  focus_window: 'Focused window',
  close_window: 'Closed window',
  create_window: 'Created window',
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
// Helpers
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
function asString(value: unknown): string | undefined {
|
||||
return typeof value === 'string' && value.length > 0 ? value : undefined
|
||||
}
|
||||
|
||||
function stringField(
|
||||
input: Record<string, unknown>,
|
||||
...keys: string[]
|
||||
): string | undefined {
|
||||
for (const k of keys) {
|
||||
const v = asString(input[k])
|
||||
if (v) return v
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
function truncate(text: string | undefined, max: number): string | undefined {
|
||||
if (!text) return undefined
|
||||
return text.length > max ? `${text.slice(0, max - 1)}…` : text
|
||||
}
|
||||
|
||||
function quote(value: string | undefined): string | undefined {
|
||||
if (!value) return undefined
|
||||
return `"${truncate(value, 60)}"`
|
||||
}
|
||||
|
||||
function basename(path: string | undefined): string | undefined {
|
||||
if (!path) return undefined
|
||||
const parts = path.split(/[/\\]/).filter(Boolean)
|
||||
return parts[parts.length - 1] ?? path
|
||||
}
|
||||
|
||||
function formatUrl(value: unknown): string | undefined {
|
||||
const url = asString(value)
|
||||
if (!url) return undefined
|
||||
try {
|
||||
const parsed = new URL(url)
|
||||
const host = parsed.host
|
||||
const path = parsed.pathname === '/' ? '' : parsed.pathname
|
||||
const display = path && path.length > 0 ? `${host}${path}` : host
|
||||
return truncate(display, 60)
|
||||
} catch {
|
||||
return truncate(url, 60)
|
||||
}
|
||||
}
|
||||
|
||||
function coords(x: unknown, y: unknown): string | undefined {
|
||||
if (typeof x === 'number' && typeof y === 'number') {
|
||||
return `${Math.round(x)}, ${Math.round(y)}`
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
// Subject extractors
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
/** Extracts a short display "subject" from a tool call's raw arguments. */
type SubjectExtractor = (input: Record<string, unknown>) => string | undefined

// Per-tool subject extractors, keyed by canonical tool name. Tools with no
// entry here show the verb alone. Extractors are best-effort: they return
// undefined (never throw) when an argument is missing or oddly typed.
const SUBJECT_EXTRACTORS: Record<string, SubjectExtractor> = {
  // URL-bearing tools
  new_page: (i) => formatUrl(i.url),
  new_hidden_page: (i) => formatUrl(i.url),
  navigate_page: (i) => {
    // Back/forward/reload navigations carry no URL worth displaying.
    const action = asString(i.action)
    if (action === 'back') return 'back'
    if (action === 'forward') return 'forward'
    if (action === 'reload') return 'reload'
    return formatUrl(i.url)
  },
  web_fetch: (i) => formatUrl(i.url),

  // Search queries — quoted, with per-tool fallback argument names
  web_search: (i) => quote(stringField(i, 'query', 'q')),
  search_history: (i) => quote(stringField(i, 'query', 'text')),
  search_bookmarks: (i) => quote(stringField(i, 'query', 'text')),
  search_memory: (i) => quote(stringField(i, 'query', 'q')),
  search_dom: (i) => quote(stringField(i, 'query', 'selector')),
  search_documentation: (i) => quote(stringField(i, 'query', 'q')),
  find_files: (i) => quote(stringField(i, 'pattern', 'query')),

  // Element interactions — show the element description
  click: (i) => stringField(i, 'element'),
  hover: (i) => stringField(i, 'element'),
  focus: (i) => stringField(i, 'element'),
  clear: (i) => stringField(i, 'element'),
  check: (i) => stringField(i, 'element'),
  uncheck: (i) => stringField(i, 'element'),
  fill: (i) => {
    // Prefer "element: text"; fall back to whichever part is present.
    const target = stringField(i, 'element')
    const text = stringField(i, 'text')
    if (target && text) return `${target}: ${truncate(text, 40)}`
    return target ?? truncate(text, 40)
  },
  press_key: (i) => stringField(i, 'key'),

  // Coordinate-based input
  click_at: (i) => coords(i.x, i.y),
  hover_at: (i) => coords(i.x, i.y),
  type_at: (i) => {
    // Prefer "x, y: text"; fall back to whichever part is present.
    const at = coords(i.x, i.y)
    const text = stringField(i, 'text')
    if (at && text) return `${at}: ${truncate(text, 40)}`
    return at ?? truncate(text, 40)
  },
  drag_at: (i) => {
    const from = coords(i.fromX, i.fromY)
    const to = coords(i.toX, i.toY)
    if (from && to) return `${from} → ${to}`
    return from ?? to
  },

  // Tab management — numeric page IDs rendered as "tab N"
  show_page: (i) => {
    const page = i.page
    return typeof page === 'number' ? `tab ${page}` : asString(page)
  },
  close_page: (i) => {
    const page = i.page
    return typeof page === 'number' ? `tab ${page}` : asString(page)
  },
  move_page: (i) => {
    const page = i.page
    return typeof page === 'number' ? `tab ${page}` : asString(page)
  },

  // Page reads (take_snapshot, take_enhanced_snapshot, get_page_content,
  // get_page_links, get_dom, take_screenshot) intentionally omit a
  // subject — the only argument is a numeric page ID that's internal
  // to the agent and meaningless to the user ("tab 4" tells them nothing).
  // The verb alone communicates what happened.

  // External actions via Strata
  execute_action: (i) => {
    // Prefer "server · action"; fall back to whichever part is present.
    const server = stringField(i, 'server_name')
    const action = stringField(i, 'action_name')
    if (server && action) return `${server} · ${action}`
    return action ?? server
  },
  get_category_actions: (i) => stringField(i, 'category_name', 'server_name'),
  get_action_details: (i) => stringField(i, 'action_name'),
  discover_server_categories_or_actions: (i) =>
    stringField(i, 'server_name', 'category_name'),
  connector_mcp_servers: (i) => stringField(i, 'server_name'),

  // Filesystem — show only the file name, not the full sandbox path
  read_file: (i) => basename(stringField(i, 'path')),
  write_file: (i) => basename(stringField(i, 'path')),

  // Memory writes — show first chars of content
  write_memory: (i) => truncate(stringField(i, 'content', 'text'), 40),
  update_soul: (i) => truncate(stringField(i, 'content'), 40),
  update_core: (i) => truncate(stringField(i, 'content'), 40),

  // Bookmarks — title preferred, then URL or ID
  create_bookmark: (i) => stringField(i, 'title') ?? formatUrl(i.url),
  remove_bookmark: (i) => stringField(i, 'id', 'title'),
  update_bookmark: (i) => stringField(i, 'id', 'title'),
  move_bookmark: (i) => stringField(i, 'id', 'title'),

  // History
  delete_history_url: (i) => formatUrl(i.url),
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
// Public API
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Result of buildToolLabel: a past-tense verb phrase plus an optional
 * short subject describing what the verb acted on.
 */
export interface ToolLabelResult {
  // Human-readable verb phrase, e.g. "Clicked" or "Searched the web".
  label: string
  // Optional object of the verb, e.g. an element description or a URL.
  subject?: string
}
|
||||
|
||||
/**
|
||||
* Strip MCP namespace prefixes (e.g. "browseros__", "mcp_") to find the
|
||||
* canonical tool name used in the override maps.
|
||||
*/
|
||||
function canonicalName(rawName: string): string {
|
||||
return rawName.replace(/^browseros__/, '').replace(/^mcp_/, '')
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a snake_case tool name into Sentence-case English as a fallback
|
||||
* when no curated override exists.
|
||||
*/
|
||||
function humanizeToolName(rawName: string): string {
|
||||
const stripped = canonicalName(rawName)
|
||||
const words = stripped.split(/[_-]/).filter((w) => w.length > 0)
|
||||
if (words.length === 0) return rawName
|
||||
const first = words[0]!
|
||||
return [
|
||||
first.charAt(0).toUpperCase() + first.slice(1),
|
||||
...words.slice(1),
|
||||
].join(' ')
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a human-readable label and subject string for a tool call,
|
||||
* suitable for rendering in the chat activity view.
|
||||
*/
|
||||
export function buildToolLabel(
|
||||
rawName: string,
|
||||
input?: Record<string, unknown>,
|
||||
): ToolLabelResult {
|
||||
const canonical = canonicalName(rawName)
|
||||
const label =
|
||||
VERB_OVERRIDES[canonical] ??
|
||||
VERB_OVERRIDES[rawName] ??
|
||||
humanizeToolName(rawName)
|
||||
|
||||
const extractor = Object.hasOwn(SUBJECT_EXTRACTORS, canonical)
|
||||
? SUBJECT_EXTRACTORS[canonical]
|
||||
: Object.hasOwn(SUBJECT_EXTRACTORS, rawName)
|
||||
? SUBJECT_EXTRACTORS[rawName]
|
||||
: undefined
|
||||
const subject = extractor && input ? extractor(input) : undefined
|
||||
|
||||
return { label, subject }
|
||||
}
|
||||
26
packages/browseros-agent/apps/eval/configs/agisdk-real-smoke.json
vendored
Normal file
26
packages/browseros-agent/apps/eval/configs/agisdk-real-smoke.json
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"agent": {
|
||||
"type": "single",
|
||||
"provider": "openai-compatible",
|
||||
"model": "moonshotai/kimi-k2.5",
|
||||
"apiKey": "OPENROUTER_API_KEY",
|
||||
"baseUrl": "https://openrouter.ai/api/v1",
|
||||
"supportsImages": true
|
||||
},
|
||||
"dataset": "../data/agisdk-real.jsonl",
|
||||
"num_workers": 10,
|
||||
"restart_server_per_task": true,
|
||||
"browseros": {
|
||||
"server_url": "http://127.0.0.1:9110",
|
||||
"base_cdp_port": 9010,
|
||||
"base_server_port": 9110,
|
||||
"base_extension_port": 9310,
|
||||
"load_extensions": false,
|
||||
"headless": false
|
||||
},
|
||||
"captcha": {
|
||||
"api_key_env": "NOPECHA_API_KEY"
|
||||
},
|
||||
"graders": ["agisdk_state_diff"],
|
||||
"timeout_ms": 1800000
|
||||
}
|
||||
@@ -2,9 +2,9 @@
|
||||
"agent": {
|
||||
"type": "single",
|
||||
"provider": "openai-compatible",
|
||||
"model": "accounts/fireworks/models/kimi-k2p5",
|
||||
"apiKey": "FIREWORKS_API_KEY",
|
||||
"baseUrl": "https://api.fireworks.ai/inference/v1",
|
||||
"model": "moonshotai/kimi-k2.5",
|
||||
"apiKey": "OPENROUTER_API_KEY",
|
||||
"baseUrl": "https://openrouter.ai/api/v1",
|
||||
"supportsImages": true
|
||||
},
|
||||
"dataset": "../data/webbench-2of4-50.jsonl",
|
||||
|
||||
26
packages/browseros-agent/apps/eval/configs/infinity-hard-50.json
vendored
Normal file
26
packages/browseros-agent/apps/eval/configs/infinity-hard-50.json
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"agent": {
|
||||
"type": "single",
|
||||
"provider": "openai-compatible",
|
||||
"model": "moonshotai/kimi-k2.5",
|
||||
"apiKey": "OPENROUTER_API_KEY",
|
||||
"baseUrl": "https://openrouter.ai/api/v1",
|
||||
"supportsImages": true
|
||||
},
|
||||
"dataset": "../data/webarena-infinity-hard-50.jsonl",
|
||||
"num_workers": 10,
|
||||
"restart_server_per_task": true,
|
||||
"browseros": {
|
||||
"server_url": "http://127.0.0.1:9110",
|
||||
"base_cdp_port": 9010,
|
||||
"base_server_port": 9110,
|
||||
"base_extension_port": 9310,
|
||||
"load_extensions": false,
|
||||
"headless": false
|
||||
},
|
||||
"captcha": {
|
||||
"api_key_env": "NOPECHA_API_KEY"
|
||||
},
|
||||
"graders": ["infinity_state"],
|
||||
"timeout_ms": 1800000
|
||||
}
|
||||
47
packages/browseros-agent/apps/eval/data/agisdk-real.jsonl
vendored
Normal file
47
packages/browseros-agent/apps/eval/data/agisdk-real.jsonl
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
{"query_id": "agisdk-dashdish-10", "dataset": "agisdk-real", "query": "Place an order from \"Souvla\" for a \"Medium Classic Cheeseburger\" and a \"Small Bacon Double Cheeseburger\" with \"Standard Delivery\" as the method with the default charged options.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-dashdish.vercel.app", "metadata": {"original_task_id": "dashdish-10", "website": "DashDish", "category": "agisdk-real", "additional": {"agisdk_task_id": "dashdish-10", "challenge_type": "action", "difficulty": "hard", "similar_to": "Doordash"}}}
|
||||
{"query_id": "agisdk-fly-unified-5", "dataset": "agisdk-real", "query": "Find me the cheapest fare for a flight from Orlando to Milwaukee on December 5th, 2024 and book it.\nPassenger: John Doe\nDate of Birth: 01/01/1990\nSex: Male\nSeat Selection: No\nPayment: Credit Card (378342143523967), Exp: 12/25, Security Code: 420 Address: 123 Main St, San Francisco, CA, 94105, USA, Phone: 555-123-4567, Email: johndoe@example.com.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-fly-unified.vercel.app", "metadata": {"original_task_id": "fly-unified-5", "website": "Fly Unified", "category": "agisdk-real", "additional": {"agisdk_task_id": "fly-unified-5", "challenge_type": "retrieval-action", "difficulty": "medium", "similar_to": "United Airlines"}}}
|
||||
{"query_id": "agisdk-udriver-10", "dataset": "agisdk-real", "query": "Order me a ride for 4pm, I'll be at the de Young muesum headed to the Waterbar, fanciest option possible please.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-udriver.vercel.app", "metadata": {"original_task_id": "udriver-10", "website": "UDriver", "category": "agisdk-real", "additional": {"agisdk_task_id": "udriver-10", "challenge_type": "action", "difficulty": "hard", "similar_to": "Uber"}}}
|
||||
{"query_id": "agisdk-udriver-9", "dataset": "agisdk-real", "query": "Book me a ride from the thai restaurant I last took a ride to for later today at 2pm, I'll be at 333 Apartments on Fremont", "graders": ["agisdk_state_diff"], "start_url": "https://evals-udriver.vercel.app", "metadata": {"original_task_id": "udriver-9", "website": "UDriver", "category": "agisdk-real", "additional": {"agisdk_task_id": "udriver-9", "challenge_type": "retrieval-action", "difficulty": "hard", "similar_to": "Uber"}}}
|
||||
{"query_id": "agisdk-topwork-4", "dataset": "agisdk-real", "query": "Create a job post for a UI/UX Designer with expertise in Figma, Sketch, and Adobe Creative Suite, including project details, timeline, and required skills (Wireframing, Prototyping, Responsive Design).", "graders": ["agisdk_state_diff"], "start_url": "https://evals-topwork.vercel.app", "metadata": {"original_task_id": "topwork-4", "website": "TopWork", "category": "agisdk-real", "additional": {"agisdk_task_id": "topwork-4", "challenge_type": "action", "difficulty": "medium", "similar_to": "Upwork"}}}
|
||||
{"query_id": "agisdk-gocalendar-4", "dataset": "agisdk-real", "query": "Change the \"Team Check-In\" event on July 18, 2024, name to \"Project Kickoff\" and update the location to \"Zoom\"", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gocalendar.vercel.app", "metadata": {"original_task_id": "gocalendar-4", "website": "GoCalendar", "category": "agisdk-real", "additional": {"agisdk_task_id": "gocalendar-4", "challenge_type": "action", "difficulty": "medium", "similar_to": "Google Calendar"}}}
|
||||
{"query_id": "agisdk-staynb-6", "dataset": "agisdk-real", "query": "Find and book the stay with the best value for money (cheapest stay with the best reviews) for 1 day. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-staynb.vercel.app", "metadata": {"original_task_id": "staynb-6", "website": "StayNB", "category": "agisdk-real", "additional": {"agisdk_task_id": "staynb-6", "challenge_type": "retrieval-action", "difficulty": "medium", "similar_to": "Airbnb"}}}
|
||||
{"query_id": "agisdk-fly-unified-9", "dataset": "agisdk-real", "query": "Book me a flight from San Francisco to Chicago in Basic Economy on December 18th at 10:00. Ensure no seat selection is made.\nPassenger: David Lee\nDate of Birth: 07/22/1985\nSex: Male\nSeat Selection: No\nPayment: Credit Card (9999 8888 7777), Exp: 03/30, Address: 987 Cedar St, Chicago, IL, 60601, USA, Phone: 555-987-1234, Email: davidlee@example.com.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-fly-unified.vercel.app", "metadata": {"original_task_id": "fly-unified-9", "website": "Fly Unified", "category": "agisdk-real", "additional": {"agisdk_task_id": "fly-unified-9", "challenge_type": "action", "difficulty": "hard", "similar_to": "United Airlines"}}}
|
||||
{"query_id": "agisdk-networkin-9", "dataset": "agisdk-real", "query": "Find a professional who attended Stanford and send them a connection request and a message.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-networkin.vercel.app", "metadata": {"original_task_id": "networkin-9", "website": "Networkin", "category": "agisdk-real", "additional": {"agisdk_task_id": "networkin-9", "challenge_type": "retrieval-action", "difficulty": "medium", "similar_to": "LinkedIn"}}}
|
||||
{"query_id": "agisdk-udriver-11", "dataset": "agisdk-real", "query": "I need to go from Pacific Catch on Chestnut back home to 333 Fremont now. If the fancy version is within ten dollars of the regular one, book that.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-udriver.vercel.app", "metadata": {"original_task_id": "udriver-11", "website": "UDriver", "category": "agisdk-real", "additional": {"agisdk_task_id": "udriver-11", "challenge_type": "action", "difficulty": "hard", "similar_to": "Uber"}}}
|
||||
{"query_id": "agisdk-fly-unified-4", "dataset": "agisdk-real", "query": "Book me a round-trip flight from Providence (Rhode Island) to Indianapolis, departing on December 5th, 2024 at 08:00 and returning on December 9th at 14:00.\nPassenger: Jane Smith\nDate of Birth: 02/14/1995\nSex: Female\nSeat Selection: Yes (Window seat)\nPayment: Credit Card (378342143523967), Exp: 06/26, security code: 345 Address: 456 Elm St, Miami, FL, 33101, USA, Phone: 555-987-6543, Email: janesmith@example.com.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-fly-unified.vercel.app", "metadata": {"original_task_id": "fly-unified-4", "website": "Fly Unified", "category": "agisdk-real", "additional": {"agisdk_task_id": "fly-unified-4", "challenge_type": "action", "difficulty": "medium", "similar_to": "United Airlines"}}}
|
||||
{"query_id": "agisdk-networkin-5", "dataset": "agisdk-real", "query": "Send a connection request to John Smith.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-networkin.vercel.app", "metadata": {"original_task_id": "networkin-5", "website": "Networkin", "category": "agisdk-real", "additional": {"agisdk_task_id": "networkin-5", "challenge_type": "action", "difficulty": "easy", "similar_to": "LinkedIn"}}}
|
||||
{"query_id": "agisdk-zilloft-6", "dataset": "agisdk-real", "query": "Select a property listed in San Francisco as \"Condos\" within a price range under $300,000 and request a tour for tomorrow at 4:00 PM. Use these contact details: Name: Sarah Brown, Email: sarahbrown@example.com, Phone: 555-987-6543.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-zilloft.vercel.app", "metadata": {"original_task_id": "zilloft-6", "website": "Zilloft", "category": "agisdk-real", "additional": {"agisdk_task_id": "zilloft-6", "challenge_type": "action", "difficulty": "medium", "similar_to": "Zillow"}}}
|
||||
{"query_id": "agisdk-topwork-2", "dataset": "agisdk-real", "query": "Create a job posting for a Backend Developer specializing in Python, Django, and Flask to develop a high-performance web application. Include project details such as required skills (PostgreSQL, Docker, AWS, CI/CD), estimated project timeline, and budget.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-topwork.vercel.app", "metadata": {"original_task_id": "topwork-2", "website": "TopWork", "category": "agisdk-real", "additional": {"agisdk_task_id": "topwork-2", "challenge_type": "action", "difficulty": "medium", "similar_to": "Upwork"}}}
|
||||
{"query_id": "agisdk-gocalendar-3", "dataset": "agisdk-real", "query": "Delete the event titled \"Breakfast Meeting with Client\" scheduled for July 19, 2024", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gocalendar.vercel.app", "metadata": {"original_task_id": "gocalendar-3", "website": "GoCalendar", "category": "agisdk-real", "additional": {"agisdk_task_id": "gocalendar-3", "challenge_type": "action", "difficulty": "easy", "similar_to": "Google Calendar"}}}
|
||||
{"query_id": "agisdk-topwork-3", "dataset": "agisdk-real", "query": "Create a job listing for a Full-Stack Developer with expertise in Java, Spring Boot, and Angular, outlining the project scope, estimated duration, and required skills (MySQL, Docker, Kubernetes, and Jenkins). The ideal candidate should have experience in enterprise-level applications and building scalable microservices. After creating the job post, please describe what you included in the job listing.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-topwork.vercel.app", "metadata": {"original_task_id": "topwork-3", "website": "TopWork", "category": "agisdk-real", "additional": {"agisdk_task_id": "topwork-3", "challenge_type": "retrieval", "difficulty": "medium", "similar_to": "Upwork"}}}
|
||||
{"query_id": "agisdk-fly-unified-2", "dataset": "agisdk-real", "query": "Book me a one-way flight from Indiana to New York on December 2nd 2024 at 12:00.\nPassenger: John Doe\nDate of Birth: 01/01/1990\nSex: Male\nSeat Selection: No\nPayment: Credit Card (378342143523967), Exp: 12/25, Security Code: 245, Address: 123 Main St, San Francisco, CA, 94105, USA, Phone: 555-123-4567, Email: johndoe@example.com.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-fly-unified.vercel.app", "metadata": {"original_task_id": "fly-unified-2", "website": "Fly Unified", "category": "agisdk-real", "additional": {"agisdk_task_id": "fly-unified-2", "challenge_type": "action", "difficulty": "easy", "similar_to": "United Airlines"}}}
|
||||
{"query_id": "agisdk-dashdish-7", "dataset": "agisdk-real", "query": "Select \"Express Delivery\" for an order from \"DragonEats\" of \"Mushroom Swiss Burger\" and complete the checkout with the pre-loaded Visa card.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-dashdish.vercel.app", "metadata": {"original_task_id": "dashdish-7", "website": "DashDish", "category": "agisdk-real", "additional": {"agisdk_task_id": "dashdish-7", "challenge_type": "action", "difficulty": "hard", "similar_to": "Doordash"}}}
|
||||
{"query_id": "agisdk-networkin-3", "dataset": "agisdk-real", "query": "Write a post inviting users to a networking event, including details about the event's purpose, date, and target audience.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-networkin.vercel.app", "metadata": {"original_task_id": "networkin-3", "website": "Networkin", "category": "agisdk-real", "additional": {"agisdk_task_id": "networkin-3", "challenge_type": "action", "difficulty": "medium", "similar_to": "LinkedIn"}}}
|
||||
{"query_id": "agisdk-gomail-7", "dataset": "agisdk-real", "query": "Delete the email with the subject \"New Leadership Articles You Can't Miss\" from the Inbox.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gomail.vercel.app", "metadata": {"original_task_id": "gomail-7", "website": "GoMail", "category": "agisdk-real", "additional": {"agisdk_task_id": "gomail-7", "challenge_type": "retrieval-action", "difficulty": "hard", "similar_to": "Gmail"}}}
|
||||
{"query_id": "agisdk-opendining-8", "dataset": "agisdk-real", "query": "Identify and book the restaurant with the lowest rating. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-opendining.vercel.app", "metadata": {"original_task_id": "opendining-8", "website": "OpenDining", "category": "agisdk-real", "additional": {"agisdk_task_id": "opendining-8", "challenge_type": "retrieval-action", "difficulty": "easy", "similar_to": "OpenTable"}}}
|
||||
{"query_id": "agisdk-udriver-1", "dataset": "agisdk-real", "query": "Book a ride from Fitness Urbano to Pacific Cafe", "graders": ["agisdk_state_diff"], "start_url": "https://evals-udriver.vercel.app", "metadata": {"original_task_id": "udriver-1", "website": "UDriver", "category": "agisdk-real", "additional": {"agisdk_task_id": "udriver-1", "challenge_type": "action", "difficulty": "easy", "similar_to": "Uber"}}}
|
||||
{"query_id": "agisdk-staynb-2", "dataset": "agisdk-real", "query": "Click on one of the stays displayed on the homepage and book it for a family of 4 (2 adults and 2 children). For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-staynb.vercel.app", "metadata": {"original_task_id": "staynb-2", "website": "StayNB", "category": "agisdk-real", "additional": {"agisdk_task_id": "staynb-2", "challenge_type": "action", "difficulty": "easy", "similar_to": "Airbnb"}}}
|
||||
{"query_id": "agisdk-opendining-10", "dataset": "agisdk-real", "query": "Check the menus of all restaurants for vegetarian options and make a reservation at the one with the most vegetarian choices. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-opendining.vercel.app", "metadata": {"original_task_id": "opendining-10", "website": "OpenDining", "category": "agisdk-real", "additional": {"agisdk_task_id": "opendining-10", "challenge_type": "retrieval-action", "difficulty": "medium", "similar_to": "OpenTable"}}}
|
||||
{"query_id": "agisdk-opendining-4", "dataset": "agisdk-real", "query": "Use the search bar to search for a restaurant on September 2nd at 4:30 PM for 7 people, using \"Japanese\" as the search term, and book the first result. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-opendining.vercel.app", "metadata": {"original_task_id": "opendining-4", "website": "OpenDining", "category": "agisdk-real", "additional": {"agisdk_task_id": "opendining-4", "challenge_type": "action", "difficulty": "hard", "similar_to": "OpenTable"}}}
|
||||
{"query_id": "agisdk-gomail-8", "dataset": "agisdk-real", "query": "Clear all emails from \"GitHub\" in the inbox to trash.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gomail.vercel.app", "metadata": {"original_task_id": "gomail-8", "website": "GoMail", "category": "agisdk-real", "additional": {"agisdk_task_id": "gomail-8", "challenge_type": "action", "difficulty": "medium", "similar_to": "Gmail"}}}
|
||||
{"query_id": "agisdk-dashdish-4", "dataset": "agisdk-real", "query": "Schedule a delivery order from \"Taco Bell\" adding a \"Classic Cheeseburger\" large size for later and add the note \"Leave at the front door\".", "graders": ["agisdk_state_diff"], "start_url": "https://evals-dashdish.vercel.app", "metadata": {"original_task_id": "dashdish-4", "website": "DashDish", "category": "agisdk-real", "additional": {"agisdk_task_id": "dashdish-4", "challenge_type": "action", "difficulty": "medium", "similar_to": "Doordash"}}}
|
||||
{"query_id": "agisdk-networkin-1", "dataset": "agisdk-real", "query": "Create a new text post for the feed with a professional update about AI trends in 2025, mentioning three key advancements and their impact on the job market.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-networkin.vercel.app", "metadata": {"original_task_id": "networkin-1", "website": "Networkin", "category": "agisdk-real", "additional": {"agisdk_task_id": "networkin-1", "challenge_type": "action", "difficulty": "medium", "similar_to": "LinkedIn"}}}
|
||||
{"query_id": "agisdk-dashdish-5", "dataset": "agisdk-real", "query": "Add three \"Loaded Bacon Cheese Fries\" to the shopping cart from \"Man vs. Fries\". Proceed to checkout and select \"Pickup\" as the delivery method.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-dashdish.vercel.app", "metadata": {"original_task_id": "dashdish-5", "website": "DashDish", "category": "agisdk-real", "additional": {"agisdk_task_id": "dashdish-5", "challenge_type": "retrieval-action", "difficulty": "medium", "similar_to": "Doordash"}}}
|
||||
{"query_id": "agisdk-opendining-5", "dataset": "agisdk-real", "query": "Scroll through the homepage carousel until \"Ocean Breeze\" is visible, select the second available time slot, and complete the reservation. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-opendining.vercel.app", "metadata": {"original_task_id": "opendining-5", "website": "OpenDining", "category": "agisdk-real", "additional": {"agisdk_task_id": "opendining-5", "challenge_type": "action", "difficulty": "medium", "similar_to": "OpenTable"}}}
|
||||
{"query_id": "agisdk-topwork-1", "dataset": "agisdk-real", "query": "Create a new job post for a Frontend Developer with expertise in React and TypeScript, specifying project details such as estimated duration, required skills, and budget.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-topwork.vercel.app", "metadata": {"original_task_id": "topwork-1", "website": "TopWork", "category": "agisdk-real", "additional": {"agisdk_task_id": "topwork-1", "challenge_type": "action", "difficulty": "medium", "similar_to": "Upwork"}}}
|
||||
{"query_id": "agisdk-gocalendar-1", "dataset": "agisdk-real", "query": "Create a new event titled \"Team Meeting\" on July 19, 2024, from 2 PM to 2:30 PM, and include \"Conference Room A\" as the location", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gocalendar.vercel.app", "metadata": {"original_task_id": "gocalendar-1", "website": "GoCalendar", "category": "agisdk-real", "additional": {"agisdk_task_id": "gocalendar-1", "challenge_type": "action", "difficulty": "medium", "similar_to": "Google Calendar"}}}
|
||||
{"query_id": "agisdk-gomail-5", "dataset": "agisdk-real", "query": "Schedule an email to jane.doe@example.com with the subject \"Weekly Update\" to be sent next Monday at 9:00 AM.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gomail.vercel.app", "metadata": {"original_task_id": "gomail-5", "website": "GoMail", "category": "agisdk-real", "additional": {"agisdk_task_id": "gomail-5", "challenge_type": "retrieval-action", "difficulty": "medium", "similar_to": "Gmail"}}}
|
||||
{"query_id": "agisdk-staynb-4", "dataset": "agisdk-real", "query": "Book a stay for 2 children with 1 adult. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-staynb.vercel.app", "metadata": {"original_task_id": "staynb-4", "website": "StayNB", "category": "agisdk-real", "additional": {"agisdk_task_id": "staynb-4", "challenge_type": "action", "difficulty": "medium", "similar_to": "Airbnb"}}}
|
||||
{"query_id": "agisdk-networkin-6", "dataset": "agisdk-real", "query": "Choose a random person who you haven't connected with, connect with them, and send them a message saying, 'howdy, partner'.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-networkin.vercel.app", "metadata": {"original_task_id": "networkin-6", "website": "Networkin", "category": "agisdk-real", "additional": {"agisdk_task_id": "networkin-6", "challenge_type": "action", "difficulty": "medium", "similar_to": "LinkedIn"}}}
|
||||
{"query_id": "agisdk-dashdish-2", "dataset": "agisdk-real", "query": "Add a \"Medium Pepperoni Pizza\" from the restaurant \"Papa Johns Pizza\" to the shopping cart and purchase it.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-dashdish.vercel.app", "metadata": {"original_task_id": "dashdish-2", "website": "DashDish", "category": "agisdk-real", "additional": {"agisdk_task_id": "dashdish-2", "challenge_type": "action", "difficulty": "easy", "similar_to": "Doordash"}}}
|
||||
{"query_id": "agisdk-staynb-8", "dataset": "agisdk-real", "query": "Scroll through the homepage and book the last stay located in Paris.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-staynb.vercel.app", "metadata": {"original_task_id": "staynb-8", "website": "StayNB", "category": "agisdk-real", "additional": {"agisdk_task_id": "staynb-8", "challenge_type": "retrieval-action", "difficulty": "medium", "similar_to": "Airbnb"}}}
|
||||
{"query_id": "agisdk-gomail-2", "dataset": "agisdk-real", "query": "Mark the first email in the Inbox as \"read\".", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gomail.vercel.app", "metadata": {"original_task_id": "gomail-2", "website": "GoMail", "category": "agisdk-real", "additional": {"agisdk_task_id": "gomail-2", "challenge_type": "action", "difficulty": "easy", "similar_to": "Gmail"}}}
|
||||
{"query_id": "agisdk-networkin-10", "dataset": "agisdk-real", "query": "Generate a polite follow-up message for a previous unanswered chat, starting with \"Following up on\".", "graders": ["agisdk_state_diff"], "start_url": "https://evals-networkin.vercel.app", "metadata": {"original_task_id": "networkin-10", "website": "Networkin", "category": "agisdk-real", "additional": {"agisdk_task_id": "networkin-10", "challenge_type": "action", "difficulty": "medium", "similar_to": "LinkedIn"}}}
|
||||
{"query_id": "agisdk-gomail-3", "dataset": "agisdk-real", "query": "Compose a new email to jonathan.smith@example.com with the subject \"Meeting Notes\" and body \"Please find the meeting notes attached.\"", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gomail.vercel.app", "metadata": {"original_task_id": "gomail-3", "website": "GoMail", "category": "agisdk-real", "additional": {"agisdk_task_id": "gomail-3", "challenge_type": "action", "difficulty": "easy", "similar_to": "Gmail"}}}
|
||||
{"query_id": "agisdk-udriver-6", "dataset": "agisdk-real", "query": "Me and 4 friends need a ride from the Palace Hotel to dinner at Osha Thai leaving now", "graders": ["agisdk_state_diff"], "start_url": "https://evals-udriver.vercel.app", "metadata": {"original_task_id": "udriver-6", "website": "UDriver", "category": "agisdk-real", "additional": {"agisdk_task_id": "udriver-6", "challenge_type": "action", "difficulty": "hard", "similar_to": "Uber"}}}
|
||||
{"query_id": "agisdk-staynb-9", "dataset": "agisdk-real", "query": "Book a stay with the maximum number of guests supported. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-staynb.vercel.app", "metadata": {"original_task_id": "staynb-9", "website": "StayNB", "category": "agisdk-real", "additional": {"agisdk_task_id": "staynb-9", "challenge_type": "action", "difficulty": "hard", "similar_to": "Airbnb"}}}
|
||||
{"query_id": "agisdk-zilloft-3", "dataset": "agisdk-real", "query": "Find a home in San Diego priced under $150,000 with at least 2 bedrooms and request a tour. Use these details: Contact Name: John Doe, Email: johndoe@example.com, Phone: 555-123-4567, Tour Time: 2:00 PM, Tour Date: First available.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-zilloft.vercel.app", "metadata": {"original_task_id": "zilloft-3", "website": "Zilloft", "category": "agisdk-real", "additional": {"agisdk_task_id": "zilloft-3", "challenge_type": "retrieval-action", "difficulty": "easy", "similar_to": "Zillow"}}}
|
||||
{"query_id": "agisdk-fly-unified-6", "dataset": "agisdk-real", "query": "Reserve me a seat for the flight from Austin to Pittsburgh departing on December 11th, 2024 at 8:00 in Basic Economy.\nPassenger: Alice Brown\nDate of Birth: 05/20/1992\nSex: Female\nSeat Selection: Yes (Aisle seat)\nPayment: Credit Card (378342143523967), Exp: 09/27, security code: 332 Address: 789 Pine St, Los Angeles, CA, 90012, USA, Phone: 555-456-7890, Email: alicebrown@example.com.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-fly-unified.vercel.app", "metadata": {"original_task_id": "fly-unified-6", "website": "Fly Unified", "category": "agisdk-real", "additional": {"agisdk_task_id": "fly-unified-6", "challenge_type": "action", "difficulty": "medium", "similar_to": "United Airlines"}}}
|
||||
{"query_id": "agisdk-opendining-3", "dataset": "agisdk-real", "query": "Book a table at \"The Royal Dine\" for a party of 4 on July 20, 2024, at 7 PM. For fields you don't know the answer for, just fill them in with anything of your choice.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-opendining.vercel.app", "metadata": {"original_task_id": "opendining-3", "website": "OpenDining", "category": "agisdk-real", "additional": {"agisdk_task_id": "opendining-3", "challenge_type": "action", "difficulty": "easy", "similar_to": "OpenTable"}}}
|
||||
{"query_id": "agisdk-gocalendar-7", "dataset": "agisdk-real", "query": "Reschedule the \"Morning Coffee with sister\" event from July 18, 2024, at 9 AM to July 19, 2024, at 10AM using drag-and-drop functionality", "graders": ["agisdk_state_diff"], "start_url": "https://evals-gocalendar.vercel.app", "metadata": {"original_task_id": "gocalendar-7", "website": "GoCalendar", "category": "agisdk-real", "additional": {"agisdk_task_id": "gocalendar-7", "challenge_type": "action", "difficulty": "medium", "similar_to": "Google Calendar"}}}
|
||||
{"query_id": "agisdk-staynb-5", "dataset": "agisdk-real", "query": "Use the search bar to look for a stay. For the \"Where\" section, use the \"Search by region\" popover and select \"Europe\". Set the check-in date to October 13th and the check-out date to October 23rd. For the \"Who\" section, select 1 infant, 2 children, and 2 adults. Press the search button, select the first stay, and book it.", "graders": ["agisdk_state_diff"], "start_url": "https://evals-staynb.vercel.app", "metadata": {"original_task_id": "staynb-5", "website": "StayNB", "category": "agisdk-real", "additional": {"agisdk_task_id": "staynb-5", "challenge_type": "action", "difficulty": "medium", "similar_to": "Airbnb"}}}
|
||||
50
packages/browseros-agent/apps/eval/data/webarena-infinity-hard-50.jsonl
vendored
Normal file
50
packages/browseros-agent/apps/eval/data/webarena-infinity-hard-50.jsonl
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
{"query_id": "infinity-elation-prescriptions-task_h69", "dataset": "webarena-infinity", "query": "Approve all pending refill requests except for any medication that is involved in a major drug-drug interaction with another of the patient's active medications. Deny those with the reason 'Drug interaction \u2014 needs provider review before renewal'.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h69", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h69.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-elation-clinical-records-task_h52", "dataset": "webarena-infinity", "query": "Add the document tag 'Provider-Reviewed' to every visit note template that was created by the current logged-in provider. Do not modify templates created by other providers.", "graders": ["infinity_state"], "start_url": "http://localhost:8000", "metadata": {"original_task_id": "elation-clinical-records-task_h52", "website": "elation-clinical-records", "category": "webarena-infinity", "additional": {"app_name": "elation-clinical-records", "difficulty": "hard", "verifier_path": "real-tasks/task_h52.py", "app_base_port": 8000}}}
|
||||
{"query_id": "infinity-gmail-accounts-and-contacts-task_h44", "dataset": "webarena-infinity", "query": "Your sister's husband is one of your contacts. Find him, star his entry, and add the Friends label.", "graders": ["infinity_state"], "start_url": "http://localhost:8070", "metadata": {"original_task_id": "gmail-accounts-and-contacts-task_h44", "website": "gmail-accounts-and-contacts", "category": "webarena-infinity", "additional": {"app_name": "gmail-accounts-and-contacts", "difficulty": "hard", "verifier_path": "real-tasks/task_h44.py", "app_base_port": 8070}}}
|
||||
{"query_id": "infinity-gmail-task_h2", "dataset": "webarena-infinity", "query": "Update the Datadog alerts filter to also archive matching emails and forward them to priya.sharma@cloudnine.dev instead of nate.patel@devops.tools.", "graders": ["infinity_state"], "start_url": "http://localhost:8060", "metadata": {"original_task_id": "gmail-task_h2", "website": "gmail", "category": "webarena-infinity", "additional": {"app_name": "gmail", "difficulty": "hard", "verifier_path": "real-tasks/task_h2.py", "app_base_port": 8060}}}
|
||||
{"query_id": "infinity-gitlab-plan-and-track-task_h58", "dataset": "webarena-infinity", "query": "The Performance Initiative epic has two child epics. For the child epic with more open issues, set the weight of every issue in it to 13. For the other child epic, close all its open issues.", "graders": ["infinity_state"], "start_url": "http://localhost:8050", "metadata": {"original_task_id": "gitlab-plan-and-track-task_h58", "website": "gitlab-plan-and-track", "category": "webarena-infinity", "additional": {"app_name": "gitlab-plan-and-track", "difficulty": "hard", "verifier_path": "real-tasks/task_h58.py", "app_base_port": 8050}}}
|
||||
{"query_id": "infinity-figma-slides-task_h46", "dataset": "webarena-infinity", "query": "There are two slides with tables in the deck. Lock the table that compares competitors, and change the font size to 16 on the table that tracks quarterly feature adoption.", "graders": ["infinity_state"], "start_url": "http://localhost:8030", "metadata": {"original_task_id": "figma-slides-task_h46", "website": "figma-slides", "category": "webarena-infinity", "additional": {"app_name": "figma-slides", "difficulty": "hard", "verifier_path": "real-tasks/task_h46.py", "app_base_port": 8030}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h50", "dataset": "webarena-infinity", "query": "Deny the pending refill for the patient's cholesterol medication because his lipid panel is overdue. Then deny the Lisinopril refill as well \u2014 he needs a follow-up blood pressure check first.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h50", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h50.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h19", "dataset": "webarena-infinity", "query": "Discontinue the Omeprazole and prescribe Famotidine 20mg tablet twice daily as a replacement for GERD \u2014 qty 60, 3 refills, send to CVS #4521.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h19", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h19.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-paypal-my-wallet-task_h25", "dataset": "webarena-infinity", "query": "Convert all of my Australian dollars to euros.", "graders": ["infinity_state"], "start_url": "http://localhost:8100", "metadata": {"original_task_id": "paypal-my-wallet-task_h25", "website": "paypal-my-wallet", "category": "webarena-infinity", "additional": {"app_name": "paypal-my-wallet", "difficulty": "hard", "verifier_path": "real-tasks/task_h25.py", "app_base_port": 8100}}}
|
||||
{"query_id": "infinity-elation-clinical-records-task_h66", "dataset": "webarena-infinity", "query": "Create a new template called 'Anxiety Management' with HPI and Assessment sections, and billing code 99213 with description 'Office visit, established, low complexity'. Then create a visit note for Emily Nakamura using that new template and the Telehealth category, add a Psychological Status block to the note, and sign it.", "graders": ["infinity_state"], "start_url": "http://localhost:8000", "metadata": {"original_task_id": "elation-clinical-records-task_h66", "website": "elation-clinical-records", "category": "webarena-infinity", "additional": {"app_name": "elation-clinical-records", "difficulty": "hard", "verifier_path": "real-tasks/task_h66.py", "app_base_port": 8000}}}
|
||||
{"query_id": "infinity-elation-clinical-records-task_h62", "dataset": "webarena-infinity", "query": "Look up which template is assigned to the COVID Vaccine appointment type. Remove all its existing document tags and replace them with the single tag 'COVID-Protocol'. Then also assign that same template to the Urgent Same-Day appointment type.", "graders": ["infinity_state"], "start_url": "http://localhost:8000", "metadata": {"original_task_id": "elation-clinical-records-task_h62", "website": "elation-clinical-records", "category": "webarena-infinity", "additional": {"app_name": "elation-clinical-records", "difficulty": "hard", "verifier_path": "real-tasks/task_h62.py", "app_base_port": 8000}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h32", "dataset": "webarena-infinity", "query": "The patient has a medication that's being dispensed as written (brand name only). Discontinue that prescription and replace it with a new one \u2014 same medication, same sig, same pharmacy \u2014 but allow generic substitution this time. Qty 30, 3 refills, 30 days supply.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h32", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h32.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-gitlab-plan-and-track-task_h48", "dataset": "webarena-infinity", "query": "Add the 'breaking-change' label to every open issue in the API v3 Migration epic and remove any existing workflow-scoped labels from those issues.", "graders": ["infinity_state"], "start_url": "http://localhost:8050", "metadata": {"original_task_id": "gitlab-plan-and-track-task_h48", "website": "gitlab-plan-and-track", "category": "webarena-infinity", "additional": {"app_name": "gitlab-plan-and-track", "difficulty": "hard", "verifier_path": "real-tasks/task_h48.py", "app_base_port": 8050}}}
|
||||
{"query_id": "infinity-gitlab-plan-and-track-task_h77", "dataset": "webarena-infinity", "query": "Rename the 'UX' label to 'user-experience', change its type to 'group', and then add it to every open issue in the Frontend Modernization epic that doesn't already have it.", "graders": ["infinity_state"], "start_url": "http://localhost:8050", "metadata": {"original_task_id": "gitlab-plan-and-track-task_h77", "website": "gitlab-plan-and-track", "category": "webarena-infinity", "additional": {"app_name": "gitlab-plan-and-track", "difficulty": "hard", "verifier_path": "real-tasks/task_h77.py", "app_base_port": 8050}}}
|
||||
{"query_id": "infinity-xero-invoicing-task_h15", "dataset": "webarena-infinity", "query": "Create a new invoice for Summit Health Group for an annual software license and 12 months of support with a 10% discount on support.", "graders": ["infinity_state"], "start_url": "http://localhost:8120", "metadata": {"original_task_id": "xero-invoicing-task_h15", "website": "xero-invoicing", "category": "webarena-infinity", "additional": {"app_name": "xero-invoicing", "difficulty": "hard", "verifier_path": "real-tasks/task_h15.py", "app_base_port": 8120}}}
|
||||
{"query_id": "infinity-elation-clinical-records-task_h55", "dataset": "webarena-infinity", "query": "Resolve every problem across all patients in the system that currently has a status of Controlled.", "graders": ["infinity_state"], "start_url": "http://localhost:8000", "metadata": {"original_task_id": "elation-clinical-records-task_h55", "website": "elation-clinical-records", "category": "webarena-infinity", "additional": {"app_name": "elation-clinical-records", "difficulty": "hard", "verifier_path": "real-tasks/task_h55.py", "app_base_port": 8000}}}
|
||||
{"query_id": "infinity-gitlab-plan-and-track-task_h8", "dataset": "webarena-infinity", "query": "Create a confidential issue titled 'Emergency security patch' with priority::critical and the 'security' label, assigned to James O'Brien and Oliver Schmidt, with weight 2 in the Security Hardening milestone.", "graders": ["infinity_state"], "start_url": "http://localhost:8050", "metadata": {"original_task_id": "gitlab-plan-and-track-task_h8", "website": "gitlab-plan-and-track", "category": "webarena-infinity", "additional": {"app_name": "gitlab-plan-and-track", "difficulty": "hard", "verifier_path": "real-tasks/task_h8.py", "app_base_port": 8050}}}
|
||||
{"query_id": "infinity-paypal-my-wallet-task_h20", "dataset": "webarena-infinity", "query": "Make a $200 payment on PayPal Credit and change autopay to pay the full balance.", "graders": ["infinity_state"], "start_url": "http://localhost:8100", "metadata": {"original_task_id": "paypal-my-wallet-task_h20", "website": "paypal-my-wallet", "category": "webarena-infinity", "additional": {"app_name": "paypal-my-wallet", "difficulty": "hard", "verifier_path": "real-tasks/task_h20.py", "app_base_port": 8100}}}
|
||||
{"query_id": "infinity-gitlab-plan-and-track-task_h52", "dataset": "webarena-infinity", "query": "Create a new board called 'Performance Tracker' with lists for the priority::critical, priority::high, and priority::medium labels. Then add the 'priority::high' label to every open issue in the v4.1 milestone that has the 'performance' label.", "graders": ["infinity_state"], "start_url": "http://localhost:8050", "metadata": {"original_task_id": "gitlab-plan-and-track-task_h52", "website": "gitlab-plan-and-track", "category": "webarena-infinity", "additional": {"app_name": "gitlab-plan-and-track", "difficulty": "hard", "verifier_path": "real-tasks/task_h52.py", "app_base_port": 8050}}}
|
||||
{"query_id": "infinity-paypal-my-wallet-task_h80", "dataset": "webarena-infinity", "query": "Save all available Food & Drink offers, buy a $25 DoorDash gift card for yourself, and switch currency conversion to use my card issuer.", "graders": ["infinity_state"], "start_url": "http://localhost:8100", "metadata": {"original_task_id": "paypal-my-wallet-task_h80", "website": "paypal-my-wallet", "category": "webarena-infinity", "additional": {"app_name": "paypal-my-wallet", "difficulty": "hard", "verifier_path": "real-tasks/task_h80.py", "app_base_port": 8100}}}
|
||||
{"query_id": "infinity-gmail-accounts-and-contacts-task_h50", "dataset": "webarena-infinity", "query": "Add the Emergency label to every contact who is currently listed as a delegate (active, pending, or expired). Then remove all delegates whose status is not 'active'.", "graders": ["infinity_state"], "start_url": "http://localhost:8070", "metadata": {"original_task_id": "gmail-accounts-and-contacts-task_h50", "website": "gmail-accounts-and-contacts", "category": "webarena-infinity", "additional": {"app_name": "gmail-accounts-and-contacts", "difficulty": "hard", "verifier_path": "real-tasks/task_h50.py", "app_base_port": 8070}}}
|
||||
{"query_id": "infinity-elation-clinical-records-task_h14", "dataset": "webarena-infinity", "query": "Add the tag 'Flu-Season' to every patient whose primary provider is Dr. Sarah Chen.", "graders": ["infinity_state"], "start_url": "http://localhost:8000", "metadata": {"original_task_id": "elation-clinical-records-task_h14", "website": "elation-clinical-records", "category": "webarena-infinity", "additional": {"app_name": "elation-clinical-records", "difficulty": "hard", "verifier_path": "real-tasks/task_h14.py", "app_base_port": 8000}}}
|
||||
{"query_id": "infinity-figma-text-and-typography-task_h7", "dataset": "webarena-infinity", "query": "Remove all list formatting from every layer.", "graders": ["infinity_state"], "start_url": "http://localhost:8040", "metadata": {"original_task_id": "figma-text-and-typography-task_h7", "website": "figma-text-and-typography", "category": "webarena-infinity", "additional": {"app_name": "figma-text-and-typography", "difficulty": "hard", "verifier_path": "real-tasks/task_h7.py", "app_base_port": 8040}}}
|
||||
{"query_id": "infinity-paypal-my-wallet-task_h26", "dataset": "webarena-infinity", "query": "Send a $50 Amazon gift card to sarah.chen@email.com with 'Thank you!' as the message, and save the Amazon cashback offer.", "graders": ["infinity_state"], "start_url": "http://localhost:8100", "metadata": {"original_task_id": "paypal-my-wallet-task_h26", "website": "paypal-my-wallet", "category": "webarena-infinity", "additional": {"app_name": "paypal-my-wallet", "difficulty": "hard", "verifier_path": "real-tasks/task_h26.py", "app_base_port": 8100}}}
|
||||
{"query_id": "infinity-handshake-career-exploration-task_h97", "dataset": "webarena-infinity", "query": "Find the single most helpful answer across all Q&A questions and mark it helpful. Then find the most-viewed question and submit your own answer to it.", "graders": ["infinity_state"], "start_url": "http://localhost:8080", "metadata": {"original_task_id": "handshake-career-exploration-task_h97", "website": "handshake-career-exploration", "category": "webarena-infinity", "additional": {"app_name": "handshake-career-exploration", "difficulty": "hard", "verifier_path": "real-tasks/task_h97.py", "app_base_port": 8080}}}
|
||||
{"query_id": "infinity-figma-slides-task_h79", "dataset": "webarena-infinity", "query": "In the adoption table, find the feature with the highest Target Q4 percentage. In the competitive table, change DesignCraft's entry for that same feature to 'Market Leader'. Then update that feature's Target Q4 to '95%'.", "graders": ["infinity_state"], "start_url": "http://localhost:8030", "metadata": {"original_task_id": "figma-slides-task_h79", "website": "figma-slides", "category": "webarena-infinity", "additional": {"app_name": "figma-slides", "difficulty": "hard", "verifier_path": "real-tasks/task_h79.py", "app_base_port": 8030}}}
|
||||
{"query_id": "infinity-gitlab-plan-and-track-task_h41", "dataset": "webarena-infinity", "query": "For every open issue in the v4.2 - Security Hardening milestone: if it is already confidential, set its health status to 'at risk'. If it is not confidential, make it confidential and set its health status to 'needs attention'.", "graders": ["infinity_state"], "start_url": "http://localhost:8050", "metadata": {"original_task_id": "gitlab-plan-and-track-task_h41", "website": "gitlab-plan-and-track", "category": "webarena-infinity", "additional": {"app_name": "gitlab-plan-and-track", "difficulty": "hard", "verifier_path": "real-tasks/task_h41.py", "app_base_port": 8050}}}
|
||||
{"query_id": "infinity-handshake-career-exploration-task_h90", "dataset": "webarena-infinity", "query": "A student in the feed mentioned attending the NSBE conference. That student also answered a Q&A question about diversity programs in tech. Submit your own answer to that same question sharing your experience, then bookmark that student's feed post.", "graders": ["infinity_state"], "start_url": "http://localhost:8080", "metadata": {"original_task_id": "handshake-career-exploration-task_h90", "website": "handshake-career-exploration", "category": "webarena-infinity", "additional": {"app_name": "handshake-career-exploration", "difficulty": "hard", "verifier_path": "real-tasks/task_h90.py", "app_base_port": 8080}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h30", "dataset": "webarena-infinity", "query": "The patient has three temporary medications. Discontinue the corticosteroid taper and the penicillin antibiotic \u2014 the patient completed both courses. Move the remaining temporary medication to permanent Rx.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h30", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h30.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-linear-account-settings-task_h19", "dataset": "webarena-infinity", "query": "Turn off all desktop application settings: open in desktop app, notification badge, and spell check.", "graders": ["infinity_state"], "start_url": "http://localhost:8090", "metadata": {"original_task_id": "linear-account-settings-task_h19", "website": "linear-account-settings", "category": "webarena-infinity", "additional": {"app_name": "linear-account-settings", "difficulty": "hard", "verifier_path": "real-tasks/task_h19.py", "app_base_port": 8090}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h39", "dataset": "webarena-infinity", "query": "Change the default pharmacy to Express Scripts Mail Pharmacy for mail-order prescriptions. Then document that the patient takes Magnesium Citrate 400mg tablet as an OTC supplement \u2014 once daily at bedtime, 30-day supply.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h39", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h39.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-handshake-career-exploration-task_h136", "dataset": "webarena-infinity", "query": "Your earliest completed appointment was a specific type. Schedule a follow-up appointment of the same category and type with the same staff member, for March 28, 2026 at 9:00 AM, in person.", "graders": ["infinity_state"], "start_url": "http://localhost:8080", "metadata": {"original_task_id": "handshake-career-exploration-task_h136", "website": "handshake-career-exploration", "category": "webarena-infinity", "additional": {"app_name": "handshake-career-exploration", "difficulty": "hard", "verifier_path": "real-tasks/task_h136.py", "app_base_port": 8080}}}
|
||||
{"query_id": "infinity-handshake-career-exploration-task_h105", "dataset": "webarena-infinity", "query": "Find the second-most-viewed question in Q&A. It has two answers \u2014 mark the one with fewer helpful votes as helpful.", "graders": ["infinity_state"], "start_url": "http://localhost:8080", "metadata": {"original_task_id": "handshake-career-exploration-task_h105", "website": "handshake-career-exploration", "category": "webarena-infinity", "additional": {"app_name": "handshake-career-exploration", "difficulty": "hard", "verifier_path": "real-tasks/task_h105.py", "app_base_port": 8080}}}
|
||||
{"query_id": "infinity-gmail-accounts-and-contacts-task_h22", "dataset": "webarena-infinity", "query": "The Engineering Manager at TechCorp is listed as one of your delegates. Remove her delegation and unstar her contact.", "graders": ["infinity_state"], "start_url": "http://localhost:8070", "metadata": {"original_task_id": "gmail-accounts-and-contacts-task_h22", "website": "gmail-accounts-and-contacts", "category": "webarena-infinity", "additional": {"app_name": "gmail-accounts-and-contacts", "difficulty": "hard", "verifier_path": "real-tasks/task_h22.py", "app_base_port": 8070}}}
|
||||
{"query_id": "infinity-elation-patient-communication-task_h9", "dataset": "webarena-infinity", "query": "Acknowledge all unacknowledged reminders in the system.", "graders": ["infinity_state"], "start_url": "http://localhost:8010", "metadata": {"original_task_id": "elation-patient-communication-task_h9", "website": "elation-patient-communication", "category": "webarena-infinity", "additional": {"app_name": "elation-patient-communication", "difficulty": "hard", "verifier_path": "real-tasks/task_h9.py", "app_base_port": 8010}}}
|
||||
{"query_id": "infinity-superhuman-general-task_h1", "dataset": "webarena-infinity", "query": "Label the FinancePlus partnership email and the QuantumLab prototype email as 'Clients'.", "graders": ["infinity_state"], "start_url": "http://localhost:8110", "metadata": {"original_task_id": "superhuman-general-task_h1", "website": "superhuman-general", "category": "webarena-infinity", "additional": {"app_name": "superhuman-general", "difficulty": "hard", "verifier_path": "real-tasks/task_h1.py", "app_base_port": 8110}}}
|
||||
{"query_id": "infinity-xero-invoicing-task_h79", "dataset": "webarena-infinity", "query": "Change the invoice prefix to 'AUS-' and the next number to 100, then create a new invoice for CloudNine Analytics for 8 hours of UI/UX design work.", "graders": ["infinity_state"], "start_url": "http://localhost:8120", "metadata": {"original_task_id": "xero-invoicing-task_h79", "website": "xero-invoicing", "category": "webarena-infinity", "additional": {"app_name": "xero-invoicing", "difficulty": "hard", "verifier_path": "real-tasks/task_h79.py", "app_base_port": 8120}}}
|
||||
{"query_id": "infinity-figma-slides-task_h16", "dataset": "webarena-infinity", "query": "Enable slide numbers on every slide using the 'with total' format and change the aspect ratio to 4:3.", "graders": ["infinity_state"], "start_url": "http://localhost:8030", "metadata": {"original_task_id": "figma-slides-task_h16", "website": "figma-slides", "category": "webarena-infinity", "additional": {"app_name": "figma-slides", "difficulty": "hard", "verifier_path": "real-tasks/task_h16.py", "app_base_port": 8030}}}
|
||||
{"query_id": "infinity-linear-account-settings-task_h16", "dataset": "webarena-infinity", "query": "Revoke all API keys that have an expiration date.", "graders": ["infinity_state"], "start_url": "http://localhost:8090", "metadata": {"original_task_id": "linear-account-settings-task_h16", "website": "linear-account-settings", "category": "webarena-infinity", "additional": {"app_name": "linear-account-settings", "difficulty": "hard", "verifier_path": "real-tasks/task_h16.py", "app_base_port": 8090}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h2", "dataset": "webarena-infinity", "query": "Prescribe Buspirone 10mg for the patient's anxiety \u2014 once daily in the morning, qty 30, 5 refills. Send it to the same pharmacy that fills his Sertraline.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h2", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h2.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-handshake-career-exploration-task_h1", "dataset": "webarena-infinity", "query": "Follow all consulting firms on Handshake.", "graders": ["infinity_state"], "start_url": "http://localhost:8080", "metadata": {"original_task_id": "handshake-career-exploration-task_h1", "website": "handshake-career-exploration", "category": "webarena-infinity", "additional": {"app_name": "handshake-career-exploration", "difficulty": "hard", "verifier_path": "real-tasks/task_h1.py", "app_base_port": 8080}}}
|
||||
{"query_id": "infinity-handshake-career-exploration-task_h141", "dataset": "webarena-infinity", "query": "Some of your saved jobs are from employers you haven't followed yet. Find and follow each of those employers.", "graders": ["infinity_state"], "start_url": "http://localhost:8080", "metadata": {"original_task_id": "handshake-career-exploration-task_h141", "website": "handshake-career-exploration", "category": "webarena-infinity", "additional": {"app_name": "handshake-career-exploration", "difficulty": "hard", "verifier_path": "real-tasks/task_h141.py", "app_base_port": 8080}}}
|
||||
{"query_id": "infinity-figma-text-and-typography-task_h74", "dataset": "webarena-infinity", "query": "Set the spelling language to Japanese, the big nudge amount to 50, and the default horizontal alignment to right.", "graders": ["infinity_state"], "start_url": "http://localhost:8040", "metadata": {"original_task_id": "figma-text-and-typography-task_h74", "website": "figma-text-and-typography", "category": "webarena-infinity", "additional": {"app_name": "figma-text-and-typography", "difficulty": "hard", "verifier_path": "real-tasks/task_h74.py", "app_base_port": 8040}}}
|
||||
{"query_id": "infinity-elation-patient-communication-task_h63", "dataset": "webarena-infinity", "query": "Check the visit summaries to find the patient whose BNP level improved. Reply to their most recent message confirming they can resume light activity, then update their emergency contact's phone number to (650) 555-0001.", "graders": ["infinity_state"], "start_url": "http://localhost:8010", "metadata": {"original_task_id": "elation-patient-communication-task_h63", "website": "elation-patient-communication", "category": "webarena-infinity", "additional": {"app_name": "elation-patient-communication", "difficulty": "hard", "verifier_path": "real-tasks/task_h63.py", "app_base_port": 8010}}}
|
||||
{"query_id": "infinity-elation-patient-communication-task_h14", "dataset": "webarena-infinity", "query": "Change Dr. Torres's notification timeframe to 'Do not notify me' and remove Dr. Torres from Dr. Chen's General Question routing.", "graders": ["infinity_state"], "start_url": "http://localhost:8010", "metadata": {"original_task_id": "elation-patient-communication-task_h14", "website": "elation-patient-communication", "category": "webarena-infinity", "additional": {"app_name": "elation-patient-communication", "difficulty": "hard", "verifier_path": "real-tasks/task_h14.py", "app_base_port": 8010}}}
|
||||
{"query_id": "infinity-gitlab-plan-and-track-task_h67", "dataset": "webarena-infinity", "query": "Delete all time entries from the GraphQL gateway issue, add a single new entry of 16 hours with summary 'Complete rewrite estimate', and set its time estimate to 40 hours.", "graders": ["infinity_state"], "start_url": "http://localhost:8050", "metadata": {"original_task_id": "gitlab-plan-and-track-task_h67", "website": "gitlab-plan-and-track", "category": "webarena-infinity", "additional": {"app_name": "gitlab-plan-and-track", "difficulty": "hard", "verifier_path": "real-tasks/task_h67.py", "app_base_port": 8050}}}
|
||||
{"query_id": "infinity-gmail-accounts-and-contacts-task_h73", "dataset": "webarena-infinity", "query": "Among the individual people in your other contacts (those with a first and last name), find the one who was saved most recently. Move them to your main contacts, set their company to 'Salesforce', job title to 'Account Executive', and add the Work label.", "graders": ["infinity_state"], "start_url": "http://localhost:8070", "metadata": {"original_task_id": "gmail-accounts-and-contacts-task_h73", "website": "gmail-accounts-and-contacts", "category": "webarena-infinity", "additional": {"app_name": "gmail-accounts-and-contacts", "difficulty": "hard", "verifier_path": "real-tasks/task_h73.py", "app_base_port": 8070}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h4", "dataset": "webarena-infinity", "query": "Run a medication reconciliation and mark the Calcium+D3 supplement for discontinuation during the review.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h4", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h4.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-elation-prescriptions-task_h47", "dataset": "webarena-infinity", "query": "The patient's SSRI is currently dispensed at a different pharmacy than most of his other medications. Prescribe a refill of the same SSRI at the same dose and sig, but send it to CVS #4521 instead \u2014 qty 30, 5 refills, 30 days supply.", "graders": ["infinity_state"], "start_url": "http://localhost:8020", "metadata": {"original_task_id": "elation-prescriptions-task_h47", "website": "elation-prescriptions", "category": "webarena-infinity", "additional": {"app_name": "elation-prescriptions", "difficulty": "hard", "verifier_path": "real-tasks/task_h47.py", "app_base_port": 8020}}}
|
||||
{"query_id": "infinity-paypal-my-wallet-task_h89", "dataset": "webarena-infinity", "query": "If your USD PayPal balance is above $2,500, convert $500 to Japanese Yen. If it is $2,500 or below, first add $500 from your Chase bank account, then convert $500 to JPY. Either way, set the debit card cash back category to Fuel.", "graders": ["infinity_state"], "start_url": "http://localhost:8100", "metadata": {"original_task_id": "paypal-my-wallet-task_h89", "website": "paypal-my-wallet", "category": "webarena-infinity", "additional": {"app_name": "paypal-my-wallet", "difficulty": "hard", "verifier_path": "real-tasks/task_h89.py", "app_base_port": 8100}}}
|
||||
88
packages/browseros-agent/apps/eval/scripts/agisdk-evaluate.py
vendored
Normal file
88
packages/browseros-agent/apps/eval/scripts/agisdk-evaluate.py
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
AGI SDK evaluation helper for BrowserOS eval framework.
|
||||
|
||||
Reads JSON from stdin with task_id and env_state, runs the agisdk
|
||||
evaluator, and outputs the result as JSON to stdout.
|
||||
|
||||
Input format:
|
||||
{"task_id": "dashdish-1", "env_state": {...}, "model_response": ""}
|
||||
|
||||
Output format:
|
||||
{"reward": 0.0, "pass": false, "message": "...", "per_criterion": [...]}
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
|
||||
|
||||
def main():
    """Read an eval request from stdin, run the agisdk evaluator, and print
    the verdict as a single JSON object on stdout.

    stdin:  {"task_id": str, "env_state": dict, "model_response": str (optional)}
    stdout: {"reward": float, "pass": bool, "message": str, "per_criterion": list}
    """
    data = json.loads(sys.stdin.read())
    task_id = data["task_id"]
    env_state = data["env_state"]
    model_response = data.get("model_response", "")

    try:
        from agisdk.REAL.browsergym.webclones.evaluate import WebCloneEvaluator
        from agisdk.REAL.browsergym.webclones.task_config import TaskConfig
    except ImportError:
        # Exit 0 so the caller receives a graded failure instead of a crash.
        print(
            json.dumps(
                {
                    "reward": 0,
                    "pass": False,
                    "message": "agisdk package not installed. Run: pip install agisdk",
                    "per_criterion": [],
                }
            )
        )
        sys.exit(0)

    # Redirect stdout to stderr during evaluation — agisdk's rich logger
    # prints directly to stdout, which would corrupt our JSON output.
    # Capturing the real stream *before* the try block removes the need for
    # the fragile `"real_stdout" in dir()` check the error path used to do.
    real_stdout = sys.stdout
    sys.stdout = sys.stderr
    try:
        tc = TaskConfig(task_id)
        evaluator = WebCloneEvaluator(tc)
        reward_val, _done, message, info = evaluator.evaluate(
            env_state=env_state, model_response=model_response
        )
    except Exception as e:
        sys.stdout = real_stdout
        print(
            json.dumps(
                {
                    "reward": 0,
                    "pass": False,
                    "message": f"Evaluation error: {str(e)}",
                    "per_criterion": [],
                }
            )
        )
        return
    finally:
        # Idempotent restore: guarantees stdout is sane on every path.
        sys.stdout = real_stdout

    reward_val = float(reward_val) if reward_val is not None else 0.0
    results = info.get("results", [])
    # Each agisdk result is a (passed, detail, ...) tuple.
    per_criterion = [
        {"passed": r[0], "detail": str(r[1]) if len(r) > 1 else ""}
        for r in results
    ]

    print(
        json.dumps(
            {
                "reward": reward_val,
                "pass": reward_val == 1.0,
                "message": str(message),
                "per_criterion": per_criterion,
            }
        )
    )


if __name__ == "__main__":
    main()
|
||||
92
packages/browseros-agent/apps/eval/scripts/build-agisdk-dataset.py
vendored
Normal file
92
packages/browseros-agent/apps/eval/scripts/build-agisdk-dataset.py
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Build JSONL dataset for AGI SDK / REAL Bench evaluation.
|
||||
|
||||
Reads task definitions from the agisdk package, filters to feasible
|
||||
action-only tasks (excludes llm_boolean evaluators), and outputs JSONL
|
||||
to stdout in the BrowserOS eval framework format.
|
||||
|
||||
Usage:
|
||||
python scripts/build-agisdk-dataset.py > data/agisdk-real.jsonl
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
|
||||
# evals-omnizon.vercel.app was DMCA-takedown'd by Vercel (HTTP 451). Every task
|
||||
# on that site fails grading with "Failed to fetch /finish endpoint".
|
||||
EXCLUDED_WEBSITES = {"omnizon"}
|
||||
|
||||
|
||||
def has_llm_eval(task: dict) -> bool:
    """Return True when any evaluator attached to the task is 'llm_boolean'."""
    for evaluator in task.get("evals", []):
        if evaluator.get("type") == "llm_boolean":
            return True
    return False
|
||||
|
||||
|
||||
def main():
    """Emit one JSONL line per feasible, deterministically-gradable agisdk task."""
    try:
        from agisdk.REAL.tasks import all_tasks
    except ImportError:
        print(
            "Error: agisdk package not installed. Run: pip install agisdk",
            file=sys.stderr,
        )
        sys.exit(1)

    emitted = 0
    n_infeasible = 0
    n_llm = 0
    n_excluded = 0

    for task in all_tasks:
        # Drop tasks the benchmark itself marks as impossible.
        if not task.get("possible", True):
            n_infeasible += 1
            continue

        # Drop tasks that need an LLM judge — only deterministic evals are kept.
        if has_llm_eval(task):
            n_llm += 1
            continue

        website = task.get("website", {})
        if website.get("id") in EXCLUDED_WEBSITES:
            n_excluded += 1
            continue

        task_id = task["id"]
        goal = task.get("goal", "")
        start_url = website.get("url", "")

        if not start_url or not goal:
            print(f"Warning: Skipping {task_id} — missing url or goal", file=sys.stderr)
            continue

        entry = {
            "query_id": f"agisdk-{task_id}",
            "dataset": "agisdk-real",
            "query": goal,
            "graders": ["agisdk_state_diff"],
            "start_url": start_url,
            "metadata": {
                "original_task_id": task_id,
                "website": website.get("name", ""),
                "category": "agisdk-real",
                "additional": {
                    "agisdk_task_id": task_id,
                    "challenge_type": task.get("challengeType", "action"),
                    "difficulty": task.get("difficulty", "unknown"),
                    "similar_to": website.get("similarTo", ""),
                },
            },
        }

        print(json.dumps(entry))
        emitted += 1

    # Summary goes to stderr so stdout stays pure JSONL.
    print(
        f"Generated {emitted} tasks (skipped {n_infeasible} infeasible, "
        f"{n_llm} llm_boolean, {n_excluded} excluded sites)",
        file=sys.stderr,
    )


if __name__ == "__main__":
    main()
|
||||
118
packages/browseros-agent/apps/eval/scripts/build-infinity-dataset.py
vendored
Normal file
118
packages/browseros-agent/apps/eval/scripts/build-infinity-dataset.py
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Dataset generator for WebArena-Infinity benchmark.
|
||||
|
||||
Reads real-tasks.json from each app directory and outputs JSONL
|
||||
in the eval framework's TaskSchema format.
|
||||
|
||||
Usage:
|
||||
python build-infinity-dataset.py --apps-dir /path/to/webarena-infinity/apps
|
||||
python build-infinity-dataset.py --apps-dir /path/to/apps --apps gmail linear --difficulty medium
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def load_tasks(app_dir: str) -> list[dict]:
    """Load the task list from an app's real-tasks.json, or [] if the file is absent."""
    path = os.path.join(app_dir, "real-tasks.json")
    if not os.path.exists(path):
        print(f"Warning: No real-tasks.json found in {app_dir}", file=sys.stderr)
        return []
    with open(path) as fh:
        return json.load(fh)
|
||||
|
||||
|
||||
def build_task_entry(
    app_name: str,
    task: dict,
    base_port: int,
) -> dict:
    """Translate one WebArena-Infinity task record into the eval TaskSchema shape."""
    # Task records vary across apps; fall back through the known key aliases.
    tid = task.get("id", task.get("task_id", "unknown"))
    server = f"http://localhost:{base_port}"
    additional = {
        "app_name": app_name,
        "difficulty": task.get("difficulty", "unknown"),
        "verifier_path": task.get(
            "verify",
            task.get("verifier_path", f"real-tasks/{tid}.py"),
        ),
        "app_base_port": base_port,
    }
    return {
        "query_id": f"infinity-{app_name}-{tid}",
        "dataset": "webarena-infinity",
        "query": task.get("query", task.get("instruction", task.get("task", ""))),
        "graders": ["infinity_state"],
        "start_url": server,
        "setup_script": f"POST {server}/api/reset",
        "metadata": {
            "original_task_id": f"{app_name}-{tid}",
            "website": app_name,
            "category": "webarena-infinity",
            "additional": additional,
        },
    }
|
||||
|
||||
|
||||
def main():
    """CLI entry point: print one JSONL task line per matching app task."""
    parser = argparse.ArgumentParser(
        description="Generate JSONL dataset from WebArena-Infinity apps"
    )
    parser.add_argument(
        "--apps-dir",
        required=True,
        help="Path to webarena-infinity/apps/ directory",
    )
    parser.add_argument(
        "--apps",
        nargs="*",
        default=None,
        help="Filter to specific app names (default: all)",
    )
    parser.add_argument(
        "--difficulty",
        choices=["easy", "medium", "hard"],
        default=None,
        help="Filter by difficulty tier",
    )
    parser.add_argument(
        "--base-port",
        type=int,
        default=8000,
        help="Starting port number for apps (default: 8000)",
    )
    args = parser.parse_args()

    if not os.path.isdir(args.apps_dir):
        print(f"Error: {args.apps_dir} is not a directory", file=sys.stderr)
        sys.exit(1)

    candidates = sorted(os.listdir(args.apps_dir))
    if args.apps:
        candidates = [name for name in candidates if name in args.apps]

    # Each app directory consumes one port, even if all its tasks are
    # filtered out, so port assignment is stable across difficulty filters.
    next_port = args.base_port
    for app_name in candidates:
        app_path = os.path.join(args.apps_dir, app_name)
        if not os.path.isdir(app_path):
            continue

        for task in load_tasks(app_path):
            if args.difficulty and task.get("difficulty", "unknown") != args.difficulty:
                continue
            print(json.dumps(build_task_entry(app_name, task, next_port)))

        next_port += 1


if __name__ == "__main__":
    main()
|
||||
82
packages/browseros-agent/apps/eval/scripts/infinity-evaluate.py
vendored
Normal file
82
packages/browseros-agent/apps/eval/scripts/infinity-evaluate.py
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Evaluation helper for WebArena-Infinity verifier scripts.
|
||||
|
||||
Reads JSON from stdin with app_server_url, verifier_path, and task_id.
|
||||
Runs the verifier against the app server and outputs a JSON result.
|
||||
|
||||
Verifiers have the signature: verify(server_url: str) -> tuple[bool, str]
|
||||
They fetch /api/state internally and return (passed, message).
|
||||
|
||||
Usage:
|
||||
echo '{"app_server_url": "http://localhost:8000", "verifier_path": "/path/to/verify.py"}' | python infinity-evaluate.py
|
||||
"""
|
||||
|
||||
import importlib.util
|
||||
import json
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
|
||||
def load_verifier(verifier_path: str):
    """Dynamically import a verifier module from an arbitrary file path."""
    spec = importlib.util.spec_from_file_location("verifier", verifier_path)
    if spec is None or spec.loader is None:
        raise ImportError(f"Cannot load verifier from {verifier_path}")
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
|
||||
|
||||
|
||||
def main():
    """Read an eval request from stdin, run the verifier, print a JSON verdict."""

    def emit(passed: bool, message: str, exit_code=None):
        # Every branch prints the same {"pass", "reward", "message"} shape.
        print(json.dumps({
            "pass": passed,
            "reward": 1.0 if passed else 0.0,
            "message": message,
        }))
        if exit_code is not None:
            sys.exit(exit_code)

    try:
        payload = json.loads(sys.stdin.read())
    except json.JSONDecodeError as e:
        emit(False, f"Invalid JSON input: {e}", exit_code=1)

    server_url = payload.get("app_server_url", "")
    verifier_path = payload.get("verifier_path", "")

    if not server_url or not verifier_path:
        emit(False, "Missing app_server_url or verifier_path", exit_code=1)

    try:
        module = load_verifier(verifier_path)
        fn = getattr(module, "verify", None)
        if not callable(fn):
            raise AttributeError(
                f"Verifier has no verify() function. "
                f"Available: {[a for a in dir(module) if not a.startswith('_')]}"
            )

        # Verifiers take server_url and fetch state internally.
        outcome = fn(server_url)

        # Expected return is tuple[bool, str]; coerce anything else.
        if isinstance(outcome, tuple) and len(outcome) >= 2:
            passed, message = outcome[0], str(outcome[1])
        else:
            passed, message = bool(outcome), str(outcome)

    except Exception as e:
        emit(False, f"Verifier error: {e}\n{traceback.format_exc()}", exit_code=1)

    emit(passed, message)


if __name__ == "__main__":
    main()
|
||||
@@ -59,6 +59,8 @@ interface RunSummary {
|
||||
}
|
||||
|
||||
const PASS_FAIL_GRADER_ORDER = [
|
||||
'agisdk_state_diff',
|
||||
'infinity_state',
|
||||
'performance_grader',
|
||||
'webvoyager_grader',
|
||||
'fara_combined',
|
||||
|
||||
202
packages/browseros-agent/apps/eval/src/graders/benchmark/agisdk-state-diff.ts
vendored
Normal file
202
packages/browseros-agent/apps/eval/src/graders/benchmark/agisdk-state-diff.ts
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
import { spawn } from 'node:child_process'
|
||||
import { join } from 'node:path'
|
||||
import type { GraderResult } from '../../types'
|
||||
import { callMcpTool } from '../../utils/mcp-client'
|
||||
import type { Grader, GraderInput } from '../types'
|
||||
|
||||
// Absolute path to the Python evaluation helper, resolved relative to this
// module (src/graders/benchmark → three levels up to the app root, then
// scripts/agisdk-evaluate.py).
const EVAL_SCRIPT = join(
  import.meta.dirname,
  '..',
  '..',
  '..',
  'scripts',
  'agisdk-evaluate.py',
)
|
||||
|
||||
/**
 * Grades AGI SDK / REAL Bench tasks: reads the clone site's client-rendered
 * /finish state-diff page through the MCP-controlled browser, then delegates
 * scoring to the Python evaluator (scripts/agisdk-evaluate.py).
 */
export class AgisdkStateDiffGrader implements Grader {
  name = 'agisdk_state_diff'

  /**
   * Grade one task: derive the clone-site origin, fetch its /finish JSON
   * state, and score it via the Python evaluator. Never throws — every
   * failure path is returned as a score-0 GraderResult.
   */
  async grade(input: GraderInput): Promise<GraderResult> {
    const taskId = this.extractTaskId(input.task.query_id)
    const startUrl = this.extractStartUrl(input)
    // Prefer the per-run MCP URL; fall back to the local BrowserOS server.
    const mcpEndpoint =
      input.mcpUrl ||
      `${process.env.BROWSEROS_SERVER_URL || 'http://127.0.0.1:9110'}/mcp`

    if (!startUrl) {
      return {
        score: 0,
        pass: false,
        reasoning: 'Could not determine clone site URL from task',
      }
    }

    const origin = new URL(startUrl).origin

    let envState: Record<string, unknown>
    try {
      envState = await this.fetchFinishState(origin, mcpEndpoint)
    } catch (error) {
      return {
        score: 0,
        pass: false,
        reasoning: `Failed to fetch /finish endpoint: ${error instanceof Error ? error.message : String(error)}`,
        details: { origin, error: true },
      }
    }

    try {
      const result = await this.runPythonEvaluator(
        taskId,
        envState,
        input.finalAnswer || '',
      )
      return {
        score: result.reward,
        pass: result.pass,
        reasoning:
          result.message ||
          (result.pass ? 'All criteria passed' : 'Some criteria failed'),
        details: {
          reward: result.reward,
          per_criterion: result.per_criterion,
          origin,
          agisdk_task_id: taskId,
        },
      }
    } catch (error) {
      return {
        score: 0,
        pass: false,
        reasoning: `Python evaluator error: ${error instanceof Error ? error.message : String(error)}`,
        details: { error: true },
      }
    }
  }

  // Strip the framework's "agisdk-" prefix to recover the native task id.
  private extractTaskId(queryId: string): string {
    return queryId.replace(/^agisdk-/, '')
  }

  private extractStartUrl(input: GraderInput): string | null {
    // Derive from task_id: "dashdish-10" → "https://evals-dashdish.vercel.app"
    // Task IDs are "{site}-{number}" where site may contain hyphens (e.g. "fly-unified-5")
    const taskId = this.extractTaskId(input.task.query_id)
    const siteId = taskId.replace(/-\d+$/, '')
    if (siteId) return `https://evals-${siteId}.vercel.app`

    // Fallback: search messages for vercel.app URLs
    // NOTE(review): siteId is empty only when taskId itself is empty, so this
    // loop appears unreachable for well-formed query_ids — confirm intent.
    for (const msg of input.messages) {
      const text =
        msg.type === 'user'
          ? msg.content
          : msg.type === 'tool-input-available'
            ? JSON.stringify(msg.input)
            : ''
      const urlMatch = text.match(/https?:\/\/[^\s"']+\.vercel\.app/)
      if (urlMatch) return urlMatch[0]
    }

    return null
  }

  /**
   * Navigate the MCP-controlled browser to {origin}/finish, poll the page for
   * a <pre> element containing the JSON state diff (rendered client-side),
   * then parse and return it.
   * Throws when the page never renders JSON or returns no text content.
   */
  private async fetchFinishState(
    origin: string,
    mcpEndpoint: string,
  ): Promise<Record<string, unknown>> {
    const finishUrl = `${origin}/finish`

    // Navigate browser to /finish page (state diff is rendered client-side)
    await callMcpTool(mcpEndpoint, 'navigate_page', {
      url: finishUrl,
      page: 1,
    })

    // Wait for the page to render, then extract JSON from <pre> element
    // (polls every 500 ms, up to 20 attempts ≈ 10 s).
    const result = await callMcpTool(mcpEndpoint, 'evaluate_script', {
      page: 1,
      expression: `
        new Promise((resolve, reject) => {
          let attempts = 0;
          const check = () => {
            const pre = document.querySelector('pre');
            if (pre && pre.textContent.trim().startsWith('{')) {
              resolve(pre.textContent);
            } else if (++attempts > 20) {
              reject(new Error('Timed out waiting for <pre> JSON on /finish'));
            } else {
              setTimeout(check, 500);
            }
          };
          check();
        })
      `,
    })

    const textContent = result.content?.find(
      (c: { type: string }) => c.type === 'text',
    )
    if (!textContent?.text) {
      throw new Error('No text content returned from /finish page')
    }

    return JSON.parse(textContent.text) as Record<string, unknown>
  }

  /**
   * Spawn scripts/agisdk-evaluate.py, write the request JSON to its stdin,
   * and parse the verdict JSON from its stdout. Rejects on nonzero exit,
   * spawn failure, or unparseable output.
   */
  private runPythonEvaluator(
    taskId: string,
    envState: Record<string, unknown>,
    modelResponse: string,
  ): Promise<{
    reward: number
    pass: boolean
    message: string
    per_criterion: unknown[]
  }> {
    return new Promise((resolve, reject) => {
      const proc = spawn('python3', [EVAL_SCRIPT], {
        stdio: ['pipe', 'pipe', 'pipe'],
      })

      const inputData = JSON.stringify({
        task_id: taskId,
        env_state: envState,
        model_response: modelResponse,
      })

      let stdout = ''
      let stderr = ''

      proc.stdout.on('data', (data: Buffer) => {
        stdout += data.toString()
      })

      proc.stderr.on('data', (data: Buffer) => {
        stderr += data.toString()
      })

      proc.on('close', (code) => {
        if (code !== 0) {
          reject(
            new Error(`Python evaluator exited with code ${code}: ${stderr}`),
          )
          return
        }

        try {
          const result = JSON.parse(stdout.trim())
          resolve(result)
        } catch {
          reject(new Error(`Failed to parse evaluator output: ${stdout}`))
        }
      })

      proc.on('error', (err) => {
        reject(new Error(`Failed to spawn Python evaluator: ${err.message}`))
      })

      proc.stdin.write(inputData)
      proc.stdin.end()
    })
  }
}
|
||||
134
packages/browseros-agent/apps/eval/src/graders/benchmark/infinity-state.ts
vendored
Normal file
134
packages/browseros-agent/apps/eval/src/graders/benchmark/infinity-state.ts
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
import { join, resolve } from 'node:path'
|
||||
import type { GraderResult } from '../../types'
|
||||
import type { Grader, GraderInput } from '../types'
|
||||
|
||||
// Request payload piped to scripts/infinity-evaluate.py on stdin.
interface InfinityEvalInput {
  app_server_url: string // base URL of the running infinity app server
  verifier_path: string // absolute path to the task's verifier .py file
  task_id: string // full eval query_id, e.g. "infinity-gmail-task_h1"
}

// Verdict JSON printed by infinity-evaluate.py on stdout.
interface InfinityEvalOutput {
  pass: boolean // true when the verifier's verify() reported success
  reward: number // 1.0 on pass, 0.0 otherwise (see infinity-evaluate.py)
  message: string // human-readable detail from the verifier
}
|
||||
|
||||
// Absolute path to the Python evaluation helper, resolved relative to this
// module. NOTE(review): `import.meta.dir` is Bun-specific, while the sibling
// agisdk grader uses `import.meta.dirname` — confirm this file only ever runs
// under Bun (it also uses Bun.spawn below).
const EVAL_SCRIPT = resolve(
  import.meta.dir,
  '../../../scripts/infinity-evaluate.py',
)
|
||||
|
||||
/**
 * Grades WebArena-Infinity tasks by running the benchmark's own Python
 * verifier (real-tasks/{task_id}.py) against the task's app server via
 * scripts/infinity-evaluate.py.
 */
export class InfinityStateGrader implements Grader {
  name = 'infinity_state'

  /**
   * Grade one task: parse the query_id into app name + task id, locate the
   * verifier inside $WEBARENA_INFINITY_DIR, run it via the Python helper.
   * Never throws — every failure path is returned as a score-0 GraderResult.
   */
  async grade(input: GraderInput): Promise<GraderResult> {
    const parsed = this.parseQueryId(input.task.query_id)
    if (!parsed) {
      return {
        score: 0,
        pass: false,
        reasoning: `Cannot parse query_id "${input.task.query_id}" — expected format: infinity-{app}-{task_id}`,
      }
    }

    const appServerUrl = this.resolveAppServerUrl(input)
    if (!appServerUrl) {
      return {
        score: 0,
        pass: false,
        reasoning: 'Cannot determine app server URL',
      }
    }

    const infinityDir = process.env.WEBARENA_INFINITY_DIR
    if (!infinityDir) {
      return {
        score: 0,
        pass: false,
        reasoning:
          'WEBARENA_INFINITY_DIR env var not set. Point it to the webarena-infinity repo root.',
      }
    }

    // Verifier layout: {repo}/apps/{app}/real-tasks/{task_id}.py
    const verifierPath = join(
      infinityDir,
      'apps',
      parsed.appName,
      'real-tasks',
      `${parsed.taskId}.py`,
    )

    const evalInput: InfinityEvalInput = {
      app_server_url: appServerUrl,
      verifier_path: verifierPath,
      task_id: input.task.query_id,
    }

    try {
      const result = await this.runPythonEvaluator(evalInput)
      return {
        score: result.pass ? 1 : 0,
        pass: result.pass,
        reasoning: result.message,
        details: {
          reward: result.reward,
          app_name: parsed.appName,
          app_server_url: appServerUrl,
        },
      }
    } catch (error) {
      return {
        score: 0,
        pass: false,
        reasoning: `Evaluator process error: ${error instanceof Error ? error.message : String(error)}`,
      }
    }
  }

  private parseQueryId(
    queryId: string,
  ): { appName: string; taskId: string } | null {
    // Task IDs start with "task_", app names may contain hyphens
    // e.g. "infinity-elation-prescriptions-task_h69"
    const match = queryId.match(/^infinity-(.+)-(task_.+)$/)
    if (!match) return null
    return { appName: match[1], taskId: match[2] }
  }

  // Resolution order: executor-provided URL, then INFINITY_APP_URL env var.
  private resolveAppServerUrl(input: GraderInput): string | null {
    // Passed directly from task executor (started by InfinityAppManager)
    if (input.infinityAppUrl) return input.infinityAppUrl

    // Fallback: env var for manual testing
    if (process.env.INFINITY_APP_URL) return process.env.INFINITY_APP_URL

    return null
  }

  /**
   * Spawn the Python helper, feed it the request JSON on stdin, and parse
   * the verdict JSON from its stdout. Throws on nonzero exit (the helper
   * exits 1 on verifier errors — see infinity-evaluate.py).
   */
  private async runPythonEvaluator(
    evalInput: InfinityEvalInput,
  ): Promise<InfinityEvalOutput> {
    const proc = Bun.spawn(['python3', EVAL_SCRIPT], {
      stdin: 'pipe',
      stdout: 'pipe',
      stderr: 'pipe',
    })

    const inputJson = JSON.stringify(evalInput)
    proc.stdin.write(inputJson)
    proc.stdin.end()

    const stdout = await new Response(proc.stdout).text()
    const stderr = await new Response(proc.stderr).text()
    const exitCode = await proc.exited

    if (exitCode !== 0) {
      throw new Error(
        `Python evaluator exited with code ${exitCode}: ${stderr || stdout}`,
      )
    }

    return JSON.parse(stdout.trim()) as InfinityEvalOutput
  }
}
||||
@@ -1,4 +1,6 @@
|
||||
import type { GraderResult } from '../types'
|
||||
import { AgisdkStateDiffGrader } from './benchmark/agisdk-state-diff'
|
||||
import { InfinityStateGrader } from './benchmark/infinity-state'
|
||||
import { Mind2WebJudgeGrader } from './benchmark/mind2web'
|
||||
import { WebVoyagerGrader } from './benchmark/webvoyager'
|
||||
import { FaraAlignmentGrader } from './fara/alignment'
|
||||
@@ -19,7 +21,13 @@ export function createGrader(
|
||||
options: GraderOptions | null,
|
||||
): Grader | null {
|
||||
switch (name) {
|
||||
// Benchmark graders
|
||||
// Deterministic benchmark graders (no LLM judge)
|
||||
case 'agisdk_state_diff':
|
||||
return new AgisdkStateDiffGrader()
|
||||
case 'infinity_state':
|
||||
return new InfinityStateGrader()
|
||||
|
||||
// LLM-based benchmark graders
|
||||
case 'webvoyager_grader':
|
||||
if (!options?.apiKey) return null
|
||||
return new WebVoyagerGrader(
|
||||
@@ -107,10 +115,12 @@ export async function runGraders(
|
||||
|
||||
// Export grader classes for direct use
|
||||
export {
|
||||
AgisdkStateDiffGrader,
|
||||
FaraAlignmentGrader,
|
||||
FaraCombinedGrader,
|
||||
FaraMultimodalGrader,
|
||||
FaraRubricGrader,
|
||||
InfinityStateGrader,
|
||||
Mind2WebJudgeGrader,
|
||||
PerformanceGrader,
|
||||
WebVoyagerGrader,
|
||||
|
||||
@@ -11,6 +11,8 @@ export interface GraderInput {
|
||||
finalAnswer: string | null
|
||||
expectedAnswer?: string | null
|
||||
outputDir: string
|
||||
mcpUrl?: string
|
||||
infinityAppUrl?: string
|
||||
}
|
||||
|
||||
export interface Grader {
|
||||
|
||||
89
packages/browseros-agent/apps/eval/src/runner/infinity-app-manager.ts
vendored
Normal file
89
packages/browseros-agent/apps/eval/src/runner/infinity-app-manager.ts
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
/**
|
||||
* Manages WebArena-Infinity app server lifecycle per task.
|
||||
*
|
||||
* Each worker gets a unique port: base_port + worker_index.
|
||||
* Server is started fresh before each task and killed after,
|
||||
* guaranteeing clean state.
|
||||
*/
|
||||
|
||||
import { type ChildProcess, spawn } from 'node:child_process'
|
||||
import { join } from 'node:path'
|
||||
|
||||
export class InfinityAppManager {
|
||||
private proc: ChildProcess | null = null
|
||||
private port: number
|
||||
private infinityDir: string
|
||||
|
||||
constructor(
|
||||
private workerIndex: number,
|
||||
private basePort: number = 8000,
|
||||
) {
|
||||
this.port = basePort + workerIndex
|
||||
this.infinityDir = process.env.WEBARENA_INFINITY_DIR || ''
|
||||
}
|
||||
|
||||
async startApp(appName: string): Promise<string> {
|
||||
await this.stop()
|
||||
|
||||
if (!this.infinityDir) {
|
||||
throw new Error('WEBARENA_INFINITY_DIR env var not set')
|
||||
}
|
||||
|
||||
const serverScript = join(this.infinityDir, 'apps', appName, 'server.py')
|
||||
this.proc = spawn('python3', [serverScript, '--port', String(this.port)], {
|
||||
stdio: ['ignore', 'pipe', 'pipe'],
|
||||
cwd: join(this.infinityDir, 'apps', appName),
|
||||
})
|
||||
|
||||
// Wait for server to be ready
|
||||
const url = `http://localhost:${this.port}`
|
||||
await this.waitForReady(url)
|
||||
return url
|
||||
}
|
||||
|
||||
async stop(): Promise<void> {
|
||||
if (this.proc) {
|
||||
this.proc.kill('SIGTERM')
|
||||
await new Promise<void>((resolve) => {
|
||||
const timeout = setTimeout(() => {
|
||||
this.proc?.kill('SIGKILL')
|
||||
resolve()
|
||||
}, 3000)
|
||||
this.proc?.on('exit', () => {
|
||||
clearTimeout(timeout)
|
||||
resolve()
|
||||
})
|
||||
})
|
||||
this.proc = null
|
||||
}
|
||||
}
|
||||
|
||||
getPort(): number {
|
||||
return this.port
|
||||
}
|
||||
|
||||
getUrl(): string {
|
||||
return `http://localhost:${this.port}`
|
||||
}
|
||||
|
||||
private async waitForReady(
|
||||
url: string,
|
||||
maxAttempts = 30,
|
||||
intervalMs = 500,
|
||||
): Promise<void> {
|
||||
for (let i = 0; i < maxAttempts; i++) {
|
||||
try {
|
||||
const resp = await fetch(url, {
|
||||
signal: AbortSignal.timeout(2000),
|
||||
})
|
||||
if (resp.ok) return
|
||||
} catch {
|
||||
// Server not ready yet
|
||||
}
|
||||
await new Promise((r) => setTimeout(r, intervalMs))
|
||||
}
|
||||
throw new Error(
|
||||
`Infinity app server not ready after ${maxAttempts * intervalMs}ms on port ${this.port}`,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -160,6 +160,7 @@ export class ParallelExecutor {
|
||||
}
|
||||
const executor = createTaskExecutor(
|
||||
workerConfig,
|
||||
workerIndex,
|
||||
this.config.outputDir,
|
||||
this.config.graderOptions,
|
||||
this.config.onEvent,
|
||||
|
||||
@@ -9,6 +9,7 @@ import {
|
||||
import { runGraders } from '../graders/registry'
|
||||
import type { ErrorSource, EvalConfig, GraderResult, Task } from '../types'
|
||||
import { callMcpTool } from '../utils/mcp-client'
|
||||
import { InfinityAppManager } from './infinity-app-manager'
|
||||
import type { GraderOptions, TaskResult } from './types'
|
||||
|
||||
// ============================================================================
|
||||
@@ -46,6 +47,7 @@ export interface TaskExecutorDeps {
|
||||
export class TaskExecutor {
|
||||
constructor(
|
||||
private readonly config: EvalConfig,
|
||||
private readonly workerIndex: number,
|
||||
private readonly outputDir: string,
|
||||
private readonly deps: TaskExecutorDeps,
|
||||
) {}
|
||||
@@ -101,6 +103,35 @@ export class TaskExecutor {
|
||||
// Resolve page ID once — fresh browser has exactly one page
|
||||
const pageId = await this.resolveInitialPageId(mcpUrl)
|
||||
|
||||
// For Infinity tasks, start a fresh app server per task
|
||||
let infinityManager: InfinityAppManager | null = null
|
||||
let actualStartUrl = task.start_url
|
||||
|
||||
if (task.dataset === 'webarena-infinity') {
|
||||
const appName = (task.metadata?.additional as Record<string, unknown>)
|
||||
?.app_name as string
|
||||
const appBasePort =
|
||||
((task.metadata?.additional as Record<string, unknown>)
|
||||
?.app_base_port as number) || 8000
|
||||
|
||||
if (appName && process.env.WEBARENA_INFINITY_DIR) {
|
||||
infinityManager = new InfinityAppManager(this.workerIndex, appBasePort)
|
||||
try {
|
||||
actualStartUrl = await infinityManager.startApp(appName)
|
||||
console.log(
|
||||
` Infinity app "${appName}" started on port ${infinityManager.getPort()}`,
|
||||
)
|
||||
} catch (error) {
|
||||
throw new TaskExecutionError(
|
||||
`Failed to start Infinity app: ${error instanceof Error ? error.message : String(error)}`,
|
||||
task,
|
||||
'navigation',
|
||||
error instanceof Error ? error : undefined,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
// Phase 1: Set viewport + navigate to start URL
|
||||
try {
|
||||
@@ -114,10 +145,10 @@ export class TaskExecutor {
|
||||
)
|
||||
}
|
||||
|
||||
if (task.start_url && task.start_url !== 'about:blank') {
|
||||
if (actualStartUrl && actualStartUrl !== 'about:blank') {
|
||||
try {
|
||||
await callMcpTool(mcpUrl, 'navigate_page', {
|
||||
url: task.start_url,
|
||||
url: actualStartUrl,
|
||||
page: pageId,
|
||||
})
|
||||
} catch (error) {
|
||||
@@ -134,7 +165,11 @@ export class TaskExecutor {
|
||||
const agentResult = await this.executeAgent(task, pageId)
|
||||
|
||||
// Phase 3: Run graders
|
||||
const graderResults = await this.runGraders(task, agentResult)
|
||||
const graderResults = await this.runGraders(
|
||||
task,
|
||||
agentResult,
|
||||
infinityManager?.getUrl(),
|
||||
)
|
||||
|
||||
const status =
|
||||
agentResult.metadata.termination_reason === 'timeout'
|
||||
@@ -169,6 +204,11 @@ export class TaskExecutor {
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
|
||||
// Stop Infinity app server if running
|
||||
if (infinityManager) {
|
||||
await infinityManager.stop().catch(() => {})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,6 +249,7 @@ export class TaskExecutor {
|
||||
private async runGraders(
|
||||
task: Task,
|
||||
agentResult: AgentResult,
|
||||
infinityAppUrl?: string,
|
||||
): Promise<Record<string, GraderResult>> {
|
||||
const configGraders = this.config.graders ?? []
|
||||
const taskGraders = task.graders ?? []
|
||||
@@ -234,6 +275,8 @@ export class TaskExecutor {
|
||||
expectedAnswer: (task.metadata?.additional as Record<string, unknown>)
|
||||
?.answer as string | undefined,
|
||||
outputDir: join(this.outputDir, task.query_id),
|
||||
mcpUrl: `${this.config.browseros.server_url}/mcp`,
|
||||
infinityAppUrl,
|
||||
},
|
||||
this.deps.graderOptions,
|
||||
)
|
||||
@@ -269,11 +312,12 @@ export class TaskExecutor {
|
||||
|
||||
export function createTaskExecutor(
|
||||
config: EvalConfig,
|
||||
workerIndex: number,
|
||||
outputDir: string,
|
||||
graderOptions: GraderOptions | null,
|
||||
onEvent?: (taskId: string, event: Record<string, unknown>) => void,
|
||||
): TaskExecutor {
|
||||
return new TaskExecutor(config, outputDir, {
|
||||
return new TaskExecutor(config, workerIndex, outputDir, {
|
||||
graderOptions,
|
||||
onEvent,
|
||||
})
|
||||
|
||||
@@ -100,6 +100,8 @@ export interface TaskResultSummary {
|
||||
// ============================================================================
|
||||
|
||||
export const PASS_FAIL_GRADER_ORDER = [
|
||||
'agisdk_state_diff',
|
||||
'infinity_state',
|
||||
'performance_grader',
|
||||
'webvoyager_grader',
|
||||
'fara_combined',
|
||||
|
||||
@@ -20,11 +20,171 @@ import {
|
||||
OpenClawSessionNotFoundError,
|
||||
} from '../services/openclaw/errors'
|
||||
import { getOpenClawCliProvider } from '../services/openclaw/openclaw-cli-providers/registry'
|
||||
import type { OpenClawChatContentPart } from '../services/openclaw/openclaw-http-client'
|
||||
import { isUnsupportedOpenClawProviderError } from '../services/openclaw/openclaw-provider-map'
|
||||
import {
|
||||
getOpenClawService,
|
||||
normalizeBrowserOSChatSessionKey,
|
||||
} from '../services/openclaw/openclaw-service'
|
||||
import type { QueuedItemPublic } from '../services/queue'
|
||||
import { getOutboundQueueService } from '../services/queue'
|
||||
|
||||
/**
|
||||
* Inbound attachment shapes the chat route accepts. Images travel as
|
||||
* data: URLs (the gateway is on 127.0.0.1 so we don't pay public-network
|
||||
* cost for the base64 overhead). Files arrive with their text already
|
||||
* extracted on the client — we just inline them as a fenced text part on
|
||||
* the user message.
|
||||
*/
|
||||
type ImageAttachment = {
|
||||
kind: 'image'
|
||||
mediaType: string
|
||||
dataUrl: string
|
||||
name?: string
|
||||
}
|
||||
type FileAttachment = {
|
||||
kind: 'file'
|
||||
mediaType: string
|
||||
name: string
|
||||
text: string
|
||||
}
|
||||
type ChatAttachment = ImageAttachment | FileAttachment
|
||||
|
||||
const MAX_ATTACHMENTS = 10
|
||||
const MAX_IMAGE_BYTES = 5 * 1024 * 1024 // 5 MB after compression
|
||||
// data: URLs encode bytes as base64 (~4/3 inflation) plus a small media-type
|
||||
// prefix; cap the encoded string against that, not 2× the byte budget.
|
||||
const MAX_IMAGE_DATA_URL_LENGTH = Math.ceil(MAX_IMAGE_BYTES * (4 / 3)) + 100
|
||||
const MAX_FILE_TEXT_BYTES = 1 * 1024 * 1024 // 1 MB extracted text
|
||||
const ALLOWED_IMAGE_MEDIA_TYPES = new Set([
|
||||
'image/png',
|
||||
'image/jpeg',
|
||||
'image/jpg',
|
||||
'image/webp',
|
||||
'image/gif',
|
||||
])
|
||||
const ALLOWED_FILE_MEDIA_TYPE_PREFIXES = ['text/', 'application/json']
|
||||
|
||||
function validateChatAttachments(input: unknown): {
|
||||
attachments: ChatAttachment[] | null
|
||||
error: string | null
|
||||
} {
|
||||
if (input === undefined || input === null) {
|
||||
return { attachments: null, error: null }
|
||||
}
|
||||
if (!Array.isArray(input)) {
|
||||
return { attachments: null, error: 'attachments must be an array' }
|
||||
}
|
||||
if (input.length > MAX_ATTACHMENTS) {
|
||||
return {
|
||||
attachments: null,
|
||||
error: `at most ${MAX_ATTACHMENTS} attachments are allowed per message`,
|
||||
}
|
||||
}
|
||||
|
||||
const result: ChatAttachment[] = []
|
||||
for (const raw of input) {
|
||||
if (!raw || typeof raw !== 'object') {
|
||||
return { attachments: null, error: 'invalid attachment entry' }
|
||||
}
|
||||
const entry = raw as Record<string, unknown>
|
||||
if (entry.kind === 'image') {
|
||||
const mediaType =
|
||||
typeof entry.mediaType === 'string' ? entry.mediaType : ''
|
||||
const dataUrl = typeof entry.dataUrl === 'string' ? entry.dataUrl : ''
|
||||
if (!ALLOWED_IMAGE_MEDIA_TYPES.has(mediaType)) {
|
||||
return {
|
||||
attachments: null,
|
||||
error: `unsupported image type: ${mediaType || 'unknown'}`,
|
||||
}
|
||||
}
|
||||
if (!dataUrl.startsWith('data:')) {
|
||||
return {
|
||||
attachments: null,
|
||||
error: 'image attachment must include a data: URL',
|
||||
}
|
||||
}
|
||||
if (dataUrl.length > MAX_IMAGE_DATA_URL_LENGTH) {
|
||||
return {
|
||||
attachments: null,
|
||||
error: `image exceeds ${MAX_IMAGE_BYTES} bytes`,
|
||||
}
|
||||
}
|
||||
result.push({
|
||||
kind: 'image',
|
||||
mediaType,
|
||||
dataUrl,
|
||||
name: typeof entry.name === 'string' ? entry.name : undefined,
|
||||
})
|
||||
continue
|
||||
}
|
||||
if (entry.kind === 'file') {
|
||||
const mediaType =
|
||||
typeof entry.mediaType === 'string' ? entry.mediaType : ''
|
||||
const name = typeof entry.name === 'string' ? entry.name : ''
|
||||
const text = typeof entry.text === 'string' ? entry.text : ''
|
||||
const allowed = ALLOWED_FILE_MEDIA_TYPE_PREFIXES.some((prefix) =>
|
||||
mediaType.startsWith(prefix),
|
||||
)
|
||||
if (!allowed) {
|
||||
return {
|
||||
attachments: null,
|
||||
error: `unsupported file type: ${mediaType || 'unknown'}`,
|
||||
}
|
||||
}
|
||||
if (!name) {
|
||||
return {
|
||||
attachments: null,
|
||||
error: 'file attachment must include a name',
|
||||
}
|
||||
}
|
||||
if (text.length > MAX_FILE_TEXT_BYTES) {
|
||||
return {
|
||||
attachments: null,
|
||||
error: `file "${name}" exceeds ${MAX_FILE_TEXT_BYTES} bytes`,
|
||||
}
|
||||
}
|
||||
result.push({ kind: 'file', mediaType, name, text })
|
||||
continue
|
||||
}
|
||||
return {
|
||||
attachments: null,
|
||||
error: 'attachment kind must be "image" or "file"',
|
||||
}
|
||||
}
|
||||
return { attachments: result, error: null }
|
||||
}
|
||||
|
||||
function buildMessagePartsFromAttachments(
|
||||
message: string,
|
||||
attachments: ChatAttachment[],
|
||||
): { text: string; parts: OpenClawChatContentPart[] | undefined } {
|
||||
const images = attachments.filter(
|
||||
(a): a is ImageAttachment => a.kind === 'image',
|
||||
)
|
||||
const files = attachments.filter(
|
||||
(a): a is FileAttachment => a.kind === 'file',
|
||||
)
|
||||
|
||||
const fileBlocks = files
|
||||
.map(
|
||||
(f) => `<attachment name="${f.name}" mediaType="${f.mediaType}">
|
||||
${f.text}
|
||||
</attachment>`,
|
||||
)
|
||||
.join('\n\n')
|
||||
const text = fileBlocks ? `${message}\n\n${fileBlocks}`.trim() : message
|
||||
|
||||
if (images.length === 0) {
|
||||
return { text, parts: undefined }
|
||||
}
|
||||
|
||||
const parts: OpenClawChatContentPart[] = [{ type: 'text', text }]
|
||||
for (const image of images) {
|
||||
parts.push({ type: 'image_url', image_url: { url: image.dataUrl } })
|
||||
}
|
||||
return { text, parts }
|
||||
}
|
||||
|
||||
function getCreateAgentValidationError(body: { name?: string }): string | null {
|
||||
if (!body.name?.trim()) {
|
||||
@@ -284,15 +444,89 @@ export function createOpenClawRoutes() {
|
||||
}
|
||||
})
|
||||
|
||||
.get('/dashboard', (c) => {
|
||||
try {
|
||||
const dashboard = getOpenClawService().getDashboard()
|
||||
return c.json(dashboard)
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err)
|
||||
return c.json({ error: message }, 500)
|
||||
}
|
||||
})
|
||||
|
||||
.get('/dashboard/stream', (c) => {
|
||||
c.header('Content-Type', 'text/event-stream')
|
||||
c.header('Cache-Control', 'no-cache')
|
||||
c.header('Connection', 'keep-alive')
|
||||
|
||||
return stream(c, async (s) => {
|
||||
const encoder = new TextEncoder()
|
||||
|
||||
// Send initial snapshot
|
||||
try {
|
||||
const dashboard = getOpenClawService().getDashboard()
|
||||
await s.write(
|
||||
encoder.encode(
|
||||
`event: snapshot\ndata: ${JSON.stringify(dashboard)}\n\n`,
|
||||
),
|
||||
)
|
||||
} catch {}
|
||||
|
||||
// Subscribe to live status changes
|
||||
const unsubscribe = getOpenClawService().onAgentStatusChange(
|
||||
(agentId, entry) => {
|
||||
const event = {
|
||||
agentId,
|
||||
status: entry.status,
|
||||
currentTool: entry.currentTool,
|
||||
error: entry.error,
|
||||
timestamp: entry.lastEventAt,
|
||||
}
|
||||
s.write(
|
||||
encoder.encode(
|
||||
`event: status\ndata: ${JSON.stringify(event)}\n\n`,
|
||||
),
|
||||
).catch(() => {})
|
||||
},
|
||||
)
|
||||
|
||||
// Heartbeat every 15s to keep connection alive
|
||||
const heartbeat = setInterval(() => {
|
||||
s.write(
|
||||
encoder.encode(
|
||||
`event: heartbeat\ndata: ${JSON.stringify({ ts: Date.now() })}\n\n`,
|
||||
),
|
||||
).catch(() => {})
|
||||
}, 15_000)
|
||||
|
||||
// Wait until client disconnects
|
||||
try {
|
||||
await new Promise<void>((resolve) => {
|
||||
s.onAbort(() => resolve())
|
||||
})
|
||||
} finally {
|
||||
unsubscribe()
|
||||
clearInterval(heartbeat)
|
||||
}
|
||||
})
|
||||
})
|
||||
.post('/agents/:id/chat', async (c) => {
|
||||
const { id } = c.req.param()
|
||||
const body = await c.req.json<{
|
||||
message: string
|
||||
sessionKey?: string
|
||||
history?: MonitoringChatTurn[]
|
||||
attachments?: unknown
|
||||
}>()
|
||||
|
||||
if (!body.message?.trim()) {
|
||||
const trimmedMessage = body.message?.trim() ?? ''
|
||||
const attachmentValidation = validateChatAttachments(body.attachments)
|
||||
if (attachmentValidation.error) {
|
||||
return c.json({ error: attachmentValidation.error }, 400)
|
||||
}
|
||||
const attachments = attachmentValidation.attachments ?? []
|
||||
// Either a non-empty text body or at least one attachment is required.
|
||||
if (!trimmedMessage && attachments.length === 0) {
|
||||
return c.json({ error: 'Message is required' }, 400)
|
||||
}
|
||||
|
||||
@@ -309,19 +543,35 @@ export function createOpenClawRoutes() {
|
||||
),
|
||||
)
|
||||
: []
|
||||
if (getMonitoringService().getActiveSessionId(id)) {
|
||||
|
||||
// Replace the immediate 409 with a bounded wait so back-to-back user
|
||||
// sends or a cron / hook turn that's still finishing don't reject the
|
||||
// user-chat outright. The client-side outbound queue (Feature 2) keeps
|
||||
// the per-agent send rate at 1, so this only kicks in for cross-source
|
||||
// contention.
|
||||
try {
|
||||
await getMonitoringService().waitForSessionFree(id, {
|
||||
timeoutMs: 30_000,
|
||||
})
|
||||
} catch (err) {
|
||||
return c.json(
|
||||
{
|
||||
error:
|
||||
'A monitored chat session is already active for this agent. Wait for it to finish before starting another.',
|
||||
err instanceof Error
|
||||
? err.message
|
||||
: 'Agent is busy. Try again shortly.',
|
||||
},
|
||||
409,
|
||||
503,
|
||||
)
|
||||
}
|
||||
|
||||
const { text: composedMessage, parts: messageParts } =
|
||||
buildMessagePartsFromAttachments(trimmedMessage, attachments)
|
||||
|
||||
const monitoringContext = await getMonitoringService().startSession({
|
||||
agentId: id,
|
||||
sessionKey,
|
||||
originalPrompt: body.message.trim(),
|
||||
originalPrompt: composedMessage,
|
||||
chatHistory: history,
|
||||
})
|
||||
|
||||
@@ -329,8 +579,9 @@ export function createOpenClawRoutes() {
|
||||
const eventStream = await getOpenClawService().chatStream(
|
||||
id,
|
||||
sessionKey,
|
||||
body.message,
|
||||
composedMessage,
|
||||
history,
|
||||
{ messageParts },
|
||||
)
|
||||
|
||||
c.header('Content-Type', 'text/event-stream')
|
||||
@@ -407,6 +658,110 @@ export function createOpenClawRoutes() {
|
||||
}
|
||||
})
|
||||
|
||||
.post('/agents/:id/queue', async (c) => {
|
||||
const { id } = c.req.param()
|
||||
const body = await c.req.json<{
|
||||
message: string
|
||||
sessionKey?: string
|
||||
history?: MonitoringChatTurn[]
|
||||
attachments?: unknown
|
||||
// Optional client-provided id — when set, the queue uses it as
|
||||
// the canonical item id so the browser's optimistic row and the
|
||||
// SSE snapshot reconcile on the same key.
|
||||
id?: string
|
||||
}>()
|
||||
const trimmedMessage = body.message?.trim() ?? ''
|
||||
const attachmentValidation = validateChatAttachments(body.attachments)
|
||||
if (attachmentValidation.error) {
|
||||
return c.json({ error: attachmentValidation.error }, 400)
|
||||
}
|
||||
const attachments = attachmentValidation.attachments ?? []
|
||||
if (!trimmedMessage && attachments.length === 0) {
|
||||
return c.json({ error: 'Message is required' }, 400)
|
||||
}
|
||||
|
||||
const sessionKey = body.sessionKey
|
||||
? normalizeBrowserOSChatSessionKey(id, body.sessionKey)
|
||||
: undefined
|
||||
const history = Array.isArray(body.history)
|
||||
? body.history.filter((entry): entry is MonitoringChatTurn =>
|
||||
Boolean(
|
||||
entry &&
|
||||
(entry.role === 'user' || entry.role === 'assistant') &&
|
||||
typeof entry.content === 'string',
|
||||
),
|
||||
)
|
||||
: []
|
||||
|
||||
const { text: composedMessage, parts: messageParts } =
|
||||
buildMessagePartsFromAttachments(trimmedMessage, attachments)
|
||||
|
||||
const item = getOutboundQueueService().enqueue({
|
||||
agentId: id,
|
||||
id: typeof body.id === 'string' && body.id ? body.id : undefined,
|
||||
message: composedMessage,
|
||||
messageParts,
|
||||
sessionKey,
|
||||
history,
|
||||
attachmentsPreview: attachments.map((a) => ({
|
||||
kind: a.kind,
|
||||
mediaType: a.mediaType,
|
||||
name: 'name' in a ? a.name : undefined,
|
||||
})),
|
||||
})
|
||||
return c.json({ id: item.id }, 202)
|
||||
})
|
||||
|
||||
.delete('/agents/:id/queue/:itemId', (c) => {
|
||||
const { id, itemId } = c.req.param()
|
||||
const result = getOutboundQueueService().cancel(id, itemId)
|
||||
if (!result.ok) {
|
||||
const code = result.reason === 'dispatching' ? 409 : 404
|
||||
const message =
|
||||
result.reason === 'dispatching'
|
||||
? 'Item is already dispatching'
|
||||
: 'Item not found'
|
||||
return c.json({ error: message }, code)
|
||||
}
|
||||
return c.json({ ok: true })
|
||||
})
|
||||
|
||||
.post('/agents/:id/queue/:itemId/retry', (c) => {
|
||||
const { id, itemId } = c.req.param()
|
||||
const result = getOutboundQueueService().retry(id, itemId)
|
||||
if (!result.ok) {
|
||||
return c.json({ error: 'Item not found or not failed' }, 404)
|
||||
}
|
||||
return c.json({ ok: true })
|
||||
})
|
||||
|
||||
.get('/agents/:id/queue/stream', (c) => {
|
||||
const { id } = c.req.param()
|
||||
c.header('Content-Type', 'text/event-stream')
|
||||
c.header('Cache-Control', 'no-cache')
|
||||
return stream(c, async (s) => {
|
||||
const encoder = new TextEncoder()
|
||||
const sendSnapshot = (items: QueuedItemPublic[]) => {
|
||||
void s.write(encoder.encode(`data: ${JSON.stringify({ items })}\n\n`))
|
||||
}
|
||||
const unsubscribe = getOutboundQueueService().subscribe(
|
||||
id,
|
||||
sendSnapshot,
|
||||
)
|
||||
const heartbeat = setInterval(() => {
|
||||
void s.write(encoder.encode(': keep-alive\n\n'))
|
||||
}, 15_000)
|
||||
try {
|
||||
await new Promise<void>((resolve) => {
|
||||
s.onAbort(() => resolve())
|
||||
})
|
||||
} finally {
|
||||
clearInterval(heartbeat)
|
||||
unsubscribe()
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
.get('/session/:key/history', async (c) => {
|
||||
const key = c.req.param('key')
|
||||
const limitRaw = c.req.query('limit')
|
||||
|
||||
@@ -0,0 +1,267 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* In-memory state machine tracking the live status of every OpenClaw agent
|
||||
* session. Acts as the single source of truth for "is agent X running?"
|
||||
*
|
||||
* Two data sources feed it:
|
||||
* 1. JSONL files (seed) — on init, reads the latest events for each agent
|
||||
* to infer whether a session is running or idle. This handles the case
|
||||
* where an agent was already mid-task when BrowserOS started.
|
||||
* 2. Gateway WS events (live) — the OpenClawObserver pipes chat broadcast
|
||||
* events into this state machine for real-time transitions.
|
||||
*
|
||||
* Consumers (SSE streams, dashboard endpoint) read from this class and get
|
||||
* correct state from the first call — no "unknown" period while waiting for
|
||||
* the first WS event.
|
||||
*/
|
||||
|
||||
import { logger } from '../../../lib/logger'
|
||||
import type { ClawEvent, OpenClawJsonlReader } from './openclaw-jsonl-reader'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export type AgentLiveStatus = 'working' | 'idle' | 'error' | 'unknown'
|
||||
|
||||
export interface AgentSessionState {
|
||||
status: AgentLiveStatus
|
||||
sessionKey: string | null
|
||||
lastEventAt: number
|
||||
currentTool: string | null
|
||||
error: string | null
|
||||
}
|
||||
|
||||
export type SessionStateListener = (
|
||||
agentId: string,
|
||||
state: AgentSessionState,
|
||||
) => void
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// State machine
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class ClawSession {
|
||||
private readonly states = new Map<string, AgentSessionState>()
|
||||
private readonly listeners = new Set<SessionStateListener>()
|
||||
private seeded = false
|
||||
|
||||
/**
|
||||
* Seed the state machine from JSONL files. Call this once when the
|
||||
* gateway becomes ready. For each agent, reads the latest session's
|
||||
* events and infers whether the agent is currently working or idle.
|
||||
*
|
||||
* A session is considered "working" if:
|
||||
* - The last message-type event is a user.message (agent hasn't replied yet)
|
||||
* - The last event is an agent.tool_use without a matching agent.tool_result
|
||||
*
|
||||
* Otherwise it's "idle".
|
||||
*/
|
||||
seedFromJsonl(reader: OpenClawJsonlReader): void {
|
||||
const agents = reader.listAgents()
|
||||
|
||||
for (const agentId of agents) {
|
||||
const sessions = reader.listSessions(agentId)
|
||||
if (sessions.length === 0) continue
|
||||
|
||||
const latestSession = sessions[0]
|
||||
const events = reader.listBySession(agentId, latestSession.key)
|
||||
const state = inferStateFromEvents(events, latestSession.key)
|
||||
|
||||
this.states.set(agentId, state)
|
||||
|
||||
if (state.status === 'working') {
|
||||
logger.info('ClawSession seed: agent is working', {
|
||||
agentId,
|
||||
currentTool: state.currentTool,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
this.seeded = true
|
||||
logger.info('ClawSession seeded from JSONL', {
|
||||
agentCount: agents.length,
|
||||
working: [...this.states.values()].filter((s) => s.status === 'working')
|
||||
.length,
|
||||
})
|
||||
}
|
||||
|
||||
/** Whether seedFromJsonl() has been called. */
|
||||
isSeeded(): boolean {
|
||||
return this.seeded
|
||||
}
|
||||
|
||||
/** Get the current state of an agent. */
|
||||
getState(agentId: string): AgentSessionState {
|
||||
return (
|
||||
this.states.get(agentId) ?? {
|
||||
status: 'unknown',
|
||||
sessionKey: null,
|
||||
lastEventAt: 0,
|
||||
currentTool: null,
|
||||
error: null,
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
/** Get all tracked agent states. */
|
||||
getAllStates(): Map<string, AgentSessionState> {
|
||||
return this.states
|
||||
}
|
||||
|
||||
/**
|
||||
* Transition an agent's state. Called by the OpenClawObserver when
|
||||
* a chat WS event arrives.
|
||||
*/
|
||||
transition(
|
||||
agentId: string,
|
||||
status: AgentLiveStatus,
|
||||
update: {
|
||||
sessionKey?: string | null
|
||||
currentTool?: string | null
|
||||
error?: string | null
|
||||
} = {},
|
||||
): void {
|
||||
const prev = this.states.get(agentId)
|
||||
const entry: AgentSessionState = {
|
||||
status,
|
||||
sessionKey: update.sessionKey ?? prev?.sessionKey ?? null,
|
||||
lastEventAt: Date.now(),
|
||||
currentTool:
|
||||
status === 'working'
|
||||
? (update.currentTool ?? prev?.currentTool ?? null)
|
||||
: null,
|
||||
error: status === 'error' ? (update.error ?? null) : null,
|
||||
}
|
||||
|
||||
this.states.set(agentId, entry)
|
||||
|
||||
for (const listener of this.listeners) {
|
||||
try {
|
||||
listener(agentId, entry)
|
||||
} catch {}
|
||||
}
|
||||
}
|
||||
|
||||
/** Subscribe to state changes. Returns unsubscribe function. */
|
||||
onStateChange(listener: SessionStateListener): () => void {
|
||||
this.listeners.add(listener)
|
||||
return () => this.listeners.delete(listener)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// JSONL state inference
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Infer the current session state from JSONL events.
|
||||
*
|
||||
* The key insight: if the last meaningful event in the JSONL is a
|
||||
* user.message with no subsequent agent.message, the agent is still
|
||||
* processing (working). Similarly, an agent.tool_use without a matching
|
||||
* agent.tool_result means the agent is mid-tool-call.
|
||||
*
|
||||
* We also check event recency — if the last event was more than 5 minutes
|
||||
* ago, we assume the session is idle regardless (handles cases where the
|
||||
* agent crashed without writing a final event).
|
||||
*/
|
||||
function inferStateFromEvents(
|
||||
events: ClawEvent[],
|
||||
sessionKey: string,
|
||||
): AgentSessionState {
|
||||
if (events.length === 0) {
|
||||
return {
|
||||
status: 'idle',
|
||||
sessionKey,
|
||||
lastEventAt: 0,
|
||||
currentTool: null,
|
||||
error: null,
|
||||
}
|
||||
}
|
||||
|
||||
const lastEvent = events[events.length - 1]!
|
||||
const lastEventAt = lastEvent.createdAt
|
||||
|
||||
// If the last event is older than 5 minutes, assume idle — the agent
|
||||
// likely finished or crashed without writing a final event.
|
||||
const STALE_THRESHOLD_MS = 5 * 60 * 1000
|
||||
if (Date.now() - lastEventAt > STALE_THRESHOLD_MS) {
|
||||
return {
|
||||
status: 'idle',
|
||||
sessionKey,
|
||||
lastEventAt,
|
||||
currentTool: null,
|
||||
error: null,
|
||||
}
|
||||
}
|
||||
|
||||
// Walk backward to find the last meaningful event
|
||||
let lastUserMessageIdx = -1
|
||||
let lastAssistantMessageIdx = -1
|
||||
let lastToolUseIdx = -1
|
||||
let lastToolResultIdx = -1
|
||||
|
||||
for (let i = events.length - 1; i >= 0; i--) {
|
||||
const e = events[i]!
|
||||
if (e.type === 'user.message' && lastUserMessageIdx === -1) {
|
||||
lastUserMessageIdx = i
|
||||
}
|
||||
if (e.type === 'agent.message' && lastAssistantMessageIdx === -1) {
|
||||
lastAssistantMessageIdx = i
|
||||
}
|
||||
if (e.type === 'agent.tool_use' && lastToolUseIdx === -1) {
|
||||
lastToolUseIdx = i
|
||||
}
|
||||
if (e.type === 'agent.tool_result' && lastToolResultIdx === -1) {
|
||||
lastToolResultIdx = i
|
||||
}
|
||||
// Stop scanning once we've found all event types
|
||||
if (
|
||||
lastUserMessageIdx !== -1 &&
|
||||
lastAssistantMessageIdx !== -1 &&
|
||||
lastToolUseIdx !== -1 &&
|
||||
lastToolResultIdx !== -1
|
||||
) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Agent is working if the last user message came AFTER the last
|
||||
// assistant message — the agent hasn't replied yet
|
||||
if (
|
||||
lastUserMessageIdx !== -1 &&
|
||||
lastUserMessageIdx > lastAssistantMessageIdx
|
||||
) {
|
||||
return {
|
||||
status: 'working',
|
||||
sessionKey,
|
||||
lastEventAt,
|
||||
currentTool: null,
|
||||
error: null,
|
||||
}
|
||||
}
|
||||
|
||||
// Agent is working if there's a tool_use without a subsequent tool_result
|
||||
if (lastToolUseIdx !== -1 && lastToolUseIdx > lastToolResultIdx) {
|
||||
const toolEvent = events[lastToolUseIdx]!
|
||||
return {
|
||||
status: 'working',
|
||||
sessionKey,
|
||||
lastEventAt,
|
||||
currentTool: toolEvent.toolName ?? null,
|
||||
error: null,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
status: 'idle',
|
||||
sessionKey,
|
||||
lastEventAt,
|
||||
currentTool: null,
|
||||
error: null,
|
||||
}
|
||||
}
|
||||
@@ -13,10 +13,27 @@ export interface OpenClawChatHistoryMessage {
|
||||
content: string
|
||||
}
|
||||
|
||||
/**
|
||||
* OpenAI-compatible content parts for multimodal user messages. OpenClaw's
|
||||
* gateway accepts the standard `content: [{ type: 'text', ... }, { type:
|
||||
* 'image_url', image_url: { url } }]` shape on /v1/chat/completions and
|
||||
* routes it to whichever upstream provider the agent's model points at.
|
||||
*/
|
||||
export type OpenClawChatContentPart =
|
||||
| { type: 'text'; text: string }
|
||||
| {
|
||||
type: 'image_url'
|
||||
image_url: { url: string; detail?: 'auto' | 'low' | 'high' }
|
||||
}
|
||||
|
||||
export interface OpenClawChatRequest {
|
||||
agentId: string
|
||||
sessionKey: string
|
||||
message: string
|
||||
// When present, sent as the user message's `content` array verbatim. The
|
||||
// legacy string `message` is folded into a leading text part if no text
|
||||
// part is present in `messageParts`.
|
||||
messageParts?: OpenClawChatContentPart[]
|
||||
history?: OpenClawChatHistoryMessage[]
|
||||
signal?: AbortSignal
|
||||
}
|
||||
@@ -117,6 +134,7 @@ export class OpenClawHttpClient {
|
||||
|
||||
private async fetchChat(input: OpenClawChatRequest): Promise<Response> {
|
||||
const token = await this.getToken()
|
||||
const userContent = buildUserContent(input)
|
||||
const response = await fetch(
|
||||
`http://127.0.0.1:${this.hostPort}/v1/chat/completions`,
|
||||
{
|
||||
@@ -130,7 +148,7 @@ export class OpenClawHttpClient {
|
||||
stream: true,
|
||||
messages: [
|
||||
...(input.history ?? []),
|
||||
{ role: 'user', content: input.message },
|
||||
{ role: 'user', content: userContent },
|
||||
],
|
||||
user: `browseros:${input.agentId}:${input.sessionKey}`,
|
||||
}),
|
||||
@@ -197,6 +215,30 @@ function resolveAgentModel(agentId: string): string {
|
||||
return agentId === 'main' ? 'openclaw' : `openclaw/${agentId}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the OpenAI-compatible `content` payload for the trailing user
|
||||
* message. When the caller supplies multimodal parts via `messageParts`,
|
||||
* use them as-is, ensuring at least one text part is present (we fold the
|
||||
* legacy `message` string in as a leading text part if not). Otherwise,
|
||||
* fall back to a plain string `content` so simple text-only sends keep
|
||||
* the same wire shape we've always sent.
|
||||
*/
|
||||
function buildUserContent(
|
||||
input: OpenClawChatRequest,
|
||||
): string | OpenClawChatContentPart[] {
|
||||
if (!input.messageParts || input.messageParts.length === 0) {
|
||||
return input.message
|
||||
}
|
||||
|
||||
const hasText = input.messageParts.some((p) => p.type === 'text')
|
||||
if (hasText) return input.messageParts
|
||||
|
||||
const trimmed = input.message.trim()
|
||||
if (!trimmed) return input.messageParts
|
||||
|
||||
return [{ type: 'text', text: input.message }, ...input.messageParts]
|
||||
}
|
||||
|
||||
function createEventStream(
|
||||
body: ReadableStream<Uint8Array>,
|
||||
signal?: AbortSignal,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { readdirSync, readFileSync } from 'node:fs'
|
||||
import { existsSync, readdirSync, readFileSync, statSync } from 'node:fs'
|
||||
import { resolve } from 'node:path'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -14,9 +14,22 @@ import { resolve } from 'node:path'
|
||||
interface PiContentBlock {
|
||||
type: string
|
||||
text?: string
|
||||
// OpenClaw stores reasoning blocks as { type: 'thinking', thinking: '...' }
|
||||
// — the prose lives on a `thinking` field, not `text`.
|
||||
thinking?: string
|
||||
id?: string
|
||||
name?: string
|
||||
arguments?: Record<string, unknown>
|
||||
// OpenAI-shaped image blocks: { type: 'image_url', image_url: { url } }.
|
||||
// The data: URL carries mediaType + base64 in one string.
|
||||
image_url?: { url?: string; detail?: string }
|
||||
// Anthropic-shaped image blocks: { type: 'image', source: { type:
|
||||
// 'base64', media_type, data } } and the simpler { type: 'image', data }
|
||||
// variant the gateway emits on tool results.
|
||||
source?: { type?: string; media_type?: string; data?: string }
|
||||
data?: string
|
||||
media_type?: string
|
||||
mediaType?: string
|
||||
}
|
||||
|
||||
interface PiMessage {
|
||||
@@ -65,6 +78,7 @@ type SessionsJson = Record<string, SessionsJsonEntry>
|
||||
|
||||
export type ClawEventType =
|
||||
| 'user.message'
|
||||
| 'user.attachment'
|
||||
| 'agent.message'
|
||||
| 'agent.thinking'
|
||||
| 'agent.tool_use'
|
||||
@@ -73,6 +87,16 @@ export type ClawEventType =
|
||||
| 'session.thinking_level_change'
|
||||
| 'session.compaction'
|
||||
|
||||
export interface ClawAttachmentInfo {
|
||||
kind: 'image' | 'file'
|
||||
mediaType: string
|
||||
// For images we always emit a data: URL so downstream consumers don't
|
||||
// have to reconstruct it. `name` is best-effort (JSONL rarely carries
|
||||
// a filename for inline image content blocks).
|
||||
dataUrl?: string
|
||||
name?: string
|
||||
}
|
||||
|
||||
export interface ClawEvent {
|
||||
eventId: string
|
||||
type: ClawEventType
|
||||
@@ -86,6 +110,7 @@ export interface ClawEvent {
|
||||
toolCallId?: string
|
||||
toolArguments?: Record<string, unknown>
|
||||
isError?: boolean
|
||||
attachment?: ClawAttachmentInfo
|
||||
}
|
||||
|
||||
export interface JsonlSessionEntry {
|
||||
@@ -148,12 +173,19 @@ export class OpenClawJsonlReader {
|
||||
}
|
||||
}
|
||||
|
||||
/** Read and parse all events from a session's JSONL file. */
|
||||
/**
|
||||
* Read and parse all events from a session's JSONL file.
|
||||
*
|
||||
* Uses resolveJsonlPath() which handles a known OpenClaw quirk: the
|
||||
* Pi session ID recorded in sessions.json can drift from the actual
|
||||
* JSONL filename after context compaction or session restart. When the
|
||||
* mapped ID doesn't match a file on disk, we fall back to the most
|
||||
* recently modified JSONL in the agent's sessions directory.
|
||||
*/
|
||||
listBySession(agentId: string, sessionKey: string): ClawEvent[] {
|
||||
const piSessionId = this.resolvePiSessionId(agentId, sessionKey)
|
||||
if (!piSessionId) return []
|
||||
const filePath = this.resolveJsonlPath(agentId, sessionKey)
|
||||
if (!filePath) return []
|
||||
|
||||
const filePath = this.jsonlPath(agentId, piSessionId)
|
||||
let raw: string
|
||||
try {
|
||||
raw = readFileSync(filePath, 'utf8')
|
||||
@@ -255,31 +287,91 @@ export class OpenClawJsonlReader {
|
||||
}
|
||||
}
|
||||
|
||||
private resolvePiSessionId(
|
||||
agentId: string,
|
||||
sessionKey: string,
|
||||
): string | undefined {
|
||||
/**
|
||||
* Resolve the path to a session's JSONL file. Tries the sessions.json
|
||||
* mapping first (fast), then falls back to scanning the directory for
|
||||
* the most recently modified JSONL file when the mapped ID doesn't
|
||||
* match an actual file on disk.
|
||||
*
|
||||
* This fallback handles a known OpenClaw behavior where the Pi session
|
||||
* ID in sessions.json can become stale after context compaction or
|
||||
* session restart — the JSONL file on disk has a different UUID than
|
||||
* what sessions.json records.
|
||||
*/
|
||||
private resolveJsonlPath(agentId: string, sessionKey: string): string | null {
|
||||
const sessionsJson = this.readSessionsJson(agentId)
|
||||
if (!sessionsJson) return undefined
|
||||
if (!sessionsJson) return null
|
||||
|
||||
// Try exact key match first
|
||||
// Try exact key match in sessions.json
|
||||
let resolvedId: string | undefined
|
||||
const entry = sessionsJson[sessionKey]
|
||||
if (entry && typeof entry.sessionId === 'string') {
|
||||
return entry.sessionId
|
||||
resolvedId = entry.sessionId
|
||||
}
|
||||
|
||||
// Try matching by scanning all keys (handles key format variations)
|
||||
for (const [key, value] of Object.entries(sessionsJson)) {
|
||||
if (key === sessionKey || key.endsWith(`:${sessionKey}`)) {
|
||||
if (typeof value.sessionId === 'string') return value.sessionId
|
||||
if (!resolvedId) {
|
||||
for (const [key, value] of Object.entries(sessionsJson)) {
|
||||
if (key === sessionKey || key.endsWith(`:${sessionKey}`)) {
|
||||
if (typeof value.sessionId === 'string') {
|
||||
resolvedId = value.sessionId
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return undefined
|
||||
// If we found a sessionId and the file exists, use it
|
||||
if (resolvedId) {
|
||||
const path = this.safePath(
|
||||
'agents',
|
||||
agentId,
|
||||
'sessions',
|
||||
`${resolvedId}.jsonl`,
|
||||
)
|
||||
if (existsSync(path)) return path
|
||||
}
|
||||
|
||||
// Fallback: scan the sessions directory for the most recent JSONL
|
||||
// file. This handles stale sessions.json entries where the Pi
|
||||
// session ID doesn't match the actual file on disk.
|
||||
return this.findMostRecentJsonl(agentId)
|
||||
}
|
||||
|
||||
private jsonlPath(agentId: string, piSessionId: string): string {
|
||||
return this.safePath('agents', agentId, 'sessions', `${piSessionId}.jsonl`)
|
||||
/**
|
||||
* Scan the sessions directory and return the path to the most recently
|
||||
* modified JSONL file. Used as a fallback when sessions.json points to
|
||||
* a non-existent file.
|
||||
*/
|
||||
private findMostRecentJsonl(agentId: string): string | null {
|
||||
let sessionsDir: string
|
||||
try {
|
||||
sessionsDir = this.safePath('agents', agentId, 'sessions')
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
|
||||
let names: string[]
|
||||
try {
|
||||
names = readdirSync(sessionsDir).filter(
|
||||
(n): n is string => typeof n === 'string' && n.endsWith('.jsonl'),
|
||||
)
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
|
||||
let best: { path: string; mtime: number } | null = null
|
||||
for (const name of names) {
|
||||
const fullPath = this.safePath('agents', agentId, 'sessions', name)
|
||||
try {
|
||||
const st = statSync(fullPath)
|
||||
if (!best || st.mtimeMs > best.mtime) {
|
||||
best = { path: fullPath, mtime: st.mtimeMs }
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
|
||||
return best?.path ?? null
|
||||
}
|
||||
}
|
||||
|
||||
@@ -338,9 +430,7 @@ function mapMessageToEvents(
|
||||
createdAt: number,
|
||||
): ClawEvent[] {
|
||||
if (msg.role === 'user') {
|
||||
const text = extractText(msg.content)
|
||||
if (!text) return []
|
||||
return [{ eventId, type: 'user.message', content: text, createdAt }]
|
||||
return mapUserMessage(msg, eventId, createdAt)
|
||||
}
|
||||
|
||||
if (msg.role === 'assistant') {
|
||||
@@ -365,6 +455,92 @@ function mapMessageToEvents(
|
||||
return []
|
||||
}
|
||||
|
||||
/**
|
||||
* Build events for a user JSONL message. Each image content block becomes
|
||||
* a separate `user.attachment` event ordered before the `user.message`
|
||||
* text event so downstream accumulators (in jsonlEventsToHistoryItems)
|
||||
* can flush attachments onto the message they arrived with.
|
||||
*/
|
||||
function mapUserMessage(
|
||||
msg: PiMessage,
|
||||
eventId: string,
|
||||
createdAt: number,
|
||||
): ClawEvent[] {
|
||||
const events: ClawEvent[] = []
|
||||
const text = extractText(msg.content)
|
||||
|
||||
if (msg.content) {
|
||||
let attachmentIdx = 0
|
||||
for (const block of msg.content) {
|
||||
const attachment = extractImageAttachment(block)
|
||||
if (!attachment) continue
|
||||
events.push({
|
||||
eventId: `${eventId}:attachment:${attachmentIdx}`,
|
||||
type: 'user.attachment',
|
||||
content: attachment.dataUrl ?? '',
|
||||
createdAt,
|
||||
attachment,
|
||||
})
|
||||
attachmentIdx++
|
||||
}
|
||||
}
|
||||
|
||||
if (text) {
|
||||
events.push({ eventId, type: 'user.message', content: text, createdAt })
|
||||
} else if (events.length > 0) {
|
||||
// User sent only attachments and no caption — synthesize an empty
|
||||
// user.message so downstream pipelines that gate on user.message still
|
||||
// see a turn boundary.
|
||||
events.push({ eventId, type: 'user.message', content: '', createdAt })
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a normalised image attachment from a single content block.
|
||||
* Handles all three shapes the OpenClaw gateway round-trips:
|
||||
* - OpenAI: `{ type: 'image_url', image_url: { url } }` (data: URL)
|
||||
* - Anthropic: `{ type: 'image', source: { type: 'base64', media_type, data } }`
|
||||
* - Bare: `{ type: 'image', data: '<base64>' }` (used by tool-result outputs)
|
||||
*/
|
||||
function extractImageAttachment(
|
||||
block: PiContentBlock,
|
||||
): ClawAttachmentInfo | null {
|
||||
if (block.type === 'image_url') {
|
||||
const url = block.image_url?.url
|
||||
if (typeof url !== 'string' || !url.startsWith('data:')) return null
|
||||
const mediaType =
|
||||
url.slice(5, url.indexOf(';')).trim() || 'application/octet-stream'
|
||||
return { kind: 'image', mediaType, dataUrl: url }
|
||||
}
|
||||
|
||||
if (block.type === 'image') {
|
||||
const sourceData = block.source?.data
|
||||
const sourceMediaType =
|
||||
block.source?.media_type ?? block.media_type ?? block.mediaType
|
||||
const bareData = block.data
|
||||
if (typeof sourceData === 'string' && typeof sourceMediaType === 'string') {
|
||||
return {
|
||||
kind: 'image',
|
||||
mediaType: sourceMediaType,
|
||||
dataUrl: `data:${sourceMediaType};base64,${sourceData}`,
|
||||
}
|
||||
}
|
||||
if (typeof bareData === 'string') {
|
||||
const mediaType =
|
||||
typeof sourceMediaType === 'string' ? sourceMediaType : 'image/png'
|
||||
return {
|
||||
kind: 'image',
|
||||
mediaType,
|
||||
dataUrl: `data:${mediaType};base64,${bareData}`,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
function mapAssistantMessage(
|
||||
msg: PiMessage,
|
||||
eventId: string,
|
||||
@@ -377,18 +553,20 @@ function mapAssistantMessage(
|
||||
let thinkingIdx = 0
|
||||
let toolIdx = 0
|
||||
for (const block of msg.content) {
|
||||
if (
|
||||
block.type === 'thinking' &&
|
||||
typeof block.text === 'string' &&
|
||||
block.text.length > 0
|
||||
) {
|
||||
events.push({
|
||||
eventId: `${eventId}:thinking:${thinkingIdx}`,
|
||||
type: 'agent.thinking',
|
||||
content: block.text,
|
||||
createdAt,
|
||||
})
|
||||
thinkingIdx++
|
||||
if (block.type === 'thinking') {
|
||||
const thinkingText =
|
||||
(typeof block.thinking === 'string' && block.thinking) ||
|
||||
(typeof block.text === 'string' && block.text) ||
|
||||
''
|
||||
if (thinkingText.length > 0) {
|
||||
events.push({
|
||||
eventId: `${eventId}:thinking:${thinkingIdx}`,
|
||||
type: 'agent.thinking',
|
||||
content: thinkingText,
|
||||
createdAt,
|
||||
})
|
||||
thinkingIdx++
|
||||
}
|
||||
}
|
||||
if (block.type === 'toolCall' && block.name) {
|
||||
events.push({
|
||||
@@ -439,3 +617,51 @@ function combineModel(
|
||||
if (!model) return undefined
|
||||
return provider ? `${provider}/${model}` : model
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tool activity summary
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const TOOL_DESCRIPTIONS: Record<string, (count: number) => string> = {
|
||||
browser_navigate: (n) => `Browsed ${n} page${n !== 1 ? 's' : ''}`,
|
||||
browser_take_screenshot: (n) => `Took ${n} screenshot${n !== 1 ? 's' : ''}`,
|
||||
browser_click: (n) => `Clicked ${n} element${n !== 1 ? 's' : ''}`,
|
||||
browser_fill: (n) => `Filled ${n} field${n !== 1 ? 's' : ''}`,
|
||||
browser_type: (n) => `Typed in ${n} field${n !== 1 ? 's' : ''}`,
|
||||
google_calendar_list_events: (n) =>
|
||||
n > 1 ? `Checked calendar ${n} times` : 'Checked calendar',
|
||||
gmail_search: (n) => (n > 1 ? `Searched email ${n} times` : 'Searched email'),
|
||||
gmail_send: (n) => `Sent ${n} email${n !== 1 ? 's' : ''}`,
|
||||
slack_post_message: (n) => `Sent ${n} Slack message${n !== 1 ? 's' : ''}`,
|
||||
file_write: (n) => `Wrote ${n} file${n !== 1 ? 's' : ''}`,
|
||||
file_read: (n) => `Read ${n} file${n !== 1 ? 's' : ''}`,
|
||||
}
|
||||
|
||||
function defaultToolDescription(toolName: string, count: number): string {
|
||||
const short = toolName
|
||||
.replace(/^(browser_|google_|mcp_)/, '')
|
||||
.replaceAll('_', ' ')
|
||||
return count > 1 ? `Used ${short} ${count} times` : `Used ${short}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert raw tool-use events into a human-readable activity summary.
|
||||
*
|
||||
* Example output: "Browsed 3 pages, took 2 screenshots"
|
||||
*/
|
||||
export function summarizeToolActivity(events: ClawEvent[]): string | null {
|
||||
const toolCounts = new Map<string, number>()
|
||||
for (const e of events) {
|
||||
if (e.type === 'agent.tool_use' && e.toolName) {
|
||||
toolCounts.set(e.toolName, (toolCounts.get(e.toolName) ?? 0) + 1)
|
||||
}
|
||||
}
|
||||
if (toolCounts.size === 0) return null
|
||||
|
||||
const parts: string[] = []
|
||||
for (const [tool, count] of toolCounts) {
|
||||
const describe = TOOL_DESCRIPTIONS[tool]
|
||||
parts.push(describe ? describe(count) : defaultToolDescription(tool, count))
|
||||
}
|
||||
return parts.join(', ')
|
||||
}
|
||||
|
||||
@@ -0,0 +1,276 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Connects to the OpenClaw gateway's WebSocket control plane and pipes
|
||||
* chat broadcast events into a ClawSession state machine. The observer
|
||||
* is a transport layer only — it handles the WS connection lifecycle
|
||||
* (connect, handshake, reconnect) and delegates all state management
|
||||
* to ClawSession.
|
||||
*/
|
||||
|
||||
import WebSocket from 'ws'
|
||||
import { logger } from '../../../lib/logger'
|
||||
import type { ClawSession } from './claw-session'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Protocol types (subset of OpenClaw gateway protocol v3)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const PROTOCOL_VERSION = 3
|
||||
const HANDSHAKE_REQUEST_ID = 'connect'
|
||||
const RECONNECT_DELAY_MS = 5_000
|
||||
const CONNECT_TIMEOUT_MS = 10_000
|
||||
|
||||
interface RequestFrame {
|
||||
type: 'req'
|
||||
id: string
|
||||
method: string
|
||||
params: Record<string, unknown>
|
||||
}
|
||||
|
||||
type IncomingFrame =
|
||||
| { type: 'res'; id: string; ok: true; payload?: unknown }
|
||||
| {
|
||||
type: 'res'
|
||||
id: string
|
||||
ok: false
|
||||
error: { code: string; message: string }
|
||||
}
|
||||
| { type: 'event'; event: string; payload?: unknown }
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Observer
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class OpenClawObserver {
|
||||
private ws: WebSocket | null = null
|
||||
private reconnectTimer: ReturnType<typeof setTimeout> | null = null
|
||||
private connected = false
|
||||
private closed = false
|
||||
private gatewayUrl: string | null = null
|
||||
private gatewayToken: string | null = null
|
||||
|
||||
constructor(private readonly session: ClawSession) {}
|
||||
|
||||
/** Start observing the gateway at the given URL with the given token. */
|
||||
connect(gatewayUrl: string, token: string): void {
|
||||
this.gatewayUrl = gatewayUrl
|
||||
this.gatewayToken = token
|
||||
this.closed = false
|
||||
this.doConnect()
|
||||
}
|
||||
|
||||
/** Stop observing and close the WebSocket. */
|
||||
disconnect(): void {
|
||||
this.closed = true
|
||||
this.clearReconnect()
|
||||
if (this.ws) {
|
||||
try {
|
||||
this.ws.close()
|
||||
} catch {}
|
||||
this.ws = null
|
||||
}
|
||||
this.connected = false
|
||||
}
|
||||
|
||||
/** Whether the observer has an active WS connection. */
|
||||
isConnected(): boolean {
|
||||
return this.connected
|
||||
}
|
||||
|
||||
// ── Private ─────────────────────────────────────────────────────────
|
||||
|
||||
private doConnect(): void {
|
||||
if (this.closed || !this.gatewayUrl || !this.gatewayToken) return
|
||||
|
||||
const wsUrl = this.gatewayUrl
|
||||
.replace(/^http:\/\//, 'ws://')
|
||||
.replace(/^https:\/\//, 'wss://')
|
||||
|
||||
logger.debug('OpenClaw observer connecting', { url: wsUrl })
|
||||
|
||||
const ws = new WebSocket(wsUrl)
|
||||
this.ws = ws
|
||||
|
||||
const connectTimeout = setTimeout(() => {
|
||||
logger.warn('OpenClaw observer handshake timeout')
|
||||
ws.terminate()
|
||||
}, CONNECT_TIMEOUT_MS)
|
||||
|
||||
let handshakeSent = false
|
||||
|
||||
ws.on('message', (raw) => {
|
||||
let frame: IncomingFrame
|
||||
try {
|
||||
frame = JSON.parse(raw.toString('utf8')) as IncomingFrame
|
||||
} catch {
|
||||
return
|
||||
}
|
||||
|
||||
// The gateway sends a connect.challenge event before accepting
|
||||
// the connect request. Send the handshake after receiving it.
|
||||
if (
|
||||
frame.type === 'event' &&
|
||||
frame.event === 'connect.challenge' &&
|
||||
!handshakeSent
|
||||
) {
|
||||
handshakeSent = true
|
||||
const connectReq: RequestFrame = {
|
||||
type: 'req',
|
||||
id: HANDSHAKE_REQUEST_ID,
|
||||
method: 'connect',
|
||||
params: {
|
||||
minProtocol: PROTOCOL_VERSION,
|
||||
maxProtocol: PROTOCOL_VERSION,
|
||||
client: {
|
||||
id: 'openclaw-tui',
|
||||
displayName: 'browseros-observer',
|
||||
version: '1.0.0',
|
||||
platform: 'node',
|
||||
mode: 'ui',
|
||||
},
|
||||
role: 'operator',
|
||||
scopes: ['operator.read'],
|
||||
auth: { token: this.gatewayToken },
|
||||
},
|
||||
}
|
||||
ws.send(JSON.stringify(connectReq))
|
||||
return
|
||||
}
|
||||
|
||||
// Handshake response
|
||||
if (frame.type === 'res' && frame.id === HANDSHAKE_REQUEST_ID) {
|
||||
clearTimeout(connectTimeout)
|
||||
if (frame.ok) {
|
||||
this.connected = true
|
||||
logger.info('OpenClaw observer connected')
|
||||
} else {
|
||||
logger.warn('OpenClaw observer handshake failed', {
|
||||
error: frame.error,
|
||||
})
|
||||
ws.close()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcast events (only process after handshake completes)
|
||||
if (frame.type === 'event' && this.connected) {
|
||||
this.handleEvent(frame.event, frame.payload)
|
||||
}
|
||||
})
|
||||
|
||||
ws.on('close', () => {
|
||||
clearTimeout(connectTimeout)
|
||||
this.connected = false
|
||||
this.ws = null
|
||||
|
||||
// Reset any agents stuck in "working" to "unknown" — we missed
|
||||
// the final/end event because the WS closed mid-task. The
|
||||
// ClawSession will re-infer correct state from JSONL when the
|
||||
// observer reconnects and ensureObserverConnected() re-seeds.
|
||||
for (const [agentId, state] of this.session.getAllStates()) {
|
||||
if (state.status === 'working') {
|
||||
this.session.transition(agentId, 'unknown')
|
||||
}
|
||||
}
|
||||
|
||||
if (!this.closed) {
|
||||
logger.debug('OpenClaw observer disconnected, scheduling reconnect')
|
||||
this.scheduleReconnect()
|
||||
}
|
||||
})
|
||||
|
||||
ws.on('error', (err) => {
|
||||
clearTimeout(connectTimeout)
|
||||
logger.debug('OpenClaw observer WS error', {
|
||||
message: err.message,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
private handleEvent(eventName: string, payload: unknown): void {
|
||||
if (eventName === 'chat') {
|
||||
this.handleChatEvent(payload)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a gateway chat broadcast event and transition the ClawSession
|
||||
* state machine accordingly.
|
||||
*/
|
||||
private handleChatEvent(payload: unknown): void {
|
||||
if (!payload || typeof payload !== 'object') return
|
||||
const p = payload as Record<string, unknown>
|
||||
|
||||
const sessionKey = typeof p.sessionKey === 'string' ? p.sessionKey : null
|
||||
const state = typeof p.state === 'string' ? p.state : null
|
||||
|
||||
if (!sessionKey || !state) return
|
||||
|
||||
const agentId = extractAgentId(sessionKey)
|
||||
if (!agentId) return
|
||||
|
||||
if (state === 'delta' || state === 'streaming') {
|
||||
this.session.transition(agentId, 'working', {
|
||||
sessionKey,
|
||||
currentTool: extractToolName(p),
|
||||
})
|
||||
} else if (state === 'final' || state === 'end') {
|
||||
this.session.transition(agentId, 'idle', { sessionKey })
|
||||
} else if (state === 'error') {
|
||||
const errorMsg =
|
||||
typeof p.errorMessage === 'string'
|
||||
? p.errorMessage
|
||||
: typeof p.error === 'string'
|
||||
? p.error
|
||||
: 'Unknown error'
|
||||
this.session.transition(agentId, 'error', { sessionKey, error: errorMsg })
|
||||
}
|
||||
}
|
||||
|
||||
private scheduleReconnect(): void {
|
||||
this.clearReconnect()
|
||||
this.reconnectTimer = setTimeout(() => {
|
||||
this.reconnectTimer = null
|
||||
this.doConnect()
|
||||
}, RECONNECT_DELAY_MS)
|
||||
}
|
||||
|
||||
private clearReconnect(): void {
|
||||
if (this.reconnectTimer) {
|
||||
clearTimeout(this.reconnectTimer)
|
||||
this.reconnectTimer = null
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Extract agentId from an OpenClaw session key.
|
||||
* Format: "agent:<agentId>:..." — we take the segment after "agent:".
|
||||
*/
|
||||
function extractAgentId(sessionKey: string): string | null {
|
||||
if (!sessionKey.startsWith('agent:')) return null
|
||||
const colonIdx = sessionKey.indexOf(':', 6)
|
||||
if (colonIdx === -1) return sessionKey.slice(6)
|
||||
return sessionKey.slice(6, colonIdx)
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to extract a tool name from a chat event payload.
|
||||
*/
|
||||
function extractToolName(payload: Record<string, unknown>): string | null {
|
||||
if (typeof payload.toolName === 'string') return payload.toolName
|
||||
if (typeof payload.tool === 'string') return payload.tool
|
||||
const content = payload.content
|
||||
if (content && typeof content === 'object' && 'name' in content) {
|
||||
const name = (content as Record<string, unknown>).name
|
||||
if (typeof name === 'string') return name
|
||||
}
|
||||
return null
|
||||
}
|
||||
@@ -18,6 +18,12 @@ import { DEFAULT_PORTS } from '@browseros/shared/constants/ports'
|
||||
import { getOpenClawDir } from '../../../lib/browseros-dir'
|
||||
import { logger } from '../../../lib/logger'
|
||||
import type { MonitoringChatTurn } from '../../../monitoring/types'
|
||||
import { buildToolLabel } from '../../../tools/tool-label-registry'
|
||||
import {
|
||||
type AgentLiveStatus,
|
||||
type AgentSessionState,
|
||||
ClawSession,
|
||||
} from './claw-session'
|
||||
import type {
|
||||
ContainerRuntime,
|
||||
GatewayContainerSpec,
|
||||
@@ -54,11 +60,17 @@ import {
|
||||
mergeEnvContent,
|
||||
} from './openclaw-env'
|
||||
import {
|
||||
type OpenClawChatContentPart,
|
||||
OpenClawHttpClient,
|
||||
type OpenClawSessionHistory,
|
||||
type OpenClawSessionHistoryEvent,
|
||||
} from './openclaw-http-client'
|
||||
import { type ClawEvent, OpenClawJsonlReader } from './openclaw-jsonl-reader'
|
||||
import {
|
||||
type ClawEvent,
|
||||
OpenClawJsonlReader,
|
||||
summarizeToolActivity,
|
||||
} from './openclaw-jsonl-reader'
|
||||
import { OpenClawObserver } from './openclaw-observer'
|
||||
import {
|
||||
type ResolvedOpenClawProviderConfig,
|
||||
resolveSupportedOpenClawProvider,
|
||||
@@ -157,6 +169,33 @@ export interface BrowserOSOpenClawAgentSessionResponse {
|
||||
session: BrowserOSOpenClawSession | null
|
||||
}
|
||||
|
||||
export interface BrowserOSChatHistoryToolCall {
|
||||
toolCallId?: string
|
||||
toolName: string
|
||||
label: string
|
||||
subject?: string
|
||||
status: 'completed' | 'failed'
|
||||
input?: Record<string, unknown>
|
||||
output?: string
|
||||
error?: string
|
||||
durationMs?: number
|
||||
}
|
||||
|
||||
export interface BrowserOSChatHistoryReasoning {
|
||||
text: string
|
||||
durationMs?: number
|
||||
}
|
||||
|
||||
export interface BrowserOSChatHistoryAttachment {
|
||||
kind: 'image' | 'file'
|
||||
mediaType: string
|
||||
// Images carry the full data: URL so the client can render directly.
|
||||
// Files (text / pdf / etc) currently round-trip via inline text in the
|
||||
// message body and don't reach this field — kept on the type for v2.
|
||||
dataUrl?: string
|
||||
name?: string
|
||||
}
|
||||
|
||||
export interface BrowserOSChatHistoryItem {
|
||||
id: string
|
||||
role: 'user' | 'assistant'
|
||||
@@ -165,6 +204,12 @@ export interface BrowserOSChatHistoryItem {
|
||||
messageSeq: number
|
||||
sessionKey: string
|
||||
source: OpenClawSessionSource
|
||||
costUsd?: number
|
||||
tokensIn?: number
|
||||
tokensOut?: number
|
||||
toolCalls?: BrowserOSChatHistoryToolCall[]
|
||||
reasoning?: BrowserOSChatHistoryReasoning
|
||||
attachments?: BrowserOSChatHistoryAttachment[]
|
||||
}
|
||||
|
||||
export interface BrowserOSOpenClawHistoryPageResponse {
|
||||
@@ -247,13 +292,104 @@ function jsonlEventsToHistoryItems(
|
||||
const items: BrowserOSChatHistoryItem[] = []
|
||||
let seq = 0
|
||||
|
||||
// Accumulate tool calls between text messages. The agent emits
|
||||
// tool_use → tool_result pairs interspersed with assistant text. We
|
||||
// pair them by toolCallId and attach the resulting list to the next
|
||||
// assistant message (the one that follows the tool sequence).
|
||||
let pendingToolCalls: BrowserOSChatHistoryToolCall[] = []
|
||||
const pendingToolStarts = new Map<string, ClawEvent>()
|
||||
|
||||
// Accumulate thinking blocks across the turn — there can be multiple
|
||||
// (e.g., think → tool → think → tool → answer). We collapse them into
|
||||
// a single Reasoning block per assistant message so the UI shows one
|
||||
// collapsible per turn, with duration = first thinking → final answer.
|
||||
let pendingReasoningTexts: string[] = []
|
||||
let pendingReasoningFirstAt: number | null = null
|
||||
|
||||
// Accumulate user-side attachments. The reader emits them as separate
|
||||
// `user.attachment` events ordered immediately before the user.message
|
||||
// they belong to (same JSONL line). We flush them onto the next user
|
||||
// history item and reset the buffer alongside the per-turn buffers.
|
||||
let pendingAttachments: BrowserOSChatHistoryAttachment[] = []
|
||||
|
||||
for (const event of events) {
|
||||
if (event.type === 'user.attachment') {
|
||||
if (event.attachment) {
|
||||
pendingAttachments.push({
|
||||
kind: event.attachment.kind,
|
||||
mediaType: event.attachment.mediaType,
|
||||
dataUrl: event.attachment.dataUrl,
|
||||
name: event.attachment.name,
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if (event.type === 'agent.thinking') {
|
||||
const text = event.content.trim()
|
||||
if (text) pendingReasoningTexts.push(text)
|
||||
if (pendingReasoningFirstAt == null) {
|
||||
pendingReasoningFirstAt = event.createdAt
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if (event.type === 'agent.tool_use') {
|
||||
if (event.toolCallId) {
|
||||
pendingToolStarts.set(event.toolCallId, event)
|
||||
}
|
||||
// Keep order — record the tool call now with status pending; we'll
|
||||
// patch the result/error/duration when the matching tool_result arrives.
|
||||
const rawName = event.toolName ?? event.content
|
||||
const { label, subject } = buildToolLabel(rawName, event.toolArguments)
|
||||
pendingToolCalls.push({
|
||||
toolCallId: event.toolCallId,
|
||||
toolName: rawName,
|
||||
label,
|
||||
subject,
|
||||
status: 'completed', // optimistic; downgraded if a failed result arrives
|
||||
input: event.toolArguments,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
if (event.type === 'agent.tool_result') {
|
||||
// Find the matching tool_use entry by toolCallId and patch it
|
||||
const match = pendingToolCalls.find(
|
||||
(t) => t.toolCallId && t.toolCallId === event.toolCallId,
|
||||
)
|
||||
if (match) {
|
||||
if (event.isError) {
|
||||
match.status = 'failed'
|
||||
match.error = event.content
|
||||
} else {
|
||||
match.output = event.content
|
||||
}
|
||||
const start = event.toolCallId
|
||||
? pendingToolStarts.get(event.toolCallId)
|
||||
: undefined
|
||||
if (start) {
|
||||
match.durationMs = Math.max(0, event.createdAt - start.createdAt)
|
||||
if (event.toolCallId) pendingToolStarts.delete(event.toolCallId)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if (event.type !== 'user.message' && event.type !== 'agent.message') {
|
||||
continue
|
||||
}
|
||||
|
||||
let text = event.content.trim()
|
||||
if (!text) continue
|
||||
// Allow user messages with no text body when attachments are present —
|
||||
// the user can attach an image and rely on the model to describe it.
|
||||
if (!text) {
|
||||
if (event.type === 'user.message' && pendingAttachments.length > 0) {
|
||||
// fall through; the empty text is acceptable when paired with media
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Filter assistant heartbeats
|
||||
if (event.type === 'agent.message' && text.startsWith('HEARTBEAT')) continue
|
||||
@@ -279,12 +415,18 @@ function jsonlEventsToHistoryItems(
|
||||
.trim()
|
||||
.replace(/^User:\s*/i, '')
|
||||
} else {
|
||||
// Reset all per-turn buffers — they belong to a discarded turn
|
||||
pendingToolCalls = []
|
||||
pendingToolStarts.clear()
|
||||
pendingReasoningTexts = []
|
||||
pendingReasoningFirstAt = null
|
||||
pendingAttachments = []
|
||||
continue
|
||||
}
|
||||
if (!text) continue
|
||||
}
|
||||
|
||||
items.push({
|
||||
const item: BrowserOSChatHistoryItem = {
|
||||
id: `${sessionKey}:${seq}`,
|
||||
role: event.type === 'user.message' ? 'user' : 'assistant',
|
||||
text,
|
||||
@@ -292,13 +434,68 @@ function jsonlEventsToHistoryItems(
|
||||
messageSeq: seq,
|
||||
sessionKey,
|
||||
source,
|
||||
})
|
||||
}
|
||||
|
||||
if (event.type === 'agent.message') {
|
||||
// Pass through per-turn cost and token data
|
||||
if (event.costUsd) item.costUsd = event.costUsd
|
||||
if (event.tokensIn) item.tokensIn = event.tokensIn
|
||||
if (event.tokensOut) item.tokensOut = event.tokensOut
|
||||
|
||||
// Attach any tool calls that happened before this assistant message
|
||||
if (pendingToolCalls.length > 0) {
|
||||
item.toolCalls = pendingToolCalls
|
||||
pendingToolCalls = []
|
||||
pendingToolStarts.clear()
|
||||
}
|
||||
|
||||
// Attach accumulated thinking. Duration is from the first thinking
|
||||
// event to the final answer — the wall-clock time the user waited
|
||||
// through the model's reasoning loop.
|
||||
if (pendingReasoningTexts.length > 0) {
|
||||
const reasoning: BrowserOSChatHistoryReasoning = {
|
||||
text: pendingReasoningTexts.join('\n\n'),
|
||||
}
|
||||
if (pendingReasoningFirstAt != null) {
|
||||
reasoning.durationMs = Math.max(
|
||||
0,
|
||||
event.createdAt - pendingReasoningFirstAt,
|
||||
)
|
||||
}
|
||||
item.reasoning = reasoning
|
||||
pendingReasoningTexts = []
|
||||
pendingReasoningFirstAt = null
|
||||
}
|
||||
} else if (event.type === 'user.message') {
|
||||
// User messages reset all per-turn buffers — anything pending was
|
||||
// part of an earlier turn that had no final assistant message.
|
||||
pendingToolCalls = []
|
||||
pendingToolStarts.clear()
|
||||
pendingReasoningTexts = []
|
||||
pendingReasoningFirstAt = null
|
||||
|
||||
// Flush accumulated attachments onto this user message.
|
||||
if (pendingAttachments.length > 0) {
|
||||
item.attachments = pendingAttachments
|
||||
pendingAttachments = []
|
||||
}
|
||||
}
|
||||
|
||||
items.push(item)
|
||||
seq++
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
function sumCostFromEvents(events: ClawEvent[]): number {
|
||||
let cost = 0
|
||||
for (const e of events) {
|
||||
if (e.type === 'agent.message' && e.costUsd) cost += e.costUsd
|
||||
}
|
||||
return cost
|
||||
}
|
||||
|
||||
function encodeHistoryCursor(input: {
|
||||
sessionKey: string
|
||||
end: number
|
||||
@@ -330,6 +527,25 @@ function decodeHistoryCursor(
|
||||
}
|
||||
}
|
||||
|
||||
/** One dashboard row: live status plus a JSONL-derived rollup for a single agent. */
export interface AgentOverview {
  agentId: string
  /** Live status read from the ClawSession state machine. */
  status: AgentLiveStatus
  /** Most recent assistant message (truncated to 200 chars); null when the agent has no sessions or no assistant messages. */
  latestMessage: string | null
  /** `createdAt` of that message, falling back to the latest session's `updatedAt`; null when the agent has no sessions. */
  latestMessageAt: number | null
  /** Human-readable summary of recent tool activity; null when the agent has no sessions. */
  activitySummary: string | null
  /** Tool name from the live session state, when one is in flight. */
  currentTool: string | null
  /** Sum of per-turn `costUsd` across all of the agent's sessions. */
  totalCostUsd: number
  /** Number of recorded JSONL sessions for this agent. */
  sessionCount: number
}
|
||||
|
||||
/** Payload returned by getDashboard(): per-agent rows plus aggregate totals. */
export interface DashboardResponse {
  agents: AgentOverview[]
  summary: {
    /** Count of known agents, including those with zero sessions. */
    totalAgents: number
    /** Cost summed across every agent and every session. */
    totalCostUsd: number
  }
}
|
||||
|
||||
export class OpenClawService {
|
||||
private runtime: ContainerRuntime
|
||||
private cliClient: OpenClawCliClient
|
||||
@@ -349,6 +565,8 @@ export class OpenClawService {
|
||||
private lastRecoveryReason: OpenClawGatewayRecoveryReason | null = null
|
||||
private stopLogTail: (() => void) | null = null
|
||||
private lifecycleLock: Promise<void> = Promise.resolve()
|
||||
private clawSession = new ClawSession()
|
||||
private observer = new OpenClawObserver(this.clawSession)
|
||||
|
||||
private _jsonlReader: OpenClawJsonlReader | null = null
|
||||
private get jsonlReader(): OpenClawJsonlReader {
|
||||
@@ -418,6 +636,18 @@ export class OpenClawService {
|
||||
return this.hostPort
|
||||
}
|
||||
|
||||
/** Subscribe to real-time agent status changes from the ClawSession state machine. */
|
||||
onAgentStatusChange(
|
||||
listener: (agentId: string, state: AgentSessionState) => void,
|
||||
): () => void {
|
||||
return this.clawSession.onStateChange(listener)
|
||||
}
|
||||
|
||||
/** Read the current ClawSession state for an agent (read-only snapshot). */
getAgentState(agentId: string): AgentSessionState {
  const snapshot = this.clawSession.getState(agentId)
  return snapshot
}
|
||||
|
||||
// ── Lifecycle ────────────────────────────────────────────────────────
|
||||
|
||||
async setup(input: SetupInput, onLog?: (msg: string) => void): Promise<void> {
|
||||
@@ -586,6 +816,7 @@ export class OpenClawService {
|
||||
return this.withLifecycleLock('stop', async () => {
|
||||
logger.info('Stopping OpenClaw service', { hostPort: this.hostPort })
|
||||
this.controlPlaneStatus = 'disconnected'
|
||||
this.observer.disconnect()
|
||||
this.stopGatewayLogTail()
|
||||
await this.runtime.stopGateway()
|
||||
logger.info('OpenClaw container stopped')
|
||||
@@ -659,6 +890,7 @@ export class OpenClawService {
|
||||
|
||||
async shutdown(): Promise<void> {
|
||||
this.controlPlaneStatus = 'disconnected'
|
||||
this.observer.disconnect()
|
||||
this.stopGatewayLogTail()
|
||||
try {
|
||||
await this.runtime.stopGateway()
|
||||
@@ -896,6 +1128,75 @@ export class OpenClawService {
|
||||
}
|
||||
}
|
||||
|
||||
// ── Dashboard ──────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Build the dashboard payload: one overview row per known agent plus an
 * aggregate cost / agent-count summary.
 *
 * Live status and the in-flight tool name come from the ClawSession state
 * machine; everything else derives from the JSONL store. The latest
 * session's events are loaded once and reused for the latest message, the
 * activity summary, and that session's cost.
 */
getDashboard(): DashboardResponse {
  const agentIds = this.jsonlReader.listAgents()
  const agents: AgentOverview[] = []
  let totalCostUsd = 0

  for (const agentId of agentIds) {
    const live = this.clawSession.getState(agentId)
    const sessions = this.jsonlReader.listSessions(agentId)

    // Agents with no recorded sessions still get a row so the UI can
    // render them as present-but-empty.
    if (sessions.length === 0) {
      agents.push({
        agentId,
        status: live.status,
        latestMessage: null,
        latestMessageAt: null,
        activitySummary: null,
        currentTool: live.currentTool,
        totalCostUsd: 0,
        sessionCount: 0,
      })
      continue
    }

    const latestSession = sessions[0]
    // One read for the newest session; the events array feeds the latest
    // message lookup, the activity summary, and that session's cost.
    const events = this.jsonlReader.listBySession(agentId, latestSession.key)

    // Walk backwards to find the most recent assistant message.
    let latestMsg: ClawEvent | undefined
    for (let i = events.length - 1; latestMsg === undefined && i >= 0; i--) {
      if (events[i]?.type === 'agent.message') latestMsg = events[i]
    }

    // Cost = already-loaded latest session + stats for each older one.
    let agentCost = sumCostFromEvents(events)
    for (const older of sessions.slice(1)) {
      agentCost += this.jsonlReader.getSessionStats(
        agentId,
        older.key,
      ).totalCostUsd
    }
    totalCostUsd += agentCost

    agents.push({
      agentId,
      status: live.status,
      latestMessage: latestMsg?.content?.slice(0, 200) ?? null,
      latestMessageAt: latestMsg?.createdAt ?? latestSession.updatedAt,
      activitySummary: summarizeToolActivity(events),
      currentTool: live.currentTool,
      totalCostUsd: agentCost,
      sessionCount: sessions.length,
    })
  }

  return {
    agents,
    summary: { totalAgents: agentIds.length, totalCostUsd },
  }
}
|
||||
|
||||
// ── Chat Stream (HTTP) ───────────────────────────────────────────────
|
||||
|
||||
async chatStream(
|
||||
@@ -903,6 +1204,10 @@ export class OpenClawService {
|
||||
sessionKey: string,
|
||||
message: string,
|
||||
history: MonitoringChatTurn[] = [],
|
||||
options: {
|
||||
messageParts?: OpenClawChatContentPart[]
|
||||
signal?: AbortSignal
|
||||
} = {},
|
||||
): Promise<ReadableStream<OpenClawStreamEvent>> {
|
||||
await this.assertGatewayReady()
|
||||
const normalizedSessionKey = normalizeBrowserOSChatSessionKey(
|
||||
@@ -914,13 +1219,16 @@ export class OpenClawService {
|
||||
sessionKey: normalizedSessionKey,
|
||||
messageLength: message.length,
|
||||
historyLength: history.length,
|
||||
contentPartCount: options.messageParts?.length ?? 0,
|
||||
})
|
||||
return this.runControlPlaneCall(() =>
|
||||
this.httpClient.streamChat({
|
||||
agentId,
|
||||
sessionKey: normalizedSessionKey,
|
||||
message,
|
||||
messageParts: options.messageParts,
|
||||
history,
|
||||
signal: options.signal,
|
||||
}),
|
||||
)
|
||||
}
|
||||
@@ -1266,6 +1574,7 @@ export class OpenClawService {
|
||||
this.controlPlaneStatus = 'connected'
|
||||
this.lastGatewayError = null
|
||||
this.lastRecoveryReason = null
|
||||
this.ensureObserverConnected()
|
||||
return result
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
@@ -1277,6 +1586,19 @@ export class OpenClawService {
|
||||
}
|
||||
}
|
||||
|
||||
private ensureObserverConnected(): void {
|
||||
// Seed the ClawSession state machine from JSONL on first control plane
|
||||
// call. This gives every agent a correct initial status (working/idle)
|
||||
// before the WS observer has seen any events.
|
||||
if (!this.clawSession.isSeeded()) {
|
||||
this.clawSession.seedFromJsonl(this.jsonlReader)
|
||||
}
|
||||
|
||||
if (this.observer.isConnected()) return
|
||||
const url = `http://127.0.0.1:${this.hostPort}`
|
||||
this.observer.connect(url, this.token)
|
||||
}
|
||||
|
||||
private classifyControlPlaneError(
|
||||
error: unknown,
|
||||
): OpenClawGatewayRecoveryReason {
|
||||
@@ -1346,6 +1668,10 @@ export class OpenClawService {
|
||||
path: 'gateway.controlUi.allowInsecureAuth',
|
||||
value: true,
|
||||
},
|
||||
{
|
||||
path: 'gateway.controlUi.dangerouslyDisableDeviceAuth',
|
||||
value: true,
|
||||
},
|
||||
{
|
||||
path: 'gateway.controlUi.allowedOrigins',
|
||||
value: [
|
||||
|
||||
@@ -0,0 +1,61 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
import { getOpenClawService } from '../openclaw/openclaw-service'
|
||||
import { OutboundQueueService } from './outbound-queue-service'
|
||||
|
||||
let service: OutboundQueueService | null = null
|
||||
|
||||
/**
|
||||
* Lazy singleton — built on first access so the OpenClaw service is
|
||||
* already available. The queue subscribes to ClawSession state changes
|
||||
* via OpenClawService.onAgentStatusChange and dispatches through
|
||||
* OpenClawService.chatStream, so no extra wiring on the openclaw side.
|
||||
*/
|
||||
export function getOutboundQueueService(): OutboundQueueService {
|
||||
if (!service) {
|
||||
const openclaw = getOpenClawService()
|
||||
service = new OutboundQueueService({
|
||||
onAgentStatusChange: (listener) => openclaw.onAgentStatusChange(listener),
|
||||
getAgentState: (agentId) => openclaw.getAgentState(agentId),
|
||||
// Resolve the agent's existing user-chat session for queued sends
|
||||
// so we don't accidentally orphan the conversation by spawning a
|
||||
// fresh session per queued message. Only the very first message
|
||||
// for an agent (no prior session at all) falls back to a new key,
|
||||
// which mirrors what the existing /chat route does.
|
||||
resolveExistingSessionKey: (agentId) =>
|
||||
openclaw.resolveAgentSession(agentId).sessionKey ?? null,
|
||||
chatStream: ({
|
||||
agentId,
|
||||
sessionKey,
|
||||
message,
|
||||
history,
|
||||
messageParts,
|
||||
signal,
|
||||
}) =>
|
||||
openclaw.chatStream(agentId, sessionKey, message, history, {
|
||||
messageParts,
|
||||
signal,
|
||||
}),
|
||||
})
|
||||
}
|
||||
return service
|
||||
}
|
||||
|
||||
/** Tear down the singleton — wired into server shutdown. */
|
||||
export function shutdownOutboundQueueService(): void {
|
||||
if (service) {
|
||||
service.shutdown()
|
||||
service = null
|
||||
}
|
||||
}
|
||||
|
||||
export type {
|
||||
QueuedItem,
|
||||
QueuedItemAttachmentPreview,
|
||||
QueuedItemPublic,
|
||||
QueuedItemStatus,
|
||||
} from './outbound-queue-service'
|
||||
@@ -0,0 +1,289 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Per-agent FIFO queue of outbound chat messages. The user submits a
|
||||
* message via /claw/agents/:id/queue, the server holds it, and a worker
|
||||
* dispatches it through the existing chatStream path the moment the
|
||||
* agent's ClawSession status flips to idle.
|
||||
*
|
||||
* The queue lives in memory only — server restart loses pending items.
|
||||
* Persistence is a follow-up; the deliberate v1 trade-off is keeping the
|
||||
* dispatch reactive (single source of truth = ClawSession) and avoiding
|
||||
* a parallel store that could drift from the agent's actual state.
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'node:crypto'
|
||||
import { logger } from '../../../lib/logger'
|
||||
import type {
|
||||
AgentSessionState,
|
||||
SessionStateListener,
|
||||
} from '../openclaw/claw-session'
|
||||
import type { OpenClawChatContentPart } from '../openclaw/openclaw-http-client'
|
||||
import type { OpenClawStreamEvent } from '../openclaw/openclaw-types'
|
||||
|
||||
export type QueuedItemStatus = 'queued' | 'dispatching' | 'failed'
|
||||
|
||||
/** Lightweight attachment descriptor; safe to broadcast (no payload data). */
export interface QueuedItemAttachmentPreview {
  /** Broad attachment category. */
  kind: 'image' | 'file'
  /** MIME type of the underlying attachment. */
  mediaType: string
  /** Original filename, when the client supplied one. */
  name?: string
}
|
||||
|
||||
/** A single outbound message held in an agent's in-memory FIFO queue. */
export interface QueuedItem {
  /** Stable identifier — caller-supplied (for optimistic UI reconciliation) or a server UUID. */
  id: string
  /** Owning agent; items queue and dispatch per agent, FIFO. */
  agentId: string
  /** Plain text body — what we send through chatStream's `message` arg. */
  message: string
  /** Multimodal parts when attachments are present. */
  messageParts?: OpenClawChatContentPart[]
  /** Compact preview the SSE feed broadcasts; never includes data URLs. */
  attachmentsPreview: QueuedItemAttachmentPreview[]
  /** Explicit target session; when absent the dispatcher resolves the agent's existing session (or mints a fresh UUID). */
  sessionKey?: string
  /** Prior conversation turns, forwarded verbatim to chatStream. */
  history: Array<{ role: 'user' | 'assistant'; content: string }>
  status: QueuedItemStatus
  /** Failure message from the last dispatch attempt; set only when status is 'failed'. */
  error?: string
  /** Enqueue time (epoch ms). */
  createdAt: number
  /** Set when dispatch begins (epoch ms). */
  startedAt?: number
}
|
||||
|
||||
/** Public projection sent over the SSE feed — strips heavy fields. */
export interface QueuedItemPublic {
  id: string
  status: QueuedItemStatus
  /** Plain text body (messageParts/history/sessionKey are intentionally omitted). */
  message: string
  attachmentsPreview: QueuedItemAttachmentPreview[]
  /** Present only for failed items. */
  error?: string
  createdAt: number
  startedAt?: number
}
|
||||
|
||||
/** Internal SSE subscriber: receives full queue snapshots for one agent. */
interface QueueListener {
  /** Agent whose queue this listener observes. */
  agentId: string
  /** Push a complete snapshot of the agent's queue to the subscriber. */
  send(items: QueuedItemPublic[]): void
}
|
||||
|
||||
/** A "send" delegate — wraps OpenClawService.chatStream to avoid a hard dep. */
export type ChatStreamFn = (input: {
  agentId: string
  sessionKey: string
  message: string
  history: QueuedItem['history']
  messageParts?: OpenClawChatContentPart[]
  /** Abort signal — the queue aborts in-flight dispatches on shutdown. */
  signal?: AbortSignal
}) => Promise<ReadableStream<OpenClawStreamEvent>>
|
||||
|
||||
/** Injected collaborators — keeps the queue decoupled from OpenClawService. */
interface OutboundQueueServiceDeps {
  /** Subscribe to per-agent status transitions from the ClawSession SM. */
  onAgentStatusChange(listener: SessionStateListener): () => void
  /** Read the current ClawSession state for an agent. */
  getAgentState(agentId: string): AgentSessionState
  /**
   * Look up the agent's existing user-chat sessionKey, if any. The worker
   * uses this to keep queued sends on the same conversation thread —
   * generating a fresh UUID per queued message would orphan the prior
   * conversation by spawning a brand-new session each time.
   */
  resolveExistingSessionKey(agentId: string): string | null
  /** Send a chat — wraps OpenClawService.chatStream. */
  chatStream: ChatStreamFn
}
|
||||
|
||||
/**
 * In-memory, per-agent FIFO of outbound chat messages.
 *
 * enqueue() stores an item and broadcasts the new snapshot to SSE
 * subscribers; tryDispatch() sends the head 'queued' item through
 * deps.chatStream once the agent is not working. Success removes the
 * item; failure marks it 'failed' so the client can retry() or cancel()
 * it. Dispatch is also triggered reactively whenever the ClawSession
 * status flips to idle.
 */
export class OutboundQueueService {
  // agentId → FIFO list of items ('queued' | 'dispatching' | 'failed')
  private readonly queues = new Map<string, QueuedItem[]>()
  // Active SSE subscribers; each is scoped to a single agentId
  private readonly listeners = new Set<QueueListener>()
  // agentId → abort handle for the in-flight dispatch; doubles as the
  // re-entrancy guard so at most one dispatch runs per agent
  private readonly workerInflight = new Map<string, AbortController>()
  // Teardown hook for the agent-status subscription
  private unsubscribe: (() => void) | null = null

  constructor(private readonly deps: OutboundQueueServiceDeps) {
    // Reactive dispatch: drain the agent's queue as soon as it goes idle.
    this.unsubscribe = deps.onAgentStatusChange((agentId, state) => {
      if (state.status === 'idle') void this.tryDispatch(agentId)
    })
  }

  /**
   * Append a message to the agent's queue, broadcast the new snapshot,
   * and attempt immediate dispatch. Returns the stored item (with its
   * final id and 'queued' status).
   */
  enqueue(
    item: Omit<QueuedItem, 'id' | 'status' | 'createdAt'> & { id?: string },
  ): QueuedItem {
    // Caller-supplied ids let the browser keep its optimistic row and the
    // server snapshot reconciled on a single key — without that, SSE
    // can't dedupe the optimistic entry until the POST response lands
    // and the client learns the server-generated UUID.
    const list = this.queues.get(item.agentId) ?? []
    const id =
      item.id && !list.some((existing) => existing.id === item.id)
        ? item.id
        : randomUUID()
    const queued: QueuedItem = {
      ...item,
      id,
      status: 'queued',
      createdAt: Date.now(),
    }
    list.push(queued)
    this.queues.set(item.agentId, list)
    this.broadcast(item.agentId)
    void this.tryDispatch(item.agentId)
    return queued
  }

  /**
   * Remove a pending item. Refuses while the item is mid-dispatch — the
   * send may already have reached the gateway.
   */
  cancel(
    agentId: string,
    itemId: string,
  ): { ok: true } | { ok: false; reason: 'not_found' | 'dispatching' } {
    const list = this.queues.get(agentId) ?? []
    const idx = list.findIndex((i) => i.id === itemId)
    if (idx < 0) return { ok: false, reason: 'not_found' }
    const target = list[idx]
    if (!target) return { ok: false, reason: 'not_found' }
    if (target.status === 'dispatching') {
      return { ok: false, reason: 'dispatching' }
    }
    list.splice(idx, 1)
    this.queues.set(agentId, list)
    this.broadcast(agentId)
    return { ok: true }
  }

  /** Re-queue a 'failed' item (clearing its error) and kick the dispatcher. */
  retry(agentId: string, itemId: string): { ok: boolean } {
    const list = this.queues.get(agentId) ?? []
    const item = list.find((i) => i.id === itemId)
    if (!item || item.status !== 'failed') return { ok: false }
    item.status = 'queued'
    item.error = undefined
    this.broadcast(agentId)
    void this.tryDispatch(agentId)
    return { ok: true }
  }

  /** Public snapshot of the agent's queue (heavy fields stripped). */
  list(agentId: string): QueuedItemPublic[] {
    const items = this.queues.get(agentId) ?? []
    return items.map(toPublic)
  }

  /** Subscribe to per-agent queue state. Sends a snapshot immediately. */
  subscribe(
    agentId: string,
    send: (items: QueuedItemPublic[]) => void,
  ): () => void {
    const listener: QueueListener = { agentId, send }
    this.listeners.add(listener)
    try {
      send(this.list(agentId))
    } catch {
      // best effort
    }
    return () => {
      this.listeners.delete(listener)
    }
  }

  /** Push the agent's current snapshot to every listener watching it. */
  private broadcast(agentId: string): void {
    const snapshot = this.list(agentId)
    for (const listener of this.listeners) {
      if (listener.agentId !== agentId) continue
      try {
        listener.send(snapshot)
      } catch {
        // ignore — broken listeners GC themselves on next subscribe attempt
      }
    }
  }

  /**
   * Send the oldest 'queued' item for this agent, if the agent is free.
   * The workerInflight map guarantees at most one concurrent dispatch per
   * agent; after each attempt (success or failure) the queue re-drains
   * itself without waiting for the next status-change callback.
   */
  private async tryDispatch(agentId: string): Promise<void> {
    if (this.workerInflight.has(agentId)) return
    const list = this.queues.get(agentId) ?? []
    const head = list.find((i) => i.status === 'queued')
    if (!head) return

    // Don't fire if the agent isn't actually idle yet — even if the
    // listener happened to call us early during a state transition.
    const state = this.deps.getAgentState(agentId)
    if (state.status === 'working') return

    head.status = 'dispatching'
    head.startedAt = Date.now()
    this.broadcast(agentId)

    const abort = new AbortController()
    this.workerInflight.set(agentId, abort)

    try {
      // Resolution order: explicit sessionKey on the queued item ➜
      // the agent's existing user-chat session ➜ a fresh UUID for the
      // first-ever message. This prevents the queue from inadvertently
      // splintering an active conversation into a new session.
      const targetSessionKey =
        head.sessionKey ??
        this.deps.resolveExistingSessionKey(agentId) ??
        randomUUID()
      const stream = await this.deps.chatStream({
        agentId,
        sessionKey: targetSessionKey,
        message: head.message,
        history: head.history,
        messageParts: head.messageParts,
        signal: abort.signal,
      })
      // Drain the stream to completion so the gateway run finalizes
      // properly (writes the JSONL turn, releases the run controller).
      const reader = stream.getReader()
      try {
        while (true) {
          if (abort.signal.aborted) break
          const { done } = await reader.read()
          if (done) break
        }
      } finally {
        await reader.cancel().catch(() => {})
      }
      this.removeAndBroadcast(agentId, head.id)
    } catch (err) {
      const message = err instanceof Error ? err.message : String(err)
      logger.warn('OutboundQueue dispatch failed', {
        agentId,
        itemId: head.id,
        error: message,
      })
      head.status = 'failed'
      head.error = message
      this.broadcast(agentId)
    } finally {
      this.workerInflight.delete(agentId)
    }

    // If anything else is still queued and the agent's still idle, drain
    // it now without waiting for the next state-change callback.
    void this.tryDispatch(agentId)
  }

  /** Drop a successfully dispatched item and notify subscribers. */
  private removeAndBroadcast(agentId: string, itemId: string): void {
    const list = this.queues.get(agentId) ?? []
    this.queues.set(
      agentId,
      list.filter((i) => i.id !== itemId),
    )
    this.broadcast(agentId)
  }

  /** Abort in-flight dispatches and drop all state; wired into server shutdown. */
  shutdown(): void {
    this.unsubscribe?.()
    this.unsubscribe = null
    for (const abort of this.workerInflight.values()) abort.abort()
    this.workerInflight.clear()
    this.listeners.clear()
    this.queues.clear()
  }
}
|
||||
|
||||
function toPublic(item: QueuedItem): QueuedItemPublic {
|
||||
return {
|
||||
id: item.id,
|
||||
status: item.status,
|
||||
message: item.message,
|
||||
attachmentsPreview: item.attachmentsPreview,
|
||||
error: item.error,
|
||||
createdAt: item.createdAt,
|
||||
startedAt: item.startedAt,
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,7 @@ import {
|
||||
configureVmRuntime,
|
||||
getOpenClawService,
|
||||
} from './api/services/openclaw/openclaw-service'
|
||||
import { shutdownOutboundQueueService } from './api/services/queue'
|
||||
import { CdpBackend } from './browser/backends/cdp'
|
||||
import { Browser } from './browser/browser'
|
||||
import type { ServerConfig } from './config'
|
||||
@@ -144,6 +145,7 @@ export class Application {
|
||||
stop(reason?: string): void {
|
||||
logger.info('Shutting down server...', { reason })
|
||||
stopSkillSync()
|
||||
shutdownOutboundQueueService()
|
||||
getOpenClawService()
|
||||
.shutdown()
|
||||
.catch(() => {})
|
||||
|
||||
@@ -70,6 +70,56 @@ export class MonitoringService {
|
||||
return this.registry.getActive(agentId)
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve when no monitoring session is active for `agentId`. Used by the
|
||||
* chat route to gate user-chat sends behind any in-flight cron / hook turn
|
||||
* without rejecting the client outright.
|
||||
*
|
||||
* Resolves immediately if the agent is already free. Otherwise registers
|
||||
* a one-shot listener on the session-end event and resolves when it
|
||||
* fires. Rejects with a TimeoutError-shaped Error after `timeoutMs`.
|
||||
*/
|
||||
async waitForSessionFree(
|
||||
agentId: string,
|
||||
options: { timeoutMs?: number } = {},
|
||||
): Promise<void> {
|
||||
if (!this.registry.getActive(agentId)) return
|
||||
|
||||
const timeoutMs = options.timeoutMs ?? 30_000
|
||||
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
let timer: ReturnType<typeof setTimeout> | null = null
|
||||
let unsubscribe: (() => void) | null = null
|
||||
|
||||
const cleanup = () => {
|
||||
if (timer) clearTimeout(timer)
|
||||
unsubscribe?.()
|
||||
}
|
||||
|
||||
timer = setTimeout(() => {
|
||||
cleanup()
|
||||
reject(
|
||||
new Error(
|
||||
`Timed out waiting for agent "${agentId}" to become free after ${timeoutMs}ms`,
|
||||
),
|
||||
)
|
||||
}, timeoutMs)
|
||||
|
||||
unsubscribe = this.registry.onSessionEnd(agentId, () => {
|
||||
if (this.registry.getActive(agentId)) return
|
||||
cleanup()
|
||||
resolve()
|
||||
})
|
||||
|
||||
// Re-check after listener registration to close a race where the
|
||||
// session ended between the initial getActive() and the subscribe.
|
||||
if (!this.registry.getActive(agentId)) {
|
||||
cleanup()
|
||||
resolve()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
resolveSessionForMcpRequest(
|
||||
explicitAgentId?: string,
|
||||
): { agentId: string; monitoringSessionId: string } | undefined {
|
||||
|
||||
@@ -5,11 +5,17 @@ interface ActiveMonitoringSession {
|
||||
source: MonitoringSessionContext['source']
|
||||
}
|
||||
|
||||
type SessionEndListener = () => void
|
||||
|
||||
export class MonitoringSessionRegistry {
|
||||
private readonly activeSessionsByAgent = new Map<
|
||||
string,
|
||||
ActiveMonitoringSession
|
||||
>()
|
||||
private readonly endListenersByAgent = new Map<
|
||||
string,
|
||||
Set<SessionEndListener>
|
||||
>()
|
||||
|
||||
setActive(
|
||||
agentId: string,
|
||||
@@ -19,6 +25,28 @@ export class MonitoringSessionRegistry {
|
||||
this.activeSessionsByAgent.set(agentId, { monitoringSessionId, source })
|
||||
}
|
||||
|
||||
/**
|
||||
* Subscribe to "session ended for this agent" events. The listener fires
|
||||
* once per termination — `clearIfMatches` is the only place that drops an
|
||||
* active session, so each clear notifies all current listeners. Returns an
|
||||
* unsubscribe function. Used by `waitForSessionFree` to gate user-chat
|
||||
* sends behind in-flight cron / hook turns without polling.
|
||||
*/
|
||||
onSessionEnd(agentId: string, listener: SessionEndListener): () => void {
|
||||
let listeners = this.endListenersByAgent.get(agentId)
|
||||
if (!listeners) {
|
||||
listeners = new Set()
|
||||
this.endListenersByAgent.set(agentId, listeners)
|
||||
}
|
||||
listeners.add(listener)
|
||||
return () => {
|
||||
listeners?.delete(listener)
|
||||
if (listeners && listeners.size === 0) {
|
||||
this.endListenersByAgent.delete(agentId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** The monitoring session id currently active for `agentId`, if any. */
getActive(agentId: string): string | undefined {
  const active = this.activeSessionsByAgent.get(agentId)
  return active === undefined ? undefined : active.monitoringSessionId
}
|
||||
@@ -64,5 +92,16 @@ export class MonitoringSessionRegistry {
|
||||
return
|
||||
}
|
||||
this.activeSessionsByAgent.delete(agentId)
|
||||
const listeners = this.endListenersByAgent.get(agentId)
|
||||
if (listeners) {
|
||||
// Snapshot the set: listeners commonly unsubscribe themselves inside
|
||||
// their own callback (one-shot waiters), which would mutate the live
|
||||
// set mid-iteration.
|
||||
for (const listener of [...listeners]) {
|
||||
try {
|
||||
listener()
|
||||
} catch {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,325 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 BrowserOS
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*
|
||||
* Maps raw tool names + arguments to human-readable activity labels for
|
||||
* the chat UI activity view. The MCP ToolRegistry is the source of truth
|
||||
* for tool *existence*; this file is the editorial layer that turns
|
||||
* snake_case identifiers into agent-speak verbs.
|
||||
*/
|
||||
|
||||
/**
 * Editorial verb table: snake_case tool name → past-tense label for the
 * activity feed. Grouped by tool family. Keep labels short; the subject
 * (URL, query, element, …) is appended separately by the caller.
 */
const VERB_OVERRIDES: Record<string, string> = {
  // Navigation
  navigate_page: 'Navigated to',
  new_page: 'Opened tab',
  new_hidden_page: 'Opened tab',
  show_page: 'Showed tab',
  close_page: 'Closed tab',
  list_pages: 'Listed open tabs',
  get_active_page: 'Got active tab',
  move_page: 'Moved tab',
  group_tabs: 'Grouped tabs',

  // Page reading
  take_snapshot: 'Captured page snapshot',
  take_enhanced_snapshot: 'Captured detailed snapshot',
  get_page_content: 'Read page content',
  get_page_links: 'Extracted page links',
  get_dom: 'Read page DOM',
  search_dom: 'Searched page DOM',
  take_screenshot: 'Took screenshot',

  // Input
  click: 'Clicked',
  click_at: 'Clicked at coordinates',
  hover: 'Hovered',
  hover_at: 'Hovered at coordinates',
  type_at: 'Typed at coordinates',
  drag_at: 'Dragged',
  focus: 'Focused element',
  fill: 'Filled field',
  clear: 'Cleared field',
  check: 'Checked box',
  uncheck: 'Unchecked box',
  press_key: 'Pressed key',
  upload_file: 'Uploaded file',

  // Console / scripts
  evaluate_script: 'Ran script',
  get_console_logs: 'Read console logs',

  // History / bookmarks
  search_history: 'Searched history',
  get_recent_history: 'Read recent history',
  delete_history_url: 'Deleted history entry',
  delete_history_range: 'Deleted history range',
  get_bookmarks: 'Listed bookmarks',
  create_bookmark: 'Created bookmark',
  remove_bookmark: 'Removed bookmark',
  update_bookmark: 'Updated bookmark',
  move_bookmark: 'Moved bookmark',
  search_bookmarks: 'Searched bookmarks',

  // Filesystem (sandboxed)
  read_file: 'Read file',
  write_file: 'Wrote file',
  find_files: 'Searched files',

  // Memory
  read_soul: 'Read soul memory',
  read_core: 'Read core memory',
  write_memory: 'Wrote memory',
  search_memory: 'Searched memory',
  update_soul: 'Updated soul memory',
  update_core: 'Updated core memory',

  // Web
  web_search: 'Searched the web',
  web_fetch: 'Fetched URL',

  // Klavis / external apps (Strata)
  connector_mcp_servers: 'Listed connected apps',
  discover_server_categories_or_actions: 'Browsed available actions',
  get_category_actions: 'Listed actions',
  get_action_details: 'Looked up action',
  execute_action: 'Ran external action',
  search_documentation: 'Searched docs',
  handle_auth_failure: 'Handled auth issue',

  // Suggestions
  suggest_schedule: 'Suggested schedule',
  suggest_app_connection: 'Suggested app connect',

  // BrowserOS info
  browseros_info: 'Read BrowserOS info',

  // Windows
  list_windows: 'Listed windows',
  focus_window: 'Focused window',
  close_window: 'Closed window',
  create_window: 'Created window',
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
// Helpers
|
||||
// ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
function asString(value: unknown): string | undefined {
|
||||
return typeof value === 'string' && value.length > 0 ? value : undefined
|
||||
}
|
||||
|
||||
function stringField(
|
||||
input: Record<string, unknown>,
|
||||
...keys: string[]
|
||||
): string | undefined {
|
||||
for (const k of keys) {
|
||||
const v = asString(input[k])
|
||||
if (v) return v
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
function truncate(text: string | undefined, max: number): string | undefined {
|
||||
if (!text) return undefined
|
||||
return text.length > max ? `${text.slice(0, max - 1)}…` : text
|
||||
}
|
||||
|
||||
function quote(value: string | undefined): string | undefined {
|
||||
if (!value) return undefined
|
||||
return `"${truncate(value, 60)}"`
|
||||
}
|
||||
|
||||
function basename(path: string | undefined): string | undefined {
|
||||
if (!path) return undefined
|
||||
const parts = path.split(/[/\\]/).filter(Boolean)
|
||||
return parts[parts.length - 1] ?? path
|
||||
}
|
||||
|
||||
function formatUrl(value: unknown): string | undefined {
|
||||
const url = asString(value)
|
||||
if (!url) return undefined
|
||||
try {
|
||||
const parsed = new URL(url)
|
||||
const host = parsed.host
|
||||
const path = parsed.pathname === '/' ? '' : parsed.pathname
|
||||
const display = path && path.length > 0 ? `${host}${path}` : host
|
||||
return truncate(display, 60)
|
||||
} catch {
|
||||
return truncate(url, 60)
|
||||
}
|
||||
}
|
||||
|
||||
function coords(x: unknown, y: unknown): string | undefined {
|
||||
if (typeof x === 'number' && typeof y === 'number') {
|
||||
return `${Math.round(x)}, ${Math.round(y)}`
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────
// Subject extractors
// ──────────────────────────────────────────────────────────────────────

// Maps a tool call's raw input object to a short, human-readable "subject"
// (query, URL, element description, …); undefined means "no useful subject".
type SubjectExtractor = (input: Record<string, unknown>) => string | undefined

// Per-tool subject extractors, keyed by canonical (prefix-stripped) tool
// name. Tools without an entry render with their verb label alone.
const SUBJECT_EXTRACTORS: Record<string, SubjectExtractor> = {
  // URL-bearing tools
  new_page: (i) => formatUrl(i.url),
  new_hidden_page: (i) => formatUrl(i.url),
  navigate_page: (i) => {
    // History-style navigation actions carry no URL worth showing.
    const action = asString(i.action)
    if (action === 'back') return 'back'
    if (action === 'forward') return 'forward'
    if (action === 'reload') return 'reload'
    return formatUrl(i.url)
  },
  web_fetch: (i) => formatUrl(i.url),

  // Search queries
  web_search: (i) => quote(stringField(i, 'query', 'q')),
  search_history: (i) => quote(stringField(i, 'query', 'text')),
  search_bookmarks: (i) => quote(stringField(i, 'query', 'text')),
  search_memory: (i) => quote(stringField(i, 'query', 'q')),
  search_dom: (i) => quote(stringField(i, 'query', 'selector')),
  search_documentation: (i) => quote(stringField(i, 'query', 'q')),
  find_files: (i) => quote(stringField(i, 'pattern', 'query')),

  // Element interactions
  click: (i) => stringField(i, 'element'),
  hover: (i) => stringField(i, 'element'),
  focus: (i) => stringField(i, 'element'),
  clear: (i) => stringField(i, 'element'),
  check: (i) => stringField(i, 'element'),
  uncheck: (i) => stringField(i, 'element'),
  fill: (i) => {
    // Show "element: text" when both are present; otherwise whichever exists.
    const target = stringField(i, 'element')
    const text = stringField(i, 'text')
    if (target && text) return `${target}: ${truncate(text, 40)}`
    return target ?? truncate(text, 40)
  },
  press_key: (i) => stringField(i, 'key'),

  // Coordinate-based input
  click_at: (i) => coords(i.x, i.y),
  hover_at: (i) => coords(i.x, i.y),
  type_at: (i) => {
    // Show "x, y: text" when both are present; otherwise whichever exists.
    const at = coords(i.x, i.y)
    const text = stringField(i, 'text')
    if (at && text) return `${at}: ${truncate(text, 40)}`
    return at ?? truncate(text, 40)
  },
  drag_at: (i) => {
    const from = coords(i.fromX, i.fromY)
    const to = coords(i.toX, i.toY)
    if (from && to) return `${from} → ${to}`
    return from ?? to
  },

  // Tab management — page IDs render as "tab N" when numeric.
  show_page: (i) => {
    const page = i.page
    return typeof page === 'number' ? `tab ${page}` : asString(page)
  },
  close_page: (i) => {
    const page = i.page
    return typeof page === 'number' ? `tab ${page}` : asString(page)
  },
  move_page: (i) => {
    const page = i.page
    return typeof page === 'number' ? `tab ${page}` : asString(page)
  },

  // Page reads (take_snapshot, take_enhanced_snapshot, get_page_content,
  // get_page_links, get_dom, take_screenshot) intentionally omit a
  // subject — the only argument is a numeric page ID that's internal
  // to the agent and meaningless to the user ("tab 4" tells them nothing).
  // The verb alone communicates what happened.

  // External actions via Strata
  execute_action: (i) => {
    const server = stringField(i, 'server_name')
    const action = stringField(i, 'action_name')
    if (server && action) return `${server} · ${action}`
    return action ?? server
  },
  get_category_actions: (i) => stringField(i, 'category_name', 'server_name'),
  get_action_details: (i) => stringField(i, 'action_name'),
  discover_server_categories_or_actions: (i) =>
    stringField(i, 'server_name', 'category_name'),
  connector_mcp_servers: (i) => stringField(i, 'server_name'),

  // Filesystem — show only the file name, not the full path.
  read_file: (i) => basename(stringField(i, 'path')),
  write_file: (i) => basename(stringField(i, 'path')),

  // Memory writes — show first chars of content
  write_memory: (i) => truncate(stringField(i, 'content', 'text'), 40),
  update_soul: (i) => truncate(stringField(i, 'content'), 40),
  update_core: (i) => truncate(stringField(i, 'content'), 40),

  // Bookmarks
  create_bookmark: (i) => stringField(i, 'title') ?? formatUrl(i.url),
  remove_bookmark: (i) => stringField(i, 'id', 'title'),
  update_bookmark: (i) => stringField(i, 'id', 'title'),
  move_bookmark: (i) => stringField(i, 'id', 'title'),

  // History
  delete_history_url: (i) => formatUrl(i.url),
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────
// Public API
// ──────────────────────────────────────────────────────────────────────

/** Human-readable rendering of a tool call for the chat activity view. */
export interface ToolLabelResult {
  // Verb-style label, e.g. a curated override or the humanized tool name.
  label: string
  // Optional short argument summary (quoted query, compact URL, element, …).
  subject?: string
}
|
||||
|
||||
/**
|
||||
* Strip MCP namespace prefixes (e.g. "browseros__", "mcp_") to find the
|
||||
* canonical tool name used in the override maps.
|
||||
*/
|
||||
function canonicalName(rawName: string): string {
|
||||
return rawName.replace(/^browseros__/, '').replace(/^mcp_/, '')
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a snake_case tool name into Sentence-case English as a fallback
|
||||
* when no curated override exists.
|
||||
*/
|
||||
function humanizeToolName(rawName: string): string {
|
||||
const stripped = canonicalName(rawName)
|
||||
const words = stripped.split(/[_-]/).filter((w) => w.length > 0)
|
||||
if (words.length === 0) return rawName
|
||||
const first = words[0]!
|
||||
return [
|
||||
first.charAt(0).toUpperCase() + first.slice(1),
|
||||
...words.slice(1),
|
||||
].join(' ')
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a human-readable label and subject string for a tool call,
|
||||
* suitable for rendering in the chat activity view.
|
||||
*/
|
||||
export function buildToolLabel(
|
||||
rawName: string,
|
||||
input?: Record<string, unknown>,
|
||||
): ToolLabelResult {
|
||||
const canonical = canonicalName(rawName)
|
||||
const label =
|
||||
VERB_OVERRIDES[canonical] ??
|
||||
VERB_OVERRIDES[rawName] ??
|
||||
humanizeToolName(rawName)
|
||||
|
||||
const extractor = Object.hasOwn(SUBJECT_EXTRACTORS, canonical)
|
||||
? SUBJECT_EXTRACTORS[canonical]
|
||||
: Object.hasOwn(SUBJECT_EXTRACTORS, rawName)
|
||||
? SUBJECT_EXTRACTORS[rawName]
|
||||
: undefined
|
||||
const subject = extractor && input ? extractor(input) : undefined
|
||||
|
||||
return { label, subject }
|
||||
}
|
||||
@@ -16,6 +16,9 @@ describe('createOpenClawRoutes', () => {
|
||||
const actualOpenClawService = await import(
|
||||
'../../../src/api/services/openclaw/openclaw-service'
|
||||
)
|
||||
const actualMonitoringService = await import(
|
||||
'../../../src/monitoring/service'
|
||||
)
|
||||
const chatStream = mock(
|
||||
async () =>
|
||||
new ReadableStream({
|
||||
@@ -41,6 +44,24 @@ describe('createOpenClawRoutes', () => {
|
||||
}) as never,
|
||||
}))
|
||||
|
||||
mock.module('../../../src/monitoring/service', () => ({
|
||||
...actualMonitoringService,
|
||||
getMonitoringService: () =>
|
||||
({
|
||||
waitForSessionFree: async () => undefined,
|
||||
startSession: async () => ({
|
||||
monitoringSessionId: 'm-1',
|
||||
agentId: 'research',
|
||||
sessionKey: 'session-123',
|
||||
originalPrompt: 'hi',
|
||||
chatHistory: [],
|
||||
startedAt: new Date().toISOString(),
|
||||
source: 'openclaw-agent-chat' as const,
|
||||
}),
|
||||
finalizeSession: async () => undefined,
|
||||
}) as never,
|
||||
}))
|
||||
|
||||
const { createOpenClawRoutes } = await import(
|
||||
'../../../src/api/routes/openclaw'
|
||||
)
|
||||
@@ -59,7 +80,15 @@ describe('createOpenClawRoutes', () => {
|
||||
expect(response.status).toBe(200)
|
||||
expect(response.headers.get('Content-Type')).toContain('text/event-stream')
|
||||
expect(response.headers.get('X-Session-Key')).toBe('session-123')
|
||||
expect(chatStream).toHaveBeenCalledWith('research', 'session-123', 'hi', [])
|
||||
expect(chatStream).toHaveBeenCalledWith(
|
||||
'research',
|
||||
'session-123',
|
||||
'hi',
|
||||
[],
|
||||
{
|
||||
messageParts: undefined,
|
||||
},
|
||||
)
|
||||
expect(await response.text()).toBe(
|
||||
'data: {"type":"text-delta","data":{"text":"Hello"}}\n\n' +
|
||||
'data: {"type":"done","data":{"text":"Hello"}}\n\n' +
|
||||
@@ -71,6 +100,9 @@ describe('createOpenClawRoutes', () => {
|
||||
const actualOpenClawService = await import(
|
||||
'../../../src/api/services/openclaw/openclaw-service'
|
||||
)
|
||||
const actualMonitoringService = await import(
|
||||
'../../../src/monitoring/service'
|
||||
)
|
||||
const chatStream = mock(
|
||||
async () =>
|
||||
new ReadableStream({
|
||||
@@ -92,6 +124,24 @@ describe('createOpenClawRoutes', () => {
|
||||
}) as never,
|
||||
}))
|
||||
|
||||
mock.module('../../../src/monitoring/service', () => ({
|
||||
...actualMonitoringService,
|
||||
getMonitoringService: () =>
|
||||
({
|
||||
waitForSessionFree: async () => undefined,
|
||||
startSession: async () => ({
|
||||
monitoringSessionId: 'm-2',
|
||||
agentId: 'research',
|
||||
sessionKey: 'session-456',
|
||||
originalPrompt: 'Summarize what is blocked',
|
||||
chatHistory: [],
|
||||
startedAt: new Date().toISOString(),
|
||||
source: 'openclaw-agent-chat' as const,
|
||||
}),
|
||||
finalizeSession: async () => undefined,
|
||||
}) as never,
|
||||
}))
|
||||
|
||||
const { createOpenClawRoutes } = await import(
|
||||
'../../../src/api/routes/openclaw'
|
||||
)
|
||||
@@ -117,10 +167,11 @@ describe('createOpenClawRoutes', () => {
|
||||
'session-456',
|
||||
'Summarize what is blocked',
|
||||
history,
|
||||
{ messageParts: undefined },
|
||||
)
|
||||
})
|
||||
|
||||
it('rejects concurrent monitored chat requests for the same agent', async () => {
|
||||
it('returns 503 when waitForSessionFree times out for a busy agent', async () => {
|
||||
const actualOpenClawService = await import(
|
||||
'../../../src/api/services/openclaw/openclaw-service'
|
||||
)
|
||||
@@ -141,8 +192,11 @@ describe('createOpenClawRoutes', () => {
|
||||
...actualMonitoringService,
|
||||
getMonitoringService: () =>
|
||||
({
|
||||
getActiveSessionId: (agentId: string) =>
|
||||
agentId === 'research' ? 'existing-run' : undefined,
|
||||
waitForSessionFree: async () => {
|
||||
throw new Error(
|
||||
'Timed out waiting for agent "research" to become free after 30000ms',
|
||||
)
|
||||
},
|
||||
}) as never,
|
||||
}))
|
||||
|
||||
@@ -160,11 +214,11 @@ describe('createOpenClawRoutes', () => {
|
||||
}),
|
||||
})
|
||||
|
||||
expect(response.status).toBe(409)
|
||||
expect(response.status).toBe(503)
|
||||
expect(chatStream).not.toHaveBeenCalled()
|
||||
expect(await response.json()).toEqual({
|
||||
error:
|
||||
'A monitored chat session is already active for this agent. Wait for it to finish before starting another.',
|
||||
'Timed out waiting for agent "research" to become free after 30000ms',
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
"dev:watch:new": "./tools/dev/run.sh watch --new",
|
||||
"dev:manual": "./tools/dev/run.sh watch --manual",
|
||||
"dev:setup": "./tools/dev/setup.sh",
|
||||
"install:balpha": "make -C tools/alpha install",
|
||||
"test:env": "./tools/dev/run.sh test",
|
||||
"test:cleanup": "./tools/dev/run.sh cleanup",
|
||||
"start:server": "bun run --filter @browseros/server --elide-lines=0 start",
|
||||
|
||||
21
packages/browseros-agent/tools/alpha/Makefile
Normal file
21
packages/browseros-agent/tools/alpha/Makefile
Normal file
@@ -0,0 +1,21 @@
|
||||
# Build/install rules for the balpha dogfooding CLI.
BINARY := balpha
# Rebuild whenever any Go source or the module metadata changes.
SOURCES := $(shell find . -name '*.go') go.mod go.sum
# Install destination; override with `make install PREFIX=/usr/local/bin`.
PREFIX ?= $(HOME)/bin

$(BINARY): $(SOURCES)
	@echo "[build] Compiling $(BINARY)..."
	@go build -o $(BINARY) .

.PHONY: install test clean

# Copy the binary into PREFIX and ad-hoc codesign it (macOS; best-effort —
# codesign failures are ignored so Linux installs still work).
install: $(BINARY)
	@mkdir -p $(PREFIX)
	@cp $(BINARY) $(PREFIX)/$(BINARY)
	@codesign --force --sign - $(PREFIX)/$(BINARY) >/dev/null 2>&1 || true
	@echo "Installed $(PREFIX)/$(BINARY)"

test:
	@go test ./...

clean:
	@rm -f $(BINARY)
|
||||
156
packages/browseros-agent/tools/alpha/README.md
Normal file
156
packages/browseros-agent/tools/alpha/README.md
Normal file
@@ -0,0 +1,156 @@
|
||||
# balpha
|
||||
|
||||
Internal BrowserOS alpha dogfooding CLI for running the current checkout against a copied BrowserOS profile.
|
||||
|
||||
## What It Does
|
||||
|
||||
`balpha` starts a local BrowserOS dogfooding environment:
|
||||
|
||||
- Uses the BrowserOS repo path from config, then works from `packages/browseros-agent`.
|
||||
- Copies one installed BrowserOS profile into a separate dev profile under `~/.config/balpha/profile`.
|
||||
- Writes `apps/server/.env.production` and `apps/cli/.env.production` from config.
|
||||
- Runs the existing `tools/dev/setup.sh` setup flow.
|
||||
- Builds the WXT dev extension.
|
||||
- Launches `/Applications/BrowserOS.app` with the dev profile, the local extension, and the built-in server disabled.
|
||||
- Starts the local Bun server from `apps/server`.
|
||||
|
||||
It does not auto-pull on `start`. Use `balpha pull` when you want to refresh the checkout.
|
||||
|
||||
## Requirements
|
||||
|
||||
- macOS.
|
||||
- Go.
|
||||
- Bun.
|
||||
- BrowserOS installed at `/Applications/BrowserOS.app`.
|
||||
- A BrowserOS monorepo checkout, for example `~/code/browseros-project/browseros-test`.
|
||||
- `~/bin` or your chosen install directory on `PATH`.
|
||||
|
||||
## Install
|
||||
|
||||
From the BrowserOS monorepo root:
|
||||
|
||||
```bash
|
||||
cd packages/browseros-agent
|
||||
bun run install:balpha
|
||||
```
|
||||
|
||||
This builds `tools/alpha/balpha` and installs it to `~/bin/balpha`.
|
||||
|
||||
To install somewhere else:
|
||||
|
||||
```bash
|
||||
cd packages/browseros-agent/tools/alpha
|
||||
make install PREFIX=/usr/local/bin
|
||||
```
|
||||
|
||||
Check the binary:
|
||||
|
||||
```bash
|
||||
balpha --help
|
||||
```
|
||||
|
||||
## First-Time Setup
|
||||
|
||||
Run:
|
||||
|
||||
```bash
|
||||
balpha init
|
||||
```
|
||||
|
||||
`init` asks for:
|
||||
|
||||
- `Repo path`: the BrowserOS monorepo root, not `packages/browseros-agent`.
|
||||
- `BrowserOS binary`: defaults to `/Applications/BrowserOS.app/Contents/MacOS/BrowserOS`.
|
||||
- `Source profile`: selected from the installed BrowserOS profiles in `~/Library/Application Support/BrowserOS`.
|
||||
|
||||
Config is written to:
|
||||
|
||||
```text
|
||||
~/.config/balpha/config.yaml
|
||||
```
|
||||
|
||||
The dev profile defaults to:
|
||||
|
||||
```text
|
||||
~/.config/balpha/profile
|
||||
```
|
||||
|
||||
`init` also writes the generated production env files in the configured checkout.
|
||||
|
||||
## Start
|
||||
|
||||
```bash
|
||||
balpha start
|
||||
```
|
||||
|
||||
Each start:
|
||||
|
||||
- Warns if the configured checkout has uncommitted changes.
|
||||
- Imports the BrowserOS profile if the dev profile does not exist.
|
||||
- Rewrites production env files from config.
|
||||
- Auto-increments busy ports and saves the resolved values back to config.
|
||||
- Runs `tools/dev/setup.sh`.
|
||||
- Builds the WXT extension.
|
||||
- Starts BrowserOS and the local Bun server.
|
||||
|
||||
Use this when you want to refresh the copied profile before launching:
|
||||
|
||||
```bash
|
||||
balpha start --refresh-profile
|
||||
```
|
||||
|
||||
Use this for a headless launch:
|
||||
|
||||
```bash
|
||||
balpha start --headless
|
||||
```
|
||||
|
||||
Stop the environment with `Ctrl+C`.
|
||||
|
||||
## Update The Checkout
|
||||
|
||||
`balpha start` intentionally does not pull. To update the configured repo:
|
||||
|
||||
```bash
|
||||
balpha pull
|
||||
```
|
||||
|
||||
If the checkout has uncommitted changes, `pull` fails. To pull anyway:
|
||||
|
||||
```bash
|
||||
balpha pull --force
|
||||
```
|
||||
|
||||
## Refresh The Copied Profile
|
||||
|
||||
To overwrite the dev profile from the selected installed BrowserOS profile:
|
||||
|
||||
```bash
|
||||
balpha refresh-profile
|
||||
```
|
||||
|
||||
This removes and recreates `dev_user_data_dir`. It refuses to run if the dev user-data dir is the real BrowserOS user-data dir or lives inside it.
|
||||
|
||||
## Edit Config
|
||||
|
||||
```bash
|
||||
balpha config edit
|
||||
```
|
||||
|
||||
Important fields:
|
||||
|
||||
- `repo_path`: BrowserOS monorepo root.
|
||||
- `browseros_app_path`: BrowserOS executable to launch.
|
||||
- `source_user_data_dir`: installed BrowserOS user-data dir. Defaults to `~/Library/Application Support/BrowserOS`.
|
||||
- `source_profile_dir`: installed profile directory to copy.
|
||||
- `dev_user_data_dir`: separate dev user-data dir. Defaults to `~/.config/balpha/profile`.
|
||||
- `dev_profile_dir`: dev profile directory. Defaults to `Default`.
|
||||
- `ports`: CDP, BrowserOS server, and extension ports.
|
||||
- `production_env`: values written to `apps/server/.env.production` and `apps/cli/.env.production`.
|
||||
|
||||
## Safety Notes
|
||||
|
||||
- Do not point `dev_user_data_dir` at the real BrowserOS profile.
|
||||
- `balpha` does not pass `--use-mock-keychain`; decrypting the copied login data relies on launching the installed, signed BrowserOS app path.
|
||||
- Default ports are CDP `9015`, server `9115`, and extension `9315`.
|
||||
- Browser launch passes `--browseros-mcp-port`, `--browseros-server-port`, and `--browseros-proxy-port` to tolerate current switch differences.
|
||||
43
packages/browseros-agent/tools/alpha/browser/args.go
Normal file
43
packages/browseros-agent/tools/alpha/browser/args.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package browser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"browseros-alpha/config"
|
||||
)
|
||||
|
||||
// ArgsConfig describes everything needed to build the BrowserOS launch
// command line.
type ArgsConfig struct {
	Binary      string       // path to the BrowserOS executable
	AgentRoot   string       // packages/browseros-agent root (locates the dev extension)
	UserDataDir string       // dev user-data dir passed via --user-data-dir
	ProfileDir  string       // optional --profile-directory value; empty omits the switch
	Ports       config.Ports // CDP, server, and extension ports
	Headless    bool         // launch with --headless=new
}
||||
|
||||
func BuildArgs(cfg ArgsConfig) []string {
|
||||
args := []string{
|
||||
cfg.Binary,
|
||||
"--no-first-run",
|
||||
"--no-default-browser-check",
|
||||
"--show-component-extension-options",
|
||||
"--disable-browseros-server",
|
||||
"--disable-browseros-extensions",
|
||||
fmt.Sprintf("--remote-debugging-port=%d", cfg.Ports.CDP),
|
||||
// Keep all server aliases until installed BrowserOS apps converge on one switch.
|
||||
fmt.Sprintf("--browseros-mcp-port=%d", cfg.Ports.Server),
|
||||
fmt.Sprintf("--browseros-server-port=%d", cfg.Ports.Server),
|
||||
fmt.Sprintf("--browseros-proxy-port=%d", cfg.Ports.Server),
|
||||
fmt.Sprintf("--browseros-extension-port=%d", cfg.Ports.Extension),
|
||||
fmt.Sprintf("--user-data-dir=%s", cfg.UserDataDir),
|
||||
}
|
||||
if cfg.ProfileDir != "" {
|
||||
args = append(args, fmt.Sprintf("--profile-directory=%s", cfg.ProfileDir))
|
||||
}
|
||||
args = append(args, fmt.Sprintf("--load-extension=%s", filepath.Join(cfg.AgentRoot, "apps/agent/dist/chrome-mv3-dev")))
|
||||
if cfg.Headless {
|
||||
args = append(args, "--headless=new")
|
||||
}
|
||||
return append(args, "chrome://newtab")
|
||||
}
|
||||
61
packages/browseros-agent/tools/alpha/browser/args_test.go
Normal file
61
packages/browseros-agent/tools/alpha/browser/args_test.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package browser
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"browseros-alpha/config"
|
||||
)
|
||||
|
||||
// TestBuildArgs verifies every required launch switch appears in the
// generated argument list and that the mock keychain is never requested.
func TestBuildArgs(t *testing.T) {
	args := BuildArgs(ArgsConfig{
		Binary:      "/Applications/BrowserOS.app/Contents/MacOS/BrowserOS",
		AgentRoot:   "/repo/packages/browseros-agent",
		UserDataDir: "/tmp/balpha",
		ProfileDir:  "Default",
		Ports:       config.Ports{CDP: 9015, Server: 9115, Extension: 9315},
	})
	// Substring matching on the newline-joined args keeps assertions simple.
	joined := strings.Join(args, "\n")
	for _, want := range []string{
		"--remote-debugging-port=9015",
		"--browseros-mcp-port=9115",
		"--browseros-server-port=9115",
		"--browseros-proxy-port=9115",
		"--browseros-extension-port=9315",
		"--user-data-dir=/tmp/balpha",
		"--profile-directory=Default",
		"--disable-browseros-server",
		"--disable-browseros-extensions",
		"--load-extension=/repo/packages/browseros-agent/apps/agent/dist/chrome-mv3-dev",
		"chrome://newtab",
	} {
		if !strings.Contains(joined, want) {
			t.Fatalf("missing %s in\n%s", want, joined)
		}
	}
	// Copied login data depends on the real keychain, so --use-mock-keychain
	// must never be emitted.
	if strings.Contains(joined, "--use-mock-keychain") {
		t.Fatal("must not use mock keychain")
	}
}
|
||||
|
||||
// TestBuildArgsHeadless verifies the Headless option adds the new headless
// mode switch.
func TestBuildArgsHeadless(t *testing.T) {
	args := BuildArgs(ArgsConfig{
		Binary:      "/bin/browser",
		AgentRoot:   "/repo/packages/browseros-agent",
		UserDataDir: "/tmp/balpha",
		Ports:       config.Ports{CDP: 1, Server: 2, Extension: 3},
		Headless:    true,
	})
	if !contains(args, "--headless=new") {
		t.Fatalf("missing headless arg: %#v", args)
	}
}
|
||||
|
||||
// contains reports whether target is an exact element of values.
func contains(values []string, target string) bool {
	for i := range values {
		if values[i] == target {
			return true
		}
	}
	return false
}
|
||||
32
packages/browseros-agent/tools/alpha/browser/cdp.go
Normal file
32
packages/browseros-agent/tools/alpha/browser/cdp.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package browser
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// WaitForCDP polls the Chrome DevTools /json/version endpoint on 127.0.0.1
// until it answers HTTP 200, the attempt budget is exhausted, or ctx is
// cancelled. It reports whether the endpoint became reachable, polling at
// 500ms intervals with a 1s per-request timeout.
func WaitForCDP(ctx context.Context, port int, maxAttempts int) bool {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/json/version", port)
	client := &http.Client{Timeout: time.Second}

	for attempt := 0; attempt < maxAttempts; attempt++ {
		if ctx.Err() != nil {
			return false
		}
		if resp, err := client.Get(endpoint); err == nil {
			ok := resp.StatusCode == 200
			resp.Body.Close()
			if ok {
				return true
			}
		}
		// Sleep between attempts, but wake immediately on cancellation.
		select {
		case <-ctx.Done():
			return false
		case <-time.After(500 * time.Millisecond):
		}
	}
	return false
}
|
||||
54
packages/browseros-agent/tools/alpha/cmd/config.go
Normal file
54
packages/browseros-agent/tools/alpha/cmd/config.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"browseros-alpha/config"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Wire the `config` command group and its `edit` subcommand into the CLI.
func init() {
	configCmd.AddCommand(configEditCmd)
	rootCmd.AddCommand(configCmd)
}

// configCmd groups configuration-related subcommands.
var configCmd = &cobra.Command{
	Use:   "config",
	Short: "Manage balpha config",
}

// configEditCmd opens the config file in the user's editor, seeding it from
// defaults first when it does not exist yet.
var configEditCmd = &cobra.Command{
	Use:   "edit",
	Short: "Open balpha config in $EDITOR",
	RunE: func(cmd *cobra.Command, args []string) error {
		path, err := config.Path()
		if err != nil {
			return err
		}
		// Create a default config so the editor never opens an empty file.
		if _, err := os.Stat(path); os.IsNotExist(err) {
			home, err := os.UserHomeDir()
			if err != nil {
				return err
			}
			cfg := config.Defaults(home)
			if err := config.Save(path, cfg); err != nil {
				return err
			}
		}
		// Fall back to vi when $EDITOR is unset.
		editor := os.Getenv("EDITOR")
		if editor == "" {
			editor = "vi"
		}
		// Run the editor interactively, inheriting the terminal's stdio.
		c := exec.Command(editor, path)
		c.Stdin = os.Stdin
		c.Stdout = os.Stdout
		c.Stderr = os.Stderr
		if err := c.Run(); err != nil {
			return fmt.Errorf("editor failed: %w", err)
		}
		return nil
	},
}
|
||||
99
packages/browseros-agent/tools/alpha/cmd/init.go
Normal file
99
packages/browseros-agent/tools/alpha/cmd/init.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"browseros-alpha/config"
|
||||
"browseros-alpha/pipeline"
|
||||
"browseros-alpha/profile"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Register the `init` command.
func init() {
	rootCmd.AddCommand(initCmd)
}

// initCmd interactively creates (or updates) the balpha config — repo path,
// BrowserOS binary, and which installed profile to copy — then persists the
// config file and the generated production env files.
var initCmd = &cobra.Command{
	Use:   "init",
	Short: "Create or update balpha config",
	RunE: func(cmd *cobra.Command, args []string) error {
		home, err := os.UserHomeDir()
		if err != nil {
			return err
		}
		cfg := config.Defaults(home)
		// Prefer the current directory when it already looks like a checkout.
		if cwd, err := os.Getwd(); err == nil && looksLikeRepo(cwd) {
			cfg.RepoPath = cwd
		}
		reader := bufio.NewReader(os.Stdin)
		cfg.RepoPath = prompt(reader, "Repo path", cfg.RepoPath)
		cfg.BrowserOSAppPath = prompt(reader, "BrowserOS binary", cfg.BrowserOSAppPath)
		// Profile discovery is best-effort; chooseProfile handles an empty list.
		profiles, _ := profile.ReadProfiles(cfg.SourceUserDataDir)
		cfg.SourceProfileDir = chooseProfile(reader, profiles)
		cfg.Resolve()
		if err := cfg.Validate(); err != nil {
			return err
		}
		path, err := config.Path()
		if err != nil {
			return err
		}
		if err := config.Save(path, cfg); err != nil {
			return err
		}
		// Also materialize the .env.production files from the fresh config.
		if err := pipeline.WriteProductionEnvFiles(cfg.AgentRoot(), cfg); err != nil {
			return err
		}
		fmt.Printf("Config written: %s\nRun: balpha start\n", path)
		return nil
	},
}
|
||||
|
||||
// prompt reads one line from r, showing `current` as the bracketed default;
// an empty answer keeps the default. Tilde paths in the answer are expanded
// against the user's home directory.
func prompt(r *bufio.Reader, label string, current string) string {
	fmt.Printf("%s [%s]: ", label, current)
	line, _ := r.ReadString('\n')
	line = strings.TrimSpace(line)
	if line == "" {
		return current
	}
	home, _ := os.UserHomeDir()
	return config.ExpandTilde(line, home)
}
|
||||
|
||||
// chooseProfile lists the discovered BrowserOS profiles and loops until the
// user selects one by 1-based number; empty input picks the first profile.
// With no profiles discovered, it falls back to the conventional "Default"
// directory name.
func chooseProfile(r *bufio.Reader, profiles []profile.BrowserProfile) string {
	if len(profiles) == 0 {
		return "Default"
	}
	fmt.Printf("Found %d BrowserOS profiles:\n", len(profiles))
	for i, p := range profiles {
		email := ""
		if p.Email != "" {
			email = " " + p.Email
		}
		fmt.Printf(" %d. %s (%s)%s\n", i+1, p.Name, p.Dir, email)
	}
	for {
		fmt.Print("Select source profile [1]: ")
		line, _ := r.ReadString('\n')
		line = strings.TrimSpace(line)
		if line == "" {
			return profiles[0].Dir
		}
		n, err := strconv.Atoi(line)
		if err == nil && n >= 1 && n <= len(profiles) {
			return profiles[n-1].Dir
		}
		fmt.Println("Choose a listed number.")
	}
}
|
||||
|
||||
// looksLikeRepo reports whether path is a BrowserOS monorepo root, detected
// by the presence of the browseros-agent package manifest.
func looksLikeRepo(path string) bool {
	manifest := filepath.Join(path, "packages/browseros-agent/package.json")
	if _, err := os.Stat(manifest); err != nil {
		return false
	}
	return true
}
|
||||
47
packages/browseros-agent/tools/alpha/cmd/pull.go
Normal file
47
packages/browseros-agent/tools/alpha/cmd/pull.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"browseros-alpha/pipeline"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// pullForce allows pulling over a dirty checkout when --force is given.
var pullForce bool

// Register the `pull` command and its --force flag.
func init() {
	pullCmd.Flags().BoolVar(&pullForce, "force", false, "Pull even when the checkout has uncommitted changes")
	rootCmd.AddCommand(pullCmd)
}

// pullCmd refreshes the configured BrowserOS checkout via git, refusing to
// run over uncommitted changes unless --force is set.
var pullCmd = &cobra.Command{
	Use:   "pull",
	Short: "Refresh the configured BrowserOS checkout",
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := loadConfig()
		if err != nil {
			return err
		}
		runner := pipeline.ExecRunner{}
		// Keep env files in sync with config even when the pull is refused.
		if err := pipeline.WriteProductionEnvFiles(cfg.AgentRoot(), cfg); err != nil {
			return err
		}
		branch := pipeline.Branch(cfg.RepoPath, runner)
		head, _ := pipeline.Head(cfg.RepoPath, runner)
		fmt.Printf("Repo: %s %s %s\n", cfg.RepoPath, branch, head)
		dirty, err := pipeline.Dirty(cfg.RepoPath, runner)
		if err != nil {
			return err
		}
		if dirty && !pullForce {
			return fmt.Errorf("checkout has uncommitted changes; commit/stash them or use --force")
		}
		if err := pipeline.Pull(cfg.RepoPath, runner); err != nil {
			return err
		}
		newHead, _ := pipeline.Head(cfg.RepoPath, runner)
		fmt.Printf("Updated to %s\n", newHead)
		return nil
	},
}
|
||||
50
packages/browseros-agent/tools/alpha/cmd/refresh_profile.go
Normal file
50
packages/browseros-agent/tools/alpha/cmd/refresh_profile.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"browseros-alpha/config"
|
||||
"browseros-alpha/profile"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Register the `refresh-profile` command.
func init() {
	rootCmd.AddCommand(refreshProfileCmd)
}

// refreshProfileCmd re-copies the configured installed BrowserOS profile
// into the separate balpha dev profile, overwriting the previous copy.
var refreshProfileCmd = &cobra.Command{
	Use:   "refresh-profile",
	Short: "Copy the configured BrowserOS profile into the balpha dev profile",
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := loadConfig()
		if err != nil {
			return err
		}
		if err := profile.Import(profile.ImportConfig{
			SourceUserDataDir: cfg.SourceUserDataDir,
			SourceProfileDir:  cfg.SourceProfileDir,
			DevUserDataDir:    cfg.DevUserDataDir,
			DevProfileDir:     cfg.DevProfileDir,
		}); err != nil {
			return err
		}
		fmt.Printf("Profile refreshed: %s\n", cfg.DevUserDataDir)
		return nil
	},
}
|
||||
|
||||
// loadConfig reads and validates the balpha config, turning a missing or
// unreadable file into an actionable "run balpha init" error.
func loadConfig() (config.Config, error) {
	path, err := config.Path()
	if err != nil {
		return config.Config{}, err
	}
	cfg, err := config.Load(path)
	if err != nil {
		return config.Config{}, fmt.Errorf("missing config at %s; run balpha init: %w", path, err)
	}
	if err := cfg.Validate(); err != nil {
		return config.Config{}, err
	}
	return cfg, nil
}
|
||||
24
packages/browseros-agent/tools/alpha/cmd/root.go
Normal file
24
packages/browseros-agent/tools/alpha/cmd/root.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// rootCmd is the top-level `balpha` command; subcommands register
// themselves from their files' init functions.
var rootCmd = &cobra.Command{
	Use:               "balpha",
	Short:             "BrowserOS alpha dogfooding CLI",
	Long:              "balpha - BrowserOS alpha dogfooding CLI",
	CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
	// Errors are reported exactly once in Execute; keep cobra quiet.
	SilenceUsage:  true,
	SilenceErrors: true,
}

// Execute runs the CLI, printing any command error and exiting non-zero.
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
||||
173
packages/browseros-agent/tools/alpha/cmd/start.go
Normal file
173
packages/browseros-agent/tools/alpha/cmd/start.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"browseros-alpha/browser"
|
||||
"browseros-alpha/config"
|
||||
"browseros-alpha/pipeline"
|
||||
"browseros-alpha/proc"
|
||||
"browseros-alpha/profile"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var startRefreshProfile bool
|
||||
var startHeadless bool
|
||||
|
||||
func init() {
|
||||
startCmd.Flags().BoolVar(&startRefreshProfile, "refresh-profile", false, "Refresh copied BrowserOS profile before launch")
|
||||
startCmd.Flags().BoolVar(&startHeadless, "headless", false, "Run BrowserOS headless")
|
||||
rootCmd.AddCommand(startCmd)
|
||||
}
|
||||
|
||||
// startCmd launches the full alpha dogfooding environment: it imports (or
// reuses) the dev browser profile, writes production env files, resolves
// port conflicts, builds the agent, and then hands off to runEnvironment
// to supervise the browser and server processes.
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Start BrowserOS alpha dogfooding environment",
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := loadConfig()
		if err != nil {
			return err
		}
		agentRoot := cfg.AgentRoot()
		runner := pipeline.ExecRunner{}
		// A dirty checkout is only a warning; start proceeds with
		// whatever files are currently on disk.
		if dirty, err := pipeline.Dirty(cfg.RepoPath, runner); err == nil && dirty {
			fmt.Fprintln(os.Stderr, "warning: checkout has uncommitted changes; start will use current files")
		}
		// First run (or explicit --refresh-profile) copies the source
		// profile into the dev dir; otherwise only stale singleton
		// lock files are cleaned up before reuse.
		if startRefreshProfile || !exists(cfg.DevUserDataDir) {
			if err := profile.Import(profile.ImportConfig{
				SourceUserDataDir: cfg.SourceUserDataDir,
				SourceProfileDir:  cfg.SourceProfileDir,
				DevUserDataDir:    cfg.DevUserDataDir,
				DevProfileDir:     cfg.DevProfileDir,
			}); err != nil {
				return err
			}
		} else if err := profile.CleanupSingletons(cfg.DevUserDataDir); err != nil {
			return err
		}
		if err := pipeline.WriteProductionEnvFiles(agentRoot, cfg); err != nil {
			return err
		}
		// If configured ports are busy, ResolvePorts picks replacements;
		// the replacements are persisted so later commands agree on them.
		resolvedPorts, changed, err := proc.ResolvePorts(cfg.Ports)
		if err != nil {
			return err
		}
		cfg.Ports = resolvedPorts
		if changed {
			path, err := config.Path()
			if err != nil {
				return err
			}
			if err := config.Save(path, cfg); err != nil {
				return err
			}
			proc.LogMsgf(proc.TagInfo, "Busy ports detected; using CDP=%d Server=%d Extension=%d", cfg.Ports.CDP, cfg.Ports.Server, cfg.Ports.Extension)
		} else {
			proc.LogMsgf(proc.TagInfo, "Using ports CDP=%d Server=%d Extension=%d", cfg.Ports.CDP, cfg.Ports.Server, cfg.Ports.Extension)
		}
		if err := pipeline.Build(agentRoot, runner); err != nil {
			return err
		}
		return runEnvironment(cfg, agentRoot)
	},
}
|
||||
|
||||
// runEnvironment supervises the browser and dev-server processes until a
// termination signal arrives, then shuts them down gracefully, force-killing
// on a second signal or after a 10-second timeout. Always returns nil.
func runEnvironment(cfg config.Config, agentRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var wg sync.WaitGroup
	var managed []*proc.ManagedProc
	// Browser is not restarted on exit: closing the window ends the session.
	managed = append(managed, proc.StartManaged(ctx, &wg, proc.ProcConfig{
		Tag:     proc.TagBrowser,
		Dir:     agentRoot,
		Restart: false,
		Cmd: browser.BuildArgs(browser.ArgsConfig{
			Binary:      cfg.BrowserOSAppPath,
			AgentRoot:   agentRoot,
			UserDataDir: cfg.DevUserDataDir,
			ProfileDir:  cfg.DevProfileDir,
			Ports:       cfg.Ports,
			Headless:    startHeadless,
		}),
	}))
	proc.LogMsg(proc.TagServer, "Waiting for CDP...")
	// The CDP wait (60s budget) only orders startup; the server is
	// started regardless of the outcome.
	if browser.WaitForCDP(ctx, cfg.Ports.CDP, 60) {
		proc.LogMsg(proc.TagServer, "CDP ready")
	} else {
		proc.LogMsg(proc.TagServer, proc.WarnColor.Sprint("CDP not available, starting server anyway"))
	}
	env := os.Environ()
	env = append(env,
		"NODE_ENV=development",
		fmt.Sprintf("BROWSEROS_CDP_PORT=%d", cfg.Ports.CDP),
		fmt.Sprintf("BROWSEROS_SERVER_PORT=%d", cfg.Ports.Server),
		fmt.Sprintf("BROWSEROS_EXTENSION_PORT=%d", cfg.Ports.Extension),
		fmt.Sprintf("VITE_BROWSEROS_SERVER_PORT=%d", cfg.Ports.Server),
	)
	serverDir := filepath.Join(agentRoot, "apps/server")
	// The server is supervised with Restart=true; bun --watch also
	// reloads it on source changes.
	managed = append(managed, proc.StartManaged(ctx, &wg, proc.ProcConfig{
		Tag:     proc.TagServer,
		Dir:     serverDir,
		Env:     env,
		Restart: true,
		Cmd:     []string{"bun", "--watch", "--env-file=.env.development", "src/index.ts"},
	}))
	printSummary(cfg, agentRoot)

	// Buffered to 2: the first signal triggers graceful shutdown, a
	// second one (received below) forces an immediate kill.
	sigCh := make(chan os.Signal, 2)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	<-sigCh
	fmt.Println()
	proc.LogMsg(proc.TagInfo, proc.WarnColor.Sprint("Shutting down (Ctrl+C again to force)..."))
	cancel()
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	// A second signal while draining force-kills everything and exits 1.
	go func() {
		select {
		case <-sigCh:
			for _, p := range managed {
				p.ForceKill()
			}
			os.Exit(1)
		case <-done:
		}
	}()
	for _, p := range managed {
		p.Stop()
	}
	// Allow 10s for clean exit, then force-kill stragglers.
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		for _, p := range managed {
			p.ForceKill()
		}
	}
	return nil
}
|
||||
|
||||
func exists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func printSummary(cfg config.Config, agentRoot string) {
|
||||
fmt.Println()
|
||||
proc.LogMsgf(proc.TagInfo, "App: %s", cfg.BrowserOSAppPath)
|
||||
proc.LogMsgf(proc.TagInfo, "Repo: %s", cfg.RepoPath)
|
||||
proc.LogMsgf(proc.TagInfo, "Agent root: %s", agentRoot)
|
||||
proc.LogMsgf(proc.TagInfo, "Profile: %s", cfg.DevUserDataDir)
|
||||
proc.LogMsgf(proc.TagInfo, "Ports: CDP=%d Server=%d Extension=%d", cfg.Ports.CDP, cfg.Ports.Server, cfg.Ports.Extension)
|
||||
fmt.Println()
|
||||
}
|
||||
229
packages/browseros-agent/tools/alpha/config/config.go
Normal file
229
packages/browseros-agent/tools/alpha/config/config.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"browseros-alpha/internal/fspath"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Ports holds the three local ports the dogfooding environment uses.
type Ports struct {
	CDP       int `yaml:"cdp"`       // Chrome DevTools Protocol port
	Server    int `yaml:"server"`    // dev server port
	Extension int `yaml:"extension"` // extension port
}

// ProductionEnv holds KEY=VALUE pairs rendered into the generated
// .env.production files for the server and CLI apps.
type ProductionEnv struct {
	Server map[string]string `yaml:"server"`
	CLI    map[string]string `yaml:"cli"`
}

// Config is the persisted balpha configuration (stored as YAML at Path()).
type Config struct {
	RepoPath          string        `yaml:"repo_path"`            // checkout of the monorepo
	BrowserOSAppPath  string        `yaml:"browseros_app_path"`   // browser executable
	SourceUserDataDir string        `yaml:"source_user_data_dir"` // real BrowserOS user-data dir to copy from
	SourceProfileDir  string        `yaml:"source_profile_dir"`   // profile subdir within the source dir
	DevUserDataDir    string        `yaml:"dev_user_data_dir"`    // copied user-data dir used for dogfooding
	DevProfileDir     string        `yaml:"dev_profile_dir"`      // profile subdir within the dev dir
	Ports             Ports         `yaml:"ports"`
	ProductionEnv     ProductionEnv `yaml:"production_env"`
}

// packageJSON captures only the "name" field of a package.json, used to
// sanity-check the repo layout.
type packageJSON struct {
	Name string `json:"name"`
}
|
||||
|
||||
func Path() (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(DefaultConfigDir(home), "config.yaml"), nil
|
||||
}
|
||||
|
||||
func DefaultConfigDir(home string) string {
|
||||
if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
|
||||
return filepath.Join(xdg, "balpha")
|
||||
}
|
||||
return filepath.Join(home, ".config", "balpha")
|
||||
}
|
||||
|
||||
// Defaults returns the out-of-the-box configuration for a user whose home
// directory is home: macOS app/profile locations, the 9x15 port trio, and
// fully populated production-env defaults. RepoPath is left empty and must
// be supplied by the user.
func Defaults(home string) Config {
	return Config{
		BrowserOSAppPath:  "/Applications/BrowserOS.app/Contents/MacOS/BrowserOS",
		SourceUserDataDir: filepath.Join(home, "Library/Application Support/BrowserOS"),
		SourceProfileDir:  "Default",
		DevUserDataDir:    filepath.Join(DefaultConfigDir(home), "profile"),
		DevProfileDir:     "Default",
		Ports:             Ports{CDP: 9015, Server: 9115, Extension: 9315},
		ProductionEnv:     DefaultProductionEnv(),
	}
}
|
||||
|
||||
func Load(path string) (Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
var cfg Config
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return Config{}, fmt.Errorf("parse config: %w", err)
|
||||
}
|
||||
cfg.Resolve()
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func Save(path string, cfg Config) error {
|
||||
cfg.FillProductionEnvDefaults()
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := yaml.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
header := "# balpha configuration\n# Run: balpha init to reconfigure\n\n"
|
||||
return os.WriteFile(path, append([]byte(header), data...), 0644)
|
||||
}
|
||||
|
||||
// Resolve normalizes a loaded config in place: "~" is expanded in all path
// fields, and missing profile-dir/port values are back-filled with the
// defaults so older or partial configs keep working.
func (c *Config) Resolve() {
	home, err := os.UserHomeDir()
	if err != nil {
		// Without a home dir "~" cannot be meaningfully expanded;
		// ExpandTilde then joins relative to the empty string.
		home = ""
	}
	c.RepoPath = ExpandTilde(c.RepoPath, home)
	c.SourceUserDataDir = ExpandTilde(c.SourceUserDataDir, home)
	c.DevUserDataDir = ExpandTilde(c.DevUserDataDir, home)
	c.BrowserOSAppPath = ExpandTilde(c.BrowserOSAppPath, home)
	if c.DevProfileDir == "" {
		c.DevProfileDir = "Default"
	}
	// Zero ports mean "unset" and fall back to the standard trio.
	if c.Ports.CDP == 0 {
		c.Ports.CDP = 9015
	}
	if c.Ports.Server == 0 {
		c.Ports.Server = 9115
	}
	if c.Ports.Extension == 0 {
		c.Ports.Extension = 9315
	}
	c.FillProductionEnvDefaults()
}
|
||||
|
||||
// AgentRoot returns the browseros-agent package directory inside the repo.
func (c Config) AgentRoot() string {
	return filepath.Join(c.RepoPath, "packages/browseros-agent")
}

// SourceProfilePath returns the full path of the source browser profile.
func (c Config) SourceProfilePath() string {
	return filepath.Join(c.SourceUserDataDir, c.SourceProfileDir)
}

// DevProfilePath returns the full path of the dev (copied) browser profile.
func (c Config) DevProfilePath() string {
	return filepath.Join(c.DevUserDataDir, c.DevProfileDir)
}
|
||||
|
||||
// Validate checks that the config is complete and safe to use: all required
// fields are set, the dev user-data dir does not equal or live inside the
// source dir, the repo has the expected agent package layout, and the
// browser binary is an executable regular file.
func (c Config) Validate() error {
	if c.RepoPath == "" {
		return fmt.Errorf("repo_path is required")
	}
	if c.BrowserOSAppPath == "" {
		return fmt.Errorf("browseros_app_path is required")
	}
	if c.SourceUserDataDir == "" || c.SourceProfileDir == "" {
		return fmt.Errorf("source_user_data_dir and source_profile_dir are required")
	}
	if c.DevUserDataDir == "" || c.DevProfileDir == "" {
		return fmt.Errorf("dev_user_data_dir and dev_profile_dir are required")
	}
	if fspath.IsSameOrChild(c.DevUserDataDir, c.SourceUserDataDir) {
		return fmt.Errorf("dev_user_data_dir must not equal or live inside source_user_data_dir")
	}
	if err := validateRepo(c.AgentRoot()); err != nil {
		return err
	}
	// Mode()&0111 == 0 means no execute bit is set for anyone.
	if info, err := os.Stat(c.BrowserOSAppPath); err != nil {
		return fmt.Errorf("browseros_app_path: %w", err)
	} else if info.IsDir() || info.Mode()&0111 == 0 {
		return fmt.Errorf("browseros_app_path is not an executable file: %s", c.BrowserOSAppPath)
	}
	return nil
}
|
||||
|
||||
func validateRepo(agentRoot string) error {
|
||||
data, err := os.ReadFile(filepath.Join(agentRoot, "package.json"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("repo_path must contain packages/browseros-agent/package.json: %w", err)
|
||||
}
|
||||
var pkg packageJSON
|
||||
if err := json.Unmarshal(data, &pkg); err != nil {
|
||||
return fmt.Errorf("parse package.json: %w", err)
|
||||
}
|
||||
if pkg.Name != "browseros-monorepo" {
|
||||
return fmt.Errorf("unexpected package name %q in packages/browseros-agent/package.json", pkg.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ExpandTilde(path string, home string) string {
|
||||
if path == "~" {
|
||||
return home
|
||||
}
|
||||
if strings.HasPrefix(path, "~/") {
|
||||
return filepath.Join(home, path[2:])
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
// DefaultProductionEnv returns the baseline key set for generated
// .env.production files. Empty values are presumably placeholders the user
// fills in before release builds — confirm against the release pipeline.
func DefaultProductionEnv() ProductionEnv {
	return ProductionEnv{
		Server: map[string]string{
			"BROWSEROS_CONFIG_URL":            "https://llm.browseros.com/api/browseros-server/config",
			"CODEGEN_SERVICE_URL":             "",
			"POSTHOG_API_KEY":                 "",
			"SENTRY_DSN":                      "",
			"BROWSEROS_VM_CACHE_PREFETCH":     "true",
			"BROWSEROS_VM_CACHE_MANIFEST_URL": "https://cdn.browseros.com/vm/manifest.json",
			"R2_ACCOUNT_ID":                   "",
			"R2_ACCESS_KEY_ID":                "",
			"R2_SECRET_ACCESS_KEY":            "",
			"R2_BUCKET":                       "",
			"R2_DOWNLOAD_PREFIX":              "artifacts/vendor",
			"R2_UPLOAD_PREFIX":                "artifacts/server",
			"NODE_ENV":                        "production",
			"LOG_LEVEL":                       "info",
		},
		CLI: map[string]string{
			"POSTHOG_API_KEY":      "",
			"R2_ACCOUNT_ID":        "",
			"R2_ACCESS_KEY_ID":     "",
			"R2_SECRET_ACCESS_KEY": "",
			"R2_BUCKET":            "browseros",
			"R2_UPLOAD_PREFIX":     "cli",
		},
	}
}
|
||||
|
||||
func (c *Config) FillProductionEnvDefaults() {
|
||||
defaults := DefaultProductionEnv()
|
||||
if c.ProductionEnv.Server == nil {
|
||||
c.ProductionEnv.Server = map[string]string{}
|
||||
}
|
||||
if c.ProductionEnv.CLI == nil {
|
||||
c.ProductionEnv.CLI = map[string]string{}
|
||||
}
|
||||
for key, value := range defaults.Server {
|
||||
if _, ok := c.ProductionEnv.Server[key]; !ok {
|
||||
c.ProductionEnv.Server[key] = value
|
||||
}
|
||||
}
|
||||
for key, value := range defaults.CLI {
|
||||
if _, ok := c.ProductionEnv.CLI[key]; !ok {
|
||||
c.ProductionEnv.CLI[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
143
packages/browseros-agent/tools/alpha/config/config_test.go
Normal file
143
packages/browseros-agent/tools/alpha/config/config_test.go
Normal file
@@ -0,0 +1,143 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestDefaults verifies the built-in defaults: app path, profile dirs,
// ports, and that production-env maps come pre-populated.
func TestDefaults(t *testing.T) {
	home := t.TempDir()
	t.Setenv("XDG_CONFIG_HOME", "") // force the ~/.config fallback
	cfg := Defaults(home)

	if cfg.BrowserOSAppPath != "/Applications/BrowserOS.app/Contents/MacOS/BrowserOS" {
		t.Fatalf("unexpected browser path: %s", cfg.BrowserOSAppPath)
	}
	if cfg.SourceUserDataDir != filepath.Join(home, "Library/Application Support/BrowserOS") {
		t.Fatalf("unexpected source dir: %s", cfg.SourceUserDataDir)
	}
	if cfg.DevUserDataDir != filepath.Join(home, ".config/balpha/profile") {
		t.Fatalf("unexpected dev dir: %s", cfg.DevUserDataDir)
	}
	if cfg.DevProfileDir != "Default" {
		t.Fatalf("unexpected dev profile: %s", cfg.DevProfileDir)
	}
	if cfg.Ports.CDP != 9015 || cfg.Ports.Server != 9115 || cfg.Ports.Extension != 9315 {
		t.Fatalf("unexpected ports: %+v", cfg.Ports)
	}
	if cfg.ProductionEnv.Server["BROWSEROS_CONFIG_URL"] == "" {
		t.Fatalf("missing server production env defaults: %#v", cfg.ProductionEnv.Server)
	}
	if cfg.ProductionEnv.CLI["R2_BUCKET"] != "browseros" {
		t.Fatalf("missing cli production env defaults: %#v", cfg.ProductionEnv.CLI)
	}
}

// TestSaveLoadRoundTrip verifies that a saved config is parsed back with
// its fields and production-env values intact.
func TestSaveLoadRoundTrip(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.yaml")
	cfg := Config{
		RepoPath:          "/repo",
		BrowserOSAppPath:  "/Applications/BrowserOS.app/Contents/MacOS/BrowserOS",
		SourceUserDataDir: "/source",
		SourceProfileDir:  "Profile 25",
		DevUserDataDir:    "/dev",
		DevProfileDir:     "Default",
		Ports:             Ports{CDP: 9015, Server: 9115, Extension: 9315},
		ProductionEnv: ProductionEnv{
			Server: map[string]string{"NODE_ENV": "production"},
			CLI:    map[string]string{"R2_BUCKET": "browseros"},
		},
	}

	if err := Save(path, cfg); err != nil {
		t.Fatalf("save: %v", err)
	}
	got, err := Load(path)
	if err != nil {
		t.Fatalf("load: %v", err)
	}
	if got.SourceProfileDir != cfg.SourceProfileDir {
		t.Fatalf("source profile mismatch: %q", got.SourceProfileDir)
	}
	if got.Ports.Server != 9115 {
		t.Fatalf("server port mismatch: %d", got.Ports.Server)
	}
	if got.ProductionEnv.CLI["R2_BUCKET"] != "browseros" {
		t.Fatalf("production env mismatch: %#v", got.ProductionEnv)
	}
}

// TestExpandTilde verifies "~/" expansion against a fixed home dir.
func TestExpandTilde(t *testing.T) {
	got := ExpandTilde("~/x", "/Users/test")
	want := filepath.Join("/Users/test", "x")
	if got != want {
		t.Fatalf("got %q want %q", got, want)
	}
}

// TestValidateRejectsSourceInsideDev verifies that a dev user-data dir
// nested under the source dir fails validation.
func TestValidateRejectsSourceInsideDev(t *testing.T) {
	cfg := Config{
		RepoPath:          t.TempDir(),
		BrowserOSAppPath:  "/bin/sh",
		SourceUserDataDir: "/tmp/source",
		SourceProfileDir:  "Default",
		DevUserDataDir:    "/tmp/source/dev",
		DevProfileDir:     "Default",
		Ports:             Ports{CDP: 9015, Server: 9115, Extension: 9315},
	}
	if err := cfg.Validate(); err == nil {
		t.Fatal("expected validation error")
	}
}

// TestConfigPathHonorsXDG verifies Path() uses $XDG_CONFIG_HOME when set.
func TestConfigPathHonorsXDG(t *testing.T) {
	dir := t.TempDir()
	t.Setenv("XDG_CONFIG_HOME", dir)
	got, err := Path()
	if err != nil {
		t.Fatal(err)
	}
	want := filepath.Join(dir, "balpha", "config.yaml")
	if got != want {
		t.Fatalf("got %q want %q", got, want)
	}
}

// TestPathDefault verifies Path() falls back to ~/.config/balpha when
// XDG_CONFIG_HOME is unset.
func TestPathDefault(t *testing.T) {
	t.Setenv("XDG_CONFIG_HOME", "")
	home := t.TempDir()
	t.Setenv("HOME", home)
	got, err := Path()
	if err != nil {
		t.Fatal(err)
	}
	want := filepath.Join(home, ".config", "balpha", "config.yaml")
	if got != want {
		t.Fatalf("got %q want %q", got, want)
	}
}

// TestValidateRepoShape verifies Validate accepts a repo containing the
// expected packages/browseros-agent/package.json.
func TestValidateRepoShape(t *testing.T) {
	repo := t.TempDir()
	agentRoot := filepath.Join(repo, "packages/browseros-agent")
	if err := os.MkdirAll(agentRoot, 0755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(agentRoot, "package.json"), []byte(`{"name":"browseros-monorepo"}`), 0644); err != nil {
		t.Fatal(err)
	}
	cfg := Config{
		RepoPath:          repo,
		BrowserOSAppPath:  "/bin/sh",
		SourceUserDataDir: "/tmp/source",
		SourceProfileDir:  "Default",
		DevUserDataDir:    "/tmp/dev",
		DevProfileDir:     "Default",
		Ports:             Ports{CDP: 9015, Server: 9115, Extension: 9315},
	}
	if err := cfg.Validate(); err != nil {
		t.Fatalf("validate: %v", err)
	}
}
|
||||
17
packages/browseros-agent/tools/alpha/go.mod
Normal file
17
packages/browseros-agent/tools/alpha/go.mod
Normal file
@@ -0,0 +1,17 @@
|
||||
module browseros-alpha
|
||||
|
||||
go 1.25.7
|
||||
|
||||
require (
|
||||
github.com/fatih/color v1.18.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
golang.org/x/sys v0.42.0 // indirect
|
||||
)
|
||||
22
packages/browseros-agent/tools/alpha/go.sum
Normal file
22
packages/browseros-agent/tools/alpha/go.sum
Normal file
@@ -0,0 +1,22 @@
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
16
packages/browseros-agent/tools/alpha/internal/fspath/path.go
Normal file
16
packages/browseros-agent/tools/alpha/internal/fspath/path.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package fspath
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func IsSameOrChild(child string, parent string) bool {
|
||||
child = filepath.Clean(child)
|
||||
parent = filepath.Clean(parent)
|
||||
if child == parent {
|
||||
return true
|
||||
}
|
||||
rel, err := filepath.Rel(parent, child)
|
||||
return err == nil && rel != "." && !strings.HasPrefix(rel, "..")
|
||||
}
|
||||
7
packages/browseros-agent/tools/alpha/main.go
Normal file
7
packages/browseros-agent/tools/alpha/main.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package main
|
||||
|
||||
import "browseros-alpha/cmd"
|
||||
|
||||
func main() {
|
||||
cmd.Execute()
|
||||
}
|
||||
18
packages/browseros-agent/tools/alpha/pipeline/build.go
Normal file
18
packages/browseros-agent/tools/alpha/pipeline/build.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package pipeline
|
||||
|
||||
func Build(agentRoot string, r Runner) error {
|
||||
if err := r.Run(agentRoot, "./tools/dev/setup.sh"); err != nil {
|
||||
return err
|
||||
}
|
||||
return r.Run(agentRoot, "bun", "--cwd", "apps/agent", "--env-file=.env.development", "wxt", "build", "--mode", "development")
|
||||
}
|
||||
|
||||
// ExecRunner is the production Runner implementation backed by os/exec.
type ExecRunner struct{}

// Run executes the command in dir, streaming its output to this process.
func (ExecRunner) Run(dir string, args ...string) error {
	return runCommand(dir, args...)
}

// OutputRun executes the command in dir and returns its captured stdout.
func (ExecRunner) OutputRun(dir string, args ...string) (string, error) {
	return outputCommand(dir, args...)
}
|
||||
20
packages/browseros-agent/tools/alpha/pipeline/build_test.go
Normal file
20
packages/browseros-agent/tools/alpha/pipeline/build_test.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package pipeline
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestBuildRunsExpectedCommands verifies Build issues the setup script and
// the wxt build command, in that order, against a fake runner.
func TestBuildRunsExpectedCommands(t *testing.T) {
	root := t.TempDir()
	r := &FakeRunner{}
	if err := Build(root, r); err != nil {
		t.Fatal(err)
	}
	want := []string{
		"./tools/dev/setup.sh",
		"bun --cwd apps/agent --env-file=.env.development wxt build --mode development",
	}
	for i := range want {
		if r.Commands[i] != want[i] {
			t.Fatalf("command %d got %q want %q", i, r.Commands[i], want[i])
		}
	}
}
|
||||
55
packages/browseros-agent/tools/alpha/pipeline/env.go
Normal file
55
packages/browseros-agent/tools/alpha/pipeline/env.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"browseros-alpha/config"
|
||||
)
|
||||
|
||||
func WriteProductionEnvFiles(agentRoot string, cfg config.Config) error {
|
||||
cfg.FillProductionEnvDefaults()
|
||||
if err := writeEnvFile(filepath.Join(agentRoot, "apps/server/.env.production"), cfg.ProductionEnv.Server); err != nil {
|
||||
return err
|
||||
}
|
||||
return writeEnvFile(filepath.Join(agentRoot, "apps/cli/.env.production"), cfg.ProductionEnv.CLI)
|
||||
}
|
||||
|
||||
func writeEnvFile(path string, values map[string]string) error {
|
||||
keys := make([]string, 0, len(values))
|
||||
for key := range values {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
var out bytes.Buffer
|
||||
for _, key := range keys {
|
||||
line, err := formatEnvLine(key, values[key])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out.WriteString(line)
|
||||
out.WriteByte('\n')
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(path, out.Bytes(), 0600)
|
||||
}
|
||||
|
||||
// formatEnvLine renders a single KEY=VALUE dotenv line. Keys must be
// non-empty and free of whitespace and '='; values must not span lines.
// Values containing characters dotenv parsers treat specially (spaces,
// tabs, '#', quotes, '=') are wrapped with strconv.Quote.
func formatEnvLine(key string, value string) (string, error) {
	if key == "" || strings.ContainsAny(key, " \t\r\n=") {
		return "", fmt.Errorf("invalid env key %q", key)
	}
	if strings.ContainsAny(value, "\r\n") {
		return "", fmt.Errorf("env value for %s must not contain newlines", key)
	}
	needsQuoting := strings.ContainsAny(value, " \t#'\"=")
	if needsQuoting {
		value = strconv.Quote(value)
	}
	return key + "=" + value, nil
}
|
||||
74
packages/browseros-agent/tools/alpha/pipeline/env_test.go
Normal file
74
packages/browseros-agent/tools/alpha/pipeline/env_test.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"browseros-alpha/config"
|
||||
)
|
||||
|
||||
// TestWriteProductionEnvFiles verifies both env files are written 0600 and
// contain user values merged with the defaults.
func TestWriteProductionEnvFiles(t *testing.T) {
	root := t.TempDir()
	cfg := config.Config{
		ProductionEnv: config.ProductionEnv{
			Server: map[string]string{
				"NODE_ENV":  "production",
				"LOG_LEVEL": "info",
			},
			CLI: map[string]string{
				"R2_BUCKET":        "browseros",
				"R2_UPLOAD_PREFIX": "cli",
			},
		},
	}
	if err := WriteProductionEnvFiles(root, cfg); err != nil {
		t.Fatal(err)
	}
	assertMode(t, filepath.Join(root, "apps/server/.env.production"), 0600)
	assertMode(t, filepath.Join(root, "apps/cli/.env.production"), 0600)
	assertContains(t, filepath.Join(root, "apps/server/.env.production"), "BROWSEROS_CONFIG_URL=https://llm.browseros.com/api/browseros-server/config\n")
	assertContains(t, filepath.Join(root, "apps/server/.env.production"), "LOG_LEVEL=info\n")
	assertContains(t, filepath.Join(root, "apps/server/.env.production"), "NODE_ENV=production\n")
	assertContains(t, filepath.Join(root, "apps/cli/.env.production"), "POSTHOG_API_KEY=\n")
	assertContains(t, filepath.Join(root, "apps/cli/.env.production"), "R2_BUCKET=browseros\n")
	assertContains(t, filepath.Join(root, "apps/cli/.env.production"), "R2_UPLOAD_PREFIX=cli\n")
}

// TestWriteEnvFileQuotesUnsafeValues verifies values with spaces/'=' are quoted.
func TestWriteEnvFileQuotesUnsafeValues(t *testing.T) {
	path := filepath.Join(t.TempDir(), ".env.production")
	if err := writeEnvFile(path, map[string]string{"TOKEN": "abc=123 with space"}); err != nil {
		t.Fatal(err)
	}
	assertContains(t, path, "TOKEN=\"abc=123 with space\"\n")
}

// TestWriteEnvFileRejectsNewlines verifies multi-line values are rejected.
func TestWriteEnvFileRejectsNewlines(t *testing.T) {
	path := filepath.Join(t.TempDir(), ".env.production")
	if err := writeEnvFile(path, map[string]string{"TOKEN": "abc\n123"}); err == nil {
		t.Fatal("expected newline value error")
	}
}

// assertContains fails the test unless the file at path contains want.
func assertContains(t *testing.T, path string, want string) {
	t.Helper()
	got, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(string(got), want) {
		t.Fatalf("%s missing %q in %q", path, want, string(got))
	}
}

// assertMode fails the test unless the file at path has permission bits want.
func assertMode(t *testing.T, path string, want os.FileMode) {
	t.Helper()
	info, err := os.Stat(path)
	if err != nil {
		t.Fatal(err)
	}
	if got := info.Mode().Perm(); got != want {
		t.Fatalf("%s mode got %o want %o", path, got, want)
	}
}
|
||||
24
packages/browseros-agent/tools/alpha/pipeline/exec.go
Normal file
24
packages/browseros-agent/tools/alpha/pipeline/exec.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// runCommand runs args[0] with the remaining args in dir, streaming both
// stdout and stderr to this process. Returns the command's exit error.
func runCommand(dir string, args ...string) error {
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Dir = dir
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}
|
||||
|
||||
func outputCommand(dir string, args ...string) (string, error) {
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd.Dir = dir
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(out), nil
|
||||
}
|
||||
36
packages/browseros-agent/tools/alpha/pipeline/git.go
Normal file
36
packages/browseros-agent/tools/alpha/pipeline/git.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package pipeline
|
||||
|
||||
import "strings"
|
||||
|
||||
// Runner abstracts command execution so pipeline steps can be exercised
// with a fake in tests.
type Runner interface {
	// Run executes the command in dir, streaming its output.
	Run(dir string, args ...string) error
	// OutputRun executes the command in dir and returns captured stdout.
	OutputRun(dir string, args ...string) (string, error)
}
|
||||
|
||||
func Dirty(repoPath string, r Runner) (bool, error) {
|
||||
out, err := r.OutputRun(repoPath, "git", "status", "--porcelain")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return strings.TrimSpace(out) != "", nil
|
||||
}
|
||||
|
||||
// Pull fast-forwards the checkout at repoPath; it never creates merge commits.
func Pull(repoPath string, r Runner) error {
	return r.Run(repoPath, "git", "pull", "--ff-only")
}
|
||||
|
||||
// Head returns the abbreviated commit hash of HEAD in repoPath.
func Head(repoPath string, r Runner) (string, error) {
	out, err := r.OutputRun(repoPath, "git", "rev-parse", "--short", "HEAD")
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(out), nil
}
|
||||
|
||||
// Branch returns the current branch name in repoPath, or "" when it cannot
// be determined (errors are deliberately swallowed — branch is advisory).
func Branch(repoPath string, r Runner) string {
	out, err := r.OutputRun(repoPath, "git", "branch", "--show-current")
	if err != nil {
		return ""
	}
	return strings.TrimSpace(out)
}
|
||||
53
packages/browseros-agent/tools/alpha/pipeline/git_test.go
Normal file
53
packages/browseros-agent/tools/alpha/pipeline/git_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package pipeline
|
||||
|
||||
import (
	"strings"
	"testing"
)
|
||||
|
||||
// TestDirtyStatus verifies Dirty returns true for non-empty porcelain output.
func TestDirtyStatus(t *testing.T) {
	r := &FakeRunner{Output: map[string]string{
		"git status --porcelain": " M file.go\n",
	}}
	dirty, err := Dirty("/repo", r)
	if err != nil {
		t.Fatal(err)
	}
	if !dirty {
		t.Fatal("expected dirty")
	}
}

// TestPullRunsFastForwardOnly verifies Pull issues `git pull --ff-only`.
func TestPullRunsFastForwardOnly(t *testing.T) {
	r := &FakeRunner{}
	if err := Pull("/repo", r); err != nil {
		t.Fatal(err)
	}
	if got := r.Commands[0]; got != "git pull --ff-only" {
		t.Fatalf("got %q", got)
	}
}

// FakeRunner is a test double for Runner that records every command and
// serves canned output keyed by the joined command string.
type FakeRunner struct {
	Commands []string          // joined commands, in call order
	Output   map[string]string // canned stdout per joined command
}

// Run records the command and always succeeds.
func (f *FakeRunner) Run(dir string, args ...string) error {
	f.Commands = append(f.Commands, join(args))
	return nil
}

// OutputRun records the command and returns its canned output ("" if unset).
func (f *FakeRunner) OutputRun(dir string, args ...string) (string, error) {
	cmd := join(args)
	f.Commands = append(f.Commands, cmd)
	return f.Output[cmd], nil
}
|
||||
|
||||
// join renders an argv slice as a single space-separated string for
// command comparison in tests. Uses strings.Join instead of the previous
// hand-rolled concatenation loop.
func join(args []string) string {
	return strings.Join(args, " ")
}
|
||||
46
packages/browseros-agent/tools/alpha/proc/log.go
Normal file
46
packages/browseros-agent/tools/alpha/proc/log.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package proc
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
// Tag labels log lines with a subsystem name and its display color.
type Tag struct {
	Name  string       // short subsystem label printed as [name]
	Color *color.Color // color applied to the [name] prefix
}
|
||||
|
||||
// Standard log tags, one per subsystem, each with a distinct color, plus
// shared colors for ad-hoc emphasis inside log messages.
var (
	TagBuild   = Tag{"build", color.New(color.FgYellow)}
	TagAgent   = Tag{"agent", color.New(color.FgMagenta)}
	TagServer  = Tag{"server", color.New(color.FgCyan)}
	TagBrowser = Tag{"browser", color.New(color.FgBlue)}
	TagInfo    = Tag{"info", color.New(color.FgGreen)}
	TagTest    = Tag{"test", color.New(color.FgWhite)}

	ErrorColor = color.New(color.FgRed)
	WarnColor  = color.New(color.FgYellow)
	BoldColor  = color.New(color.Bold)
	DimColor   = color.New(color.Faint)
)
|
||||
|
||||
func LogMsg(t Tag, msg string) {
|
||||
fmt.Printf("%s %s\n", t.Color.Sprintf("[%s]", t.Name), msg)
|
||||
}
|
||||
|
||||
// LogMsgf is the printf-style variant of LogMsg.
func LogMsgf(t Tag, format string, args ...any) {
	LogMsg(t, fmt.Sprintf(format, args...))
}
|
||||
|
||||
func StreamLines(r interface{ Read([]byte) (int, error) }, t Tag) {
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if line != "" {
|
||||
fmt.Printf("%s %s\n", t.Color.Sprintf("[%s]", t.Name), line)
|
||||
}
|
||||
}
|
||||
}
|
||||
162
packages/browseros-agent/tools/alpha/proc/managed.go
Normal file
162
packages/browseros-agent/tools/alpha/proc/managed.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package proc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ProcConfig describes how to launch and supervise one child process.
type ProcConfig struct {
	Tag         Tag          // log tag used for all output from this process
	Dir         string       // working directory for the command
	Env         []string     // full environment; nil inherits the parent's
	Restart     bool         // relaunch after exit or start failure
	Cmd         []string     // argv; Cmd[0] is the executable
	BeforeStart func() error // optional hook run before every (re)start
}
|
||||
|
||||
// ManagedProc supervises a single child process: it restarts it when
// configured to, and supports graceful and forced shutdown.
type ManagedProc struct {
	Cfg    ProcConfig
	cancel context.CancelFunc // stops the supervision loop
	mu     sync.Mutex         // guards proc and exited
	proc   *os.Process        // currently running process; nil between runs
	exited chan struct{}      // closed when the current process has exited
}
|
||||
|
||||
func StartManaged(ctx context.Context, wg *sync.WaitGroup, cfg ProcConfig) *ManagedProc {
|
||||
procCtx, procCancel := context.WithCancel(ctx)
|
||||
mp := &ManagedProc{
|
||||
Cfg: cfg,
|
||||
cancel: procCancel,
|
||||
exited: make(chan struct{}),
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
mp.run(procCtx)
|
||||
}()
|
||||
|
||||
return mp
|
||||
}
|
||||
|
||||
// run is the supervision loop: it starts the configured command, streams its
// output, and — when Restart is set — relaunches it until ctx is cancelled.
func (mp *ManagedProc) run(ctx context.Context) {
	for {
		if ctx.Err() != nil {
			return
		}

		// Optional pre-start hook (e.g. a build step). A failure either
		// aborts the loop or, with Restart, retries after a short pause.
		if mp.Cfg.BeforeStart != nil {
			if err := mp.Cfg.BeforeStart(); err != nil {
				LogMsg(mp.Cfg.Tag, ErrorColor.Sprintf("Pre-start failed: %v", err))
				if !mp.Cfg.Restart || ctx.Err() != nil {
					return
				}
				time.Sleep(time.Second)
				continue
			}
		}

		LogMsgf(mp.Cfg.Tag, "Starting: %s", DimColor.Sprint(strings.Join(mp.Cfg.Cmd, " ")))

		cmd := exec.Command(mp.Cfg.Cmd[0], mp.Cfg.Cmd[1:]...)
		cmd.Dir = mp.Cfg.Dir
		if mp.Cfg.Env != nil {
			cmd.Env = mp.Cfg.Env
		}
		// Own process group, so Stop/ForceKill can signal the whole tree.
		cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

		// NOTE(review): pipe errors are ignored; a failed pipe would hand
		// StreamLines a nil reader — confirm this cannot happen before Start.
		stdout, _ := cmd.StdoutPipe()
		stderr, _ := cmd.StderrPipe()

		if err := cmd.Start(); err != nil {
			LogMsg(mp.Cfg.Tag, ErrorColor.Sprintf("Error starting: %v", err))
			if !mp.Cfg.Restart || ctx.Err() != nil {
				return
			}
			time.Sleep(time.Second)
			continue
		}

		// Publish the live process and a fresh exit channel under the lock so
		// Stop sees a consistent pair. If cancellation raced with Start,
		// signal the new process group immediately.
		exited := make(chan struct{})
		mp.mu.Lock()
		mp.proc = cmd.Process
		mp.exited = exited
		cancelled := ctx.Err() != nil
		mp.mu.Unlock()
		if cancelled {
			_ = syscall.Kill(-cmd.Process.Pid, syscall.SIGTERM)
		}

		// Drain both pipes fully before Wait, as os/exec requires.
		var streamWg sync.WaitGroup
		streamWg.Add(2)
		go func() { defer streamWg.Done(); StreamLines(stdout, mp.Cfg.Tag) }()
		go func() { defer streamWg.Done(); StreamLines(stderr, mp.Cfg.Tag) }()

		streamWg.Wait()
		_ = cmd.Wait()

		// Clear the handle and wake anyone blocked in Stop.
		mp.mu.Lock()
		mp.proc = nil
		close(mp.exited)
		mp.mu.Unlock()

		if ctx.Err() != nil {
			return
		}

		exitCode := cmd.ProcessState.ExitCode()
		if exitCode != 0 {
			LogMsg(mp.Cfg.Tag, ErrorColor.Sprintf("Process exited with code %d", exitCode))
		} else {
			LogMsg(mp.Cfg.Tag, "Process exited cleanly")
		}

		if !mp.Cfg.Restart {
			return
		}

		LogMsg(mp.Cfg.Tag, WarnColor.Sprint("Restarting in 1s..."))
		select {
		case <-ctx.Done():
			return
		case <-time.After(time.Second):
		}
	}
}
|
||||
|
||||
// Stop cancels the supervision loop and gracefully terminates the current
// process group: SIGTERM first, escalating to SIGKILL after 5s, and giving up
// 3s after that. When no process is running it only cancels the loop.
func (mp *ManagedProc) Stop() {
	mp.cancel()
	// Snapshot the process/exit-channel pair under the lock; run replaces
	// both together, so the pair is always consistent.
	mp.mu.Lock()
	proc := mp.proc
	exited := mp.exited
	mp.mu.Unlock()

	if proc != nil {
		// Negative pid targets the whole process group (Setpgid in run).
		_ = syscall.Kill(-proc.Pid, syscall.SIGTERM)
		select {
		case <-exited:
		case <-time.After(5 * time.Second):
			_ = syscall.Kill(-proc.Pid, syscall.SIGKILL)
			select {
			case <-exited:
			case <-time.After(3 * time.Second):
				LogMsg(mp.Cfg.Tag, WarnColor.Sprint("Process did not exit after SIGKILL, giving up"))
			}
		}
	}
}
|
||||
|
||||
func (mp *ManagedProc) ForceKill() {
|
||||
mp.mu.Lock()
|
||||
proc := mp.proc
|
||||
mp.mu.Unlock()
|
||||
|
||||
if proc != nil {
|
||||
_ = syscall.Kill(-proc.Pid, syscall.SIGKILL)
|
||||
}
|
||||
}
|
||||
66
packages/browseros-agent/tools/alpha/proc/ports.go
Normal file
66
packages/browseros-agent/tools/alpha/proc/ports.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package proc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"browseros-alpha/config"
|
||||
)
|
||||
|
||||
func ResolvePorts(start config.Ports) (config.Ports, bool, error) {
|
||||
used := map[int]bool{}
|
||||
cdp, err := resolvePort("CDP", start.CDP, used)
|
||||
if err != nil {
|
||||
return config.Ports{}, false, err
|
||||
}
|
||||
used[cdp] = true
|
||||
server, err := resolvePort("server", start.Server, used)
|
||||
if err != nil {
|
||||
return config.Ports{}, false, err
|
||||
}
|
||||
used[server] = true
|
||||
extension, err := resolvePort("extension", start.Extension, used)
|
||||
if err != nil {
|
||||
return config.Ports{}, false, err
|
||||
}
|
||||
resolved := config.Ports{CDP: cdp, Server: server, Extension: extension}
|
||||
return resolved, resolved != start, nil
|
||||
}
|
||||
|
||||
func resolvePort(name string, start int, used map[int]bool) (int, error) {
|
||||
if start <= 0 || start > 65535 {
|
||||
return 0, fmt.Errorf("invalid %s port: %d", name, start)
|
||||
}
|
||||
for port := start; port <= 65535; port++ {
|
||||
if used[port] {
|
||||
continue
|
||||
}
|
||||
if isPortAvailable(port) {
|
||||
return port, nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("no available %s port at or above %d%s", name, start, pidSuffix(start))
|
||||
}
|
||||
|
||||
func isPortAvailable(port int) bool {
|
||||
ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
ln.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// pidSuffix returns " (pids: ...)" naming the processes listening on port,
// or "" when lsof is unavailable or reports nothing. Best effort only.
func pidSuffix(port int) string {
	raw, err := exec.Command("lsof", "-ti", fmt.Sprintf(":%d", port)).Output()
	if err != nil {
		return ""
	}
	trimmed := strings.TrimSpace(string(raw))
	if trimmed == "" {
		return ""
	}
	return fmt.Sprintf(" (pids: %s)", strings.ReplaceAll(trimmed, "\n", ","))
}
|
||||
51
packages/browseros-agent/tools/alpha/proc/ports_test.go
Normal file
51
packages/browseros-agent/tools/alpha/proc/ports_test.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package proc
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"browseros-alpha/config"
|
||||
)
|
||||
|
||||
// TestResolvePortsIncrementsBusyPort occupies a port and checks that
// ResolvePorts moves the CDP port off it and reports the change.
func TestResolvePortsIncrementsBusyPort(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	// Keep the listener open so the port stays busy for the whole test.
	defer ln.Close()
	port := ln.Addr().(*net.TCPAddr).Port
	got, changed, err := ResolvePorts(config.Ports{CDP: port, Server: 9115, Extension: 9315})
	if err != nil {
		t.Fatal(err)
	}
	if !changed {
		t.Fatal("expected changed ports")
	}
	if got.CDP == port {
		t.Fatalf("expected CDP port to move off busy port: %+v", got)
	}
}
|
||||
|
||||
// TestResolvePortsAvoidsDuplicates requests the same free port for all three
// services and checks that three distinct ports come back.
func TestResolvePortsAvoidsDuplicates(t *testing.T) {
	base := freePort(t)
	got, changed, err := ResolvePorts(config.Ports{CDP: base, Server: base, Extension: base})
	if err != nil {
		t.Fatal(err)
	}
	if !changed {
		t.Fatal("expected changed ports")
	}
	if got.CDP == got.Server || got.Server == got.Extension || got.CDP == got.Extension {
		t.Fatalf("ports must be distinct: %+v", got)
	}
}
|
||||
|
||||
// freePort returns a port that was free at the moment of the check. The
// listener is closed on return, so reuse is racy but acceptable in tests.
func freePort(t *testing.T) int {
	t.Helper()
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer listener.Close()
	return listener.Addr().(*net.TCPAddr).Port
}
|
||||
38
packages/browseros-agent/tools/alpha/proc/run.go
Normal file
38
packages/browseros-agent/tools/alpha/proc/run.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package proc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/exec"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RunBlocking runs args in dir, streaming output under tag t, and blocks
// until the command exits. The parent environment is inherited.
func RunBlocking(ctx context.Context, dir string, t Tag, args ...string) error {
	return runBlocking(ctx, dir, nil, t, args...)
}
|
||||
|
||||
// RunBlockingWithEnv is RunBlocking with an explicit full environment.
func RunBlockingWithEnv(ctx context.Context, dir string, env []string, t Tag, args ...string) error {
	return runBlocking(ctx, dir, env, t, args...)
}
|
||||
|
||||
// runBlocking starts args[0] with the remaining args in dir, streams both
// stdout and stderr through StreamLines under tag t, and returns the
// command's exit error. ctx cancellation kills the command.
func runBlocking(ctx context.Context, dir string, env []string, t Tag, args ...string) error {
	cmd := exec.CommandContext(ctx, args[0], args[1:]...)
	cmd.Dir = dir
	if env != nil {
		cmd.Env = env
	}

	// NOTE(review): pipe errors are ignored — confirm StreamLines tolerates
	// the (unlikely) nil reader from a failed pipe.
	stdout, _ := cmd.StdoutPipe()
	stderr, _ := cmd.StderrPipe()

	if err := cmd.Start(); err != nil {
		return err
	}

	// Drain both pipes fully before Wait, as os/exec requires.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); StreamLines(stdout, t) }()
	go func() { defer wg.Done(); StreamLines(stderr, t) }()
	wg.Wait()

	return cmd.Wait()
}
|
||||
212
packages/browseros-agent/tools/alpha/profile/import.go
Normal file
212
packages/browseros-agent/tools/alpha/profile/import.go
Normal file
@@ -0,0 +1,212 @@
|
||||
package profile
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"browseros-alpha/internal/fspath"
|
||||
)
|
||||
|
||||
// ImportConfig names the source browser profile to copy and the destination
// dev user-data directory to (re)create.
type ImportConfig struct {
	SourceUserDataDir string // existing user-data dir to read from
	SourceProfileDir  string // profile subdirectory inside the source
	DevUserDataDir    string // dev user-data dir; wiped and recreated
	DevProfileDir     string // profile subdirectory to create in the dev dir
}
|
||||
|
||||
// profileAllowlist is the set of profile entries copied into the dev profile:
// extensions, credentials, cookies, bookmarks, preferences, and history.
// Everything else (caches, GPU state, ...) is deliberately left behind.
var profileAllowlist = []string{
	"Extensions",
	"Local Extension Settings",
	"Login Data",
	"Login Data For Account",
	"Cookies",
	"Cookies-journal",
	"Bookmarks",
	"Preferences",
	"Web Data",
	"History",
}
|
||||
|
||||
// Import rebuilds the dev user-data dir from scratch as a trimmed copy of the
// source profile: it wipes DevUserDataDir, copies "Local State" plus the
// allowlisted profile entries, patches both so the dev profile is selected
// and marked as cleanly exited, and removes stale Singleton* lock files.
// It refuses to run when the dev dir equals or lives inside the source dir.
func Import(cfg ImportConfig) error {
	if cfg.SourceUserDataDir == "" || cfg.SourceProfileDir == "" || cfg.DevUserDataDir == "" || cfg.DevProfileDir == "" {
		return fmt.Errorf("source and dev profile paths are required")
	}
	// Guard against deleting the user's real profile via RemoveAll below.
	if fspath.IsSameOrChild(cfg.DevUserDataDir, cfg.SourceUserDataDir) {
		return fmt.Errorf("dev user-data dir must not equal or live inside source user-data dir")
	}
	sourceProfile := filepath.Join(cfg.SourceUserDataDir, cfg.SourceProfileDir)
	if info, err := os.Stat(sourceProfile); err != nil || !info.IsDir() {
		return fmt.Errorf("source profile not found: %s", sourceProfile)
	}
	if err := os.RemoveAll(cfg.DevUserDataDir); err != nil {
		return err
	}
	devProfile := filepath.Join(cfg.DevUserDataDir, cfg.DevProfileDir)
	if err := os.MkdirAll(devProfile, 0755); err != nil {
		return err
	}
	localStatePath := filepath.Join(cfg.DevUserDataDir, "Local State")
	if err := copyIfExists(filepath.Join(cfg.SourceUserDataDir, "Local State"), localStatePath); err != nil {
		return err
	}
	if err := patchLocalState(localStatePath, cfg.SourceProfileDir, cfg.DevProfileDir); err != nil {
		return err
	}
	for _, name := range profileAllowlist {
		src := filepath.Join(sourceProfile, name)
		dst := filepath.Join(devProfile, name)
		if err := copyIfExists(src, dst); err != nil {
			return err
		}
	}
	// Mark the copied profile as cleanly exited so the browser does not show
	// a crash-restore prompt on first launch.
	if err := patchPreferences(filepath.Join(devProfile, "Preferences")); err != nil {
		return err
	}
	return CleanupSingletons(cfg.DevUserDataDir)
}
|
||||
|
||||
func CleanupSingletons(userDataDir string) error {
|
||||
entries, err := filepath.Glob(filepath.Join(userDataDir, "Singleton*"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if err := os.RemoveAll(entry); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyIfExists(src string, dst string) error {
|
||||
info, err := os.Stat(src)
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() {
|
||||
return copyDir(src, dst)
|
||||
}
|
||||
return copyFile(src, dst, info.Mode())
|
||||
}
|
||||
|
||||
func copyDir(src string, dst string) error {
|
||||
return filepath.WalkDir(src, func(path string, d os.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rel, err := filepath.Rel(src, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target := filepath.Join(dst, rel)
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.IsDir() {
|
||||
return os.MkdirAll(target, info.Mode())
|
||||
}
|
||||
return copyFile(path, target, info.Mode())
|
||||
})
|
||||
}
|
||||
|
||||
func copyFile(src string, dst string, mode os.FileMode) error {
|
||||
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
out, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(out, in); err != nil {
|
||||
out.Close()
|
||||
return err
|
||||
}
|
||||
return out.Close()
|
||||
}
|
||||
|
||||
func patchPreferences(path string) error {
|
||||
data, err := os.ReadFile(path)
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var prefs map[string]any
|
||||
if err := json.Unmarshal(data, &prefs); err != nil {
|
||||
return nil
|
||||
}
|
||||
profile, ok := prefs["profile"].(map[string]any)
|
||||
if !ok {
|
||||
profile = map[string]any{}
|
||||
prefs["profile"] = profile
|
||||
}
|
||||
profile["exit_type"] = "Normal"
|
||||
profile["exited_cleanly"] = true
|
||||
out, err := json.Marshal(prefs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(path, out, 0644)
|
||||
}
|
||||
|
||||
// patchLocalState rewrites the copied "Local State" so the dev profile dir is
// the only known, last-used profile, carrying over the source profile's
// display metadata. Missing or unparseable files are skipped (best effort);
// everything else in the file (e.g. os_crypt keys) is preserved.
func patchLocalState(path string, sourceProfileDir string, devProfileDir string) error {
	data, err := os.ReadFile(path)
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	var state map[string]any
	if err := json.Unmarshal(data, &state); err != nil {
		return nil
	}
	profile := ensureObject(state, "profile")
	// Remap the source profile's metadata onto the dev profile dir.
	selected := selectedProfileInfo(profile, sourceProfileDir)
	profile["info_cache"] = map[string]any{devProfileDir: selected}
	profile["last_used"] = devProfileDir
	profile["last_active_profiles"] = []string{devProfileDir}
	profile["profiles_order"] = []string{devProfileDir}
	// Skip the profile picker and go straight into the dev profile.
	profile["show_picker_on_startup"] = false
	profile["picker_shown"] = true
	out, err := json.Marshal(state)
	if err != nil {
		return err
	}
	return os.WriteFile(path, out, 0644)
}
|
||||
|
||||
// ensureObject returns parent[key] as an object, creating and installing an
// empty one when the key is absent or holds a non-object value.
func ensureObject(parent map[string]any, key string) map[string]any {
	if existing, ok := parent[key].(map[string]any); ok {
		return existing
	}
	created := map[string]any{}
	parent[key] = created
	return created
}
|
||||
|
||||
// selectedProfileInfo returns the info_cache entry for sourceProfileDir,
// falling back to a minimal {"name": dir} stub when it is missing.
func selectedProfileInfo(profile map[string]any, sourceProfileDir string) map[string]any {
	fallback := map[string]any{"name": sourceProfileDir}
	cache, ok := profile["info_cache"].(map[string]any)
	if !ok {
		return fallback
	}
	entry, ok := cache[sourceProfileDir].(map[string]any)
	if !ok {
		return fallback
	}
	return entry
}
|
||||
163
packages/browseros-agent/tools/alpha/profile/import_test.go
Normal file
163
packages/browseros-agent/tools/alpha/profile/import_test.go
Normal file
@@ -0,0 +1,163 @@
|
||||
package profile
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestImportCopiesAllowlistAndLocalState runs a full Import against a fake
// source profile and checks that allowlisted entries are copied, non-listed
// entries (Cache) are not, Local State is remapped to the dev profile, and
// Preferences are patched to a clean exit.
func TestImportCopiesAllowlistAndLocalState(t *testing.T) {
	root := t.TempDir()
	sourceUser := filepath.Join(root, "source")
	sourceProfile := filepath.Join(sourceUser, "Profile 25")
	devUser := filepath.Join(root, "dev")
	mustWrite(t, filepath.Join(sourceUser, "Local State"), `{
		"os_crypt": {"encrypted_key": "abc"},
		"profile": {
			"info_cache": {
				"Default": {"name": "Personal", "user_name": "me@example.com"},
				"Profile 25": {"name": "Sam", "user_name": "sam@example.test"}
			},
			"last_used": "Default",
			"last_active_profiles": ["Default", "Profile 25"],
			"profiles_order": ["Default", "Profile 25"],
			"show_picker_on_startup": true,
			"picker_shown": true
		}
	}`)
	mustWrite(t, filepath.Join(sourceProfile, "Bookmarks"), "bookmarks")
	mustWrite(t, filepath.Join(sourceProfile, "Preferences"), `{"profile":{"exit_type":"Crashed","exited_cleanly":false}}`)
	// Cache is deliberately NOT in the allowlist and must not be copied.
	mustWrite(t, filepath.Join(sourceProfile, "Cache/junk"), "cache")
	mustWrite(t, filepath.Join(sourceProfile, "Extensions/ext/manifest.json"), "{}")

	err := Import(ImportConfig{
		SourceUserDataDir: sourceUser,
		SourceProfileDir:  "Profile 25",
		DevUserDataDir:    devUser,
		DevProfileDir:     "Default",
	})
	if err != nil {
		t.Fatal(err)
	}

	assertImportedLocalState(t, filepath.Join(devUser, "Local State"))
	assertFile(t, filepath.Join(devUser, "Default", "Bookmarks"), "bookmarks")
	assertMissing(t, filepath.Join(devUser, "Default", "Cache"))
	assertFileExists(t, filepath.Join(devUser, "Default", "Extensions/ext/manifest.json"))
	// json.Marshal sorts map keys, so the patched file is deterministic.
	prefs, err := os.ReadFile(filepath.Join(devUser, "Default", "Preferences"))
	if err != nil {
		t.Fatal(err)
	}
	if string(prefs) != `{"profile":{"exit_type":"Normal","exited_cleanly":true}}` {
		t.Fatalf("preferences not patched: %s", string(prefs))
	}
}
|
||||
|
||||
func TestImportRejectsDangerousDevDir(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
err := Import(ImportConfig{
|
||||
SourceUserDataDir: root,
|
||||
SourceProfileDir: "Default",
|
||||
DevUserDataDir: filepath.Join(root, "child"),
|
||||
DevProfileDir: "Default",
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
// assertImportedLocalState checks the patched dev "Local State": os_crypt
// must be preserved, info_cache must contain only the dev profile (carrying
// the source profile's metadata), and the profile picker must be disabled.
// The expected values match the fixture in TestImportCopiesAllowlistAndLocalState.
func assertImportedLocalState(t *testing.T, path string) {
	t.Helper()
	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	var state map[string]any
	if err := json.Unmarshal(data, &state); err != nil {
		t.Fatal(err)
	}
	osCrypt := state["os_crypt"].(map[string]any)
	if osCrypt["encrypted_key"] != "abc" {
		t.Fatalf("os_crypt not preserved: %#v", osCrypt)
	}
	profile := state["profile"].(map[string]any)
	infoCache := profile["info_cache"].(map[string]any)
	if len(infoCache) != 1 {
		t.Fatalf("expected one dev profile in info_cache, got %#v", infoCache)
	}
	selected := infoCache["Default"].(map[string]any)
	if selected["name"] != "Sam" || selected["user_name"] != "sam@example.test" {
		t.Fatalf("selected profile metadata not remapped: %#v", selected)
	}
	if profile["last_used"] != "Default" {
		t.Fatalf("last_used mismatch: %#v", profile["last_used"])
	}
	assertStringList(t, profile["last_active_profiles"], []string{"Default"})
	assertStringList(t, profile["profiles_order"], []string{"Default"})
	if profile["show_picker_on_startup"] != false {
		t.Fatalf("profile picker not disabled: %#v", profile["show_picker_on_startup"])
	}
}
|
||||
|
||||
func TestCleanupSingletons(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
mustWrite(t, filepath.Join(dir, "SingletonLock"), "lock")
|
||||
mustWrite(t, filepath.Join(dir, "SingletonCookie"), "cookie")
|
||||
if err := CleanupSingletons(dir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertMissing(t, filepath.Join(dir, "SingletonLock"))
|
||||
assertMissing(t, filepath.Join(dir, "SingletonCookie"))
|
||||
}
|
||||
|
||||
// assertStringList fails the test unless got is a []any whose elements equal
// want, element for element.
func assertStringList(t *testing.T, got any, want []string) {
	t.Helper()
	values, ok := got.([]any)
	if !ok {
		t.Fatalf("got %#v, want string list", got)
	}
	if len(values) != len(want) {
		t.Fatalf("got %#v, want %#v", got, want)
	}
	for i := range values {
		if values[i] != want[i] {
			t.Fatalf("got %#v, want %#v", got, want)
		}
	}
}
|
||||
|
||||
// mustWrite writes content to path, creating parent directories, and fails
// the test on any error.
func mustWrite(t *testing.T, path string, content string) {
	t.Helper()
	if mkErr := os.MkdirAll(filepath.Dir(path), 0755); mkErr != nil {
		t.Fatal(mkErr)
	}
	if writeErr := os.WriteFile(path, []byte(content), 0644); writeErr != nil {
		t.Fatal(writeErr)
	}
}
|
||||
|
||||
// assertFile fails the test unless path exists with exactly want as content.
func assertFile(t *testing.T, path string, want string) {
	t.Helper()
	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	if got := string(data); got != want {
		t.Fatalf("%s got %q want %q", path, got, want)
	}
}
|
||||
|
||||
// assertFileExists fails the test when path cannot be stat'ed.
func assertFileExists(t *testing.T, path string) {
	t.Helper()
	_, err := os.Stat(path)
	if err != nil {
		t.Fatalf("expected %s: %v", path, err)
	}
}
|
||||
|
||||
// assertMissing fails the test unless path does not exist at all.
func assertMissing(t *testing.T, path string) {
	t.Helper()
	_, err := os.Stat(path)
	if !os.IsNotExist(err) {
		t.Fatalf("expected missing %s, err=%v", path, err)
	}
}
|
||||
53
packages/browseros-agent/tools/alpha/profile/local_state.go
Normal file
53
packages/browseros-agent/tools/alpha/profile/local_state.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package profile
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// BrowserProfile is one entry from the browser's profile list.
type BrowserProfile struct {
	Dir   string // profile directory name inside the user-data dir
	Name  string // display name (falls back to Dir when unset)
	Email string // signed-in account, empty when none
}
|
||||
|
||||
// localState mirrors just the profile.info_cache portion of the browser's
// "Local State" JSON file; all other keys are ignored on decode.
type localState struct {
	Profile struct {
		InfoCache map[string]struct {
			Name     string `json:"name"`
			UserName string `json:"user_name"`
		} `json:"info_cache"`
	} `json:"profile"`
}
|
||||
|
||||
func ReadProfiles(userDataDir string) ([]BrowserProfile, error) {
|
||||
data, err := os.ReadFile(filepath.Join(userDataDir, "Local State"))
|
||||
if err != nil {
|
||||
return []BrowserProfile{{Dir: "Default", Name: "Default"}}, nil
|
||||
}
|
||||
var state localState
|
||||
if err := json.Unmarshal(data, &state); err != nil {
|
||||
return []BrowserProfile{{Dir: "Default", Name: "Default"}}, nil
|
||||
}
|
||||
if len(state.Profile.InfoCache) == 0 {
|
||||
return []BrowserProfile{{Dir: "Default", Name: "Default"}}, nil
|
||||
}
|
||||
profiles := make([]BrowserProfile, 0, len(state.Profile.InfoCache))
|
||||
for dir, meta := range state.Profile.InfoCache {
|
||||
name := meta.Name
|
||||
if name == "" {
|
||||
name = dir
|
||||
}
|
||||
profiles = append(profiles, BrowserProfile{
|
||||
Dir: dir,
|
||||
Name: name,
|
||||
Email: meta.UserName,
|
||||
})
|
||||
}
|
||||
sort.Slice(profiles, func(i, j int) bool {
|
||||
return profiles[i].Dir < profiles[j].Dir
|
||||
})
|
||||
return profiles, nil
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
package profile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestReadProfilesFromLocalState checks that profiles are parsed from Local
// State's info_cache, sorted by directory, with name and email carried over.
func TestReadProfilesFromLocalState(t *testing.T) {
	dir := t.TempDir()
	localState := `{
		"profile": {
			"info_cache": {
				"Default": {"name": "Personal", "user_name": "me@example.com"},
				"Profile 25": {"name": "Work", "user_name": "work@example.com"}
			}
		}
	}`
	if err := os.WriteFile(filepath.Join(dir, "Local State"), []byte(localState), 0644); err != nil {
		t.Fatal(err)
	}
	profiles, err := ReadProfiles(dir)
	if err != nil {
		t.Fatal(err)
	}
	if len(profiles) != 2 {
		t.Fatalf("expected 2 profiles, got %d", len(profiles))
	}
	if profiles[0].Dir != "Default" || profiles[1].Dir != "Profile 25" {
		t.Fatalf("profiles not sorted by dir: %+v", profiles)
	}
	if profiles[1].Name != "Work" || profiles[1].Email != "work@example.com" {
		t.Fatalf("profile metadata mismatch: %+v", profiles[1])
	}
}
|
||||
|
||||
func TestReadProfilesFallbackDefault(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
profiles, err := ReadProfiles(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(profiles) != 1 || profiles[0].Dir != "Default" {
|
||||
t.Fatalf("fallback mismatch: %+v", profiles)
|
||||
}
|
||||
}
|
||||
@@ -30,7 +30,7 @@ MACOS_SERVER_BINARIES: Dict[str, SignSpec] = {
|
||||
),
|
||||
"bun": SignSpec("bun", "runtime", "browseros-executable-entitlements.plist"),
|
||||
"rg": SignSpec("rg", "runtime"),
|
||||
"limactl": SignSpec("limactl", "runtime"),
|
||||
"limactl": SignSpec("limactl", "runtime", "lima-vz-entitlements.plist"),
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for the shared server-binary sign table."""
|
||||
|
||||
import plistlib
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
|
||||
@@ -33,6 +34,16 @@ class MacosServerBinariesTest(unittest.TestCase):
|
||||
self.assertEqual(spec.identifier_suffix, "limactl")
|
||||
self.assertIsNone(macos_sign_spec_for(Path("/x/not_a_known_binary")))
|
||||
|
||||
def test_limactl_uses_vz_entitlement(self):
|
||||
spec = macos_sign_spec_for(Path("/x/limactl"))
|
||||
assert spec is not None
|
||||
self.assertEqual(spec.entitlements, "lima-vz-entitlements.plist")
|
||||
|
||||
entitlements_name = spec.entitlements
|
||||
assert entitlements_name is not None
|
||||
entitlements = plistlib.loads((ENTITLEMENTS_DIR / entitlements_name).read_bytes())
|
||||
self.assertIs(entitlements.get("com.apple.security.virtualization"), True)
|
||||
|
||||
def test_matches_lima_bundle_layout(self):
|
||||
keys = set(MACOS_SERVER_BINARIES.keys())
|
||||
self.assertIn("limactl", keys)
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>com.apple.security.virtualization</key>
|
||||
<true/>
|
||||
</dict>
|
||||
</plist>
|
||||
Reference in New Issue
Block a user