Compare commits

..

4 Commits

Author SHA1 Message Date
Nikhil Sonti
72a2cc1d8a fix: enable alpha capabilities in development 2026-04-15 15:23:05 -07:00
Nikhil Sonti
fdee7f91f2 fix: gate agents page behind alpha 2026-04-15 14:58:03 -07:00
Nikhil Sonti
90691c03b9 fix: provide chat session for non-alpha home 2026-04-15 14:56:24 -07:00
Nikhil Sonti
fa07dc2a22 feat: gate agent alpha UI behind capabilities 2026-04-15 14:42:52 -07:00
144 changed files with 7046 additions and 7643 deletions

View File

@@ -30,54 +30,15 @@ jobs:
fail-fast: false
matrix:
include:
- suite: server-agent
command: (cd apps/server && bun run test:agent)
junit_path: test-results/server-agent.xml
needs_browser: false
- suite: server-api
command: (cd apps/server && bun run test:api)
junit_path: test-results/server-api.xml
needs_browser: false
- suite: server-skills
command: (cd apps/server && bun run test:skills)
junit_path: test-results/server-skills.xml
needs_browser: false
- suite: server-tools
command: (cd apps/server && bun run test:tools)
junit_path: test-results/server-tools.xml
needs_browser: true
- suite: server-browser
command: (cd apps/server && bun run test:browser)
junit_path: test-results/server-browser.xml
needs_browser: false
- suite: server-integration
command: (cd apps/server && bun run test:integration)
junit_path: test-results/server-integration.xml
needs_browser: true
- suite: server-sdk
command: (cd apps/server && bun run test:sdk)
junit_path: test-results/server-sdk.xml
needs_browser: true
- suite: server-root
command: (cd apps/server && bun run test:root)
junit_path: test-results/server-root.xml
needs_browser: false
- suite: agent
command: bun run test:agent
junit_path: test-results/agent.xml
needs_browser: false
- suite: eval
command: bun run test:eval
junit_path: test-results/eval.xml
needs_browser: false
- suite: agent-sdk
command: bun run test:agent-sdk
junit_path: test-results/agent-sdk.xml
needs_browser: false
- suite: build
command: bun run test:build
junit_path: test-results/build.xml
needs_browser: false
- suite: tools
test_path: tests/tools
junit_path: test-results/tools.xml
- suite: integration
test_path: tests/server.integration.test.ts
junit_path: test-results/integration.xml
- suite: sdk
test_path: tests/sdk
junit_path: test-results/sdk.xml
steps:
- name: Checkout code
@@ -90,7 +51,6 @@ jobs:
run: bun ci
- name: Resolve BrowserOS cache key
if: matrix.needs_browser == true
id: browseros-cache-key
run: |
set -euo pipefail
@@ -105,7 +65,6 @@ jobs:
echo "key=browseros-appimage-${{ runner.os }}-$cache_key" >> "$GITHUB_OUTPUT"
- name: Restore BrowserOS cache
if: matrix.needs_browser == true
id: browseros-cache
uses: actions/cache@v4
with:
@@ -113,14 +72,13 @@ jobs:
key: ${{ steps.browseros-cache-key.outputs.key }}
- name: Download BrowserOS
if: matrix.needs_browser == true && steps.browseros-cache.outputs.cache-hit != 'true'
if: steps.browseros-cache.outputs.cache-hit != 'true'
run: |
mkdir -p .ci/bin
curl -fsSL "$BROWSEROS_APPIMAGE_URL" -o .ci/bin/BrowserOS.AppImage
chmod +x .ci/bin/BrowserOS.AppImage
- name: Prepare BrowserOS wrapper
if: matrix.needs_browser == true
run: |
mkdir -p .ci/bin
cat > .ci/bin/browseros <<'EOF'
@@ -141,23 +99,16 @@ jobs:
BROWSEROS_BINARY: ${{ github.workspace }}/packages/browseros-agent/.ci/bin/browseros
BROWSEROS_TEST_HEADLESS: "true"
BROWSEROS_TEST_EXTRA_ARGS: --no-sandbox --disable-dev-shm-usage
BROWSEROS_JUNIT_PATH: ${{ github.workspace }}/packages/browseros-agent/${{ matrix.junit_path }}
run: |
set +e
mkdir -p test-results
${{ matrix.command }}
cd apps/server
bun run test:cleanup
bun --env-file=.env.development test "${{ matrix.test_path }}" --reporter=junit --reporter-outfile="../../${{ matrix.junit_path }}"
exit_code=$?
cd ../..
if [ ! -f "${{ matrix.junit_path }}" ]; then
if [ "$exit_code" = "0" ]; then
cat > "${{ matrix.junit_path }}" <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0">
<testsuite name="${{ matrix.suite }}" tests="0" failures="0">
</testsuite>
</testsuites>
EOF
else
cat > "${{ matrix.junit_path }}" <<EOF
cat > "${{ matrix.junit_path }}" <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="1">
<testsuite name="${{ matrix.suite }}" tests="1" failures="1">
@@ -167,7 +118,6 @@ jobs:
</testsuite>
</testsuites>
EOF
fi
fi
echo "exit_code=$exit_code" >> "$GITHUB_OUTPUT"
@@ -189,124 +139,3 @@ jobs:
echo "See the uploaded \`junit-${{ matrix.suite }}\` artifact for details." >> "$GITHUB_STEP_SUMMARY"
exit 1
fi
comment:
name: PR test summary
needs: test
if: >-
always()
&& github.event_name == 'pull_request'
&& github.event.pull_request.head.repo.full_name == github.repository
runs-on: ubuntu-latest
permissions:
pull-requests: write
actions: read
steps:
- name: Download JUnit artifacts
uses: actions/download-artifact@v4
continue-on-error: true
with:
path: junit
pattern: junit-*
- name: Build comment body
run: |
python3 <<'PY'
import glob, os, xml.etree.ElementTree as ET

# Build a sticky PR-comment body (comment.md) summarizing JUnit results
# downloaded into junit/junit-*/ by the preceding artifact step.
run_url = f"{os.environ['GITHUB_SERVER_URL']}/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}"
# Hidden marker lets the upsert step find and update the existing comment.
marker = "<!-- browseros-agent-tests-summary -->"
suites = []          # per-suite rows: {name, passed, failed, skipped, total}
failed_cases = []    # (suite_name, test label) pairs for the details section
total_tests = total_failed = total_skipped = 0
for xml_path in sorted(glob.glob("junit/junit-*/*.xml")):
    # Artifact directory name is "junit-<suite>"; recover the suite name.
    suite_name = os.path.basename(os.path.dirname(xml_path)).removeprefix("junit-")
    try:
        root = ET.parse(xml_path).getroot()
    except ET.ParseError:
        # An unparseable report is counted as one synthetic failure so the
        # summary never silently drops a broken suite.
        suites.append({"name": suite_name, "passed": 0, "failed": 1, "skipped": 0, "total": 1})
        total_tests += 1
        total_failed += 1
        failed_cases.append((suite_name, "(could not parse junit XML)"))
        continue
    # Reports may be rooted at either <testsuites> or a bare <testsuite>.
    # NOTE(review): findall() only scans direct children — nested
    # <testsuite> elements (if a reporter ever emits them) would be missed.
    testsuites = root.findall("testsuite") if root.tag == "testsuites" else [root]
    s_tests = s_fail = s_err = s_skip = 0
    for ts in testsuites:
        # Attributes may be absent; treat missing as 0.
        s_tests += int(ts.get("tests") or 0)
        s_fail += int(ts.get("failures") or 0)
        s_err += int(ts.get("errors") or 0)
        s_skip += int(ts.get("skipped") or 0)
        for tc in ts.iter("testcase"):
            # A testcase with a <failure> or <error> child is a failed test.
            if tc.find("failure") is not None or tc.find("error") is not None:
                cls = tc.get("classname") or ""
                name = tc.get("name") or "(unnamed)"
                label = f"{cls} > {name}" if cls else name
                failed_cases.append((suite_name, label))
    # Errors count as failures for display purposes.
    s_failed = s_fail + s_err
    s_passed = max(s_tests - s_failed - s_skip, 0)
    suites.append({"name": suite_name, "passed": s_passed, "failed": s_failed, "skipped": s_skip, "total": s_tests})
    total_tests += s_tests
    total_failed += s_failed
    total_skipped += s_skip
total_passed = max(total_tests - total_failed - total_skipped, 0)
# Pick the headline based on overall outcome.
if total_tests == 0:
    header = "## :warning: No test results were produced"
elif total_failed == 0:
    header = f"## :white_check_mark: Tests passed — {total_passed}/{total_tests}"
else:
    header = f"## :x: Tests failed — {total_failed}/{total_tests} failed"
lines = [marker, header, ""]
if suites:
    # One markdown table row per suite.
    lines.append("| Suite | Passed | Failed | Skipped |")
    lines.append("|-------|--------|--------|---------|")
    for s in suites:
        # warning icon = suite produced zero tests; x = at least one failure.
        icon = ":white_check_mark:" if s["failed"] == 0 and s["total"] > 0 else ":warning:" if s["total"] == 0 else ":x:"
        lines.append(f"| {icon} `{s['name']}` | {s['passed']}/{s['total']} | {s['failed']} | {s['skipped']} |")
if failed_cases:
    # Cap the failed-test list at 50 entries to keep the comment readable.
    lines += ["", "<details open>", "<summary><b>Failed tests</b></summary>", ""]
    for suite_name, label in failed_cases[:50]:
        lines.append(f"- **{suite_name}** — `{label}`")
    if len(failed_cases) > 50:
        lines.append(f"- …and {len(failed_cases) - 50} more")
    lines += ["", "</details>"]
lines += ["", f"[View workflow run]({run_url})"]
with open("comment.md", "w") as f:
    f.write("\n".join(lines) + "\n")
PY
- name: Upsert sticky PR comment
uses: actions/github-script@v7
with:
script: |
// Create or update a single "sticky" PR comment identified by the marker.
const fs = require('fs');
const body = fs.readFileSync('comment.md', 'utf8');
const marker = '<!-- browseros-agent-tests-summary -->';
const { owner, repo } = context.repo;
const issue_number = context.payload.pull_request.number;
const triggerSha = context.payload.pull_request.head.sha;
// Re-fetch the PR: if new commits landed since this run was triggered,
// skip posting so we never overwrite the comment with stale results.
const { data: pr } = await github.rest.pulls.get({ owner, repo, pull_number: issue_number });
if (pr.head.sha !== triggerSha) {
  core.info(`PR head has moved (${pr.head.sha} vs ${triggerSha}) — skipping stale comment.`);
  return;
}
// Paginate all comments to find a previous summary by its hidden marker.
const comments = await github.paginate(github.rest.issues.listComments, {
  owner, repo, issue_number, per_page: 100,
});
const existing = comments.find(c => c.body && c.body.includes(marker));
if (existing) {
  // Update in place to keep a single summary comment per PR.
  await github.rest.issues.updateComment({ owner, repo, comment_id: existing.id, body });
} else {
  await github.rest.issues.createComment({ owner, repo, issue_number, body });
}

2
.gitignore vendored
View File

@@ -1,6 +1,4 @@
**/.DS_Store
**.auctor/**
.auctor.json
.gcs_entries
**/dmg
**/env

View File

@@ -1,3 +1,7 @@
import type {
BrowserOSCustomRoleInput,
BrowserOSRoleBoundary,
} from '@browseros/shared/types/role-aware-agents'
import {
AlertCircle,
Cpu,
@@ -31,18 +35,55 @@ import {
SelectTrigger,
SelectValue,
} from '@/components/ui/select'
import { Textarea } from '@/components/ui/textarea'
import { useLlmProviders } from '@/lib/llm-providers/useLlmProviders'
import { AgentChat } from './AgentChat'
import { AgentTerminal } from './AgentTerminal'
import { getOpenClawSupportedProviders } from './openclaw-supported-providers'
import {
type AgentEntry,
type OpenClawStatus,
type RoleTemplateSummary,
useOpenClawAgents,
useOpenClawMutations,
useOpenClawRoles,
useOpenClawStatus,
} from './useOpenClaw'
const OAUTH_ONLY_TYPES = new Set(['chatgpt-pro', 'github-copilot', 'qwen-code'])
const CUSTOM_ROLE_VALUE = '__custom__'
const PLAIN_AGENT_VALUE = '__plain__'
type AgentCreationMode = 'builtin' | 'custom' | 'plain'
// Seed boundaries offered when a user defines a custom role. Returns a fresh
// array each call so callers can mutate their copy (e.g. per-dialog state)
// without sharing state. Order here is the order rendered in the UI.
function createDefaultCustomRoleBoundaries(): BrowserOSRoleBoundary[] {
  return [
    {
      key: 'draft-external-comms',
      label: 'Draft external communications',
      description: 'May prepare outbound messages for review.',
      defaultMode: 'allow',
    },
    {
      key: 'send-external-comms',
      label: 'Send external communications',
      description: 'Should require approval before sending messages.',
      defaultMode: 'ask',
    },
    {
      key: 'calendar-mutations',
      label: 'Modify calendar events',
      description: 'Should ask before moving or creating calendar events.',
      defaultMode: 'ask',
    },
  ]
}
// Split a comma-separated string into trimmed, non-empty entries.
function parseCommaSeparatedList(input: string): string[] {
  const entries: string[] = []
  for (const raw of input.split(',')) {
    const trimmed = raw.trim()
    if (trimmed) {
      entries.push(trimmed)
    }
  }
  return entries
}
const CONTROL_PLANE_COPY: Record<
OpenClawStatus['controlPlaneStatus'],
{
@@ -240,6 +281,7 @@ export const AgentsPage: FC = () => {
loading: agentsLoading,
error: agentsError,
} = useOpenClawAgents(agentsQueryEnabled)
const { roles, loading: rolesLoading, error: rolesError } = useOpenClawRoles()
const {
setupOpenClaw,
createAgent,
@@ -257,15 +299,44 @@ export const AgentsPage: FC = () => {
const [setupOpen, setSetupOpen] = useState(false)
const [setupProviderId, setSetupProviderId] = useState('')
const [createOpen, setCreateOpen] = useState(false)
const [selectedRoleValue, setSelectedRoleValue] = useState<
| RoleTemplateSummary['id']
| typeof CUSTOM_ROLE_VALUE
| typeof PLAIN_AGENT_VALUE
>('chief-of-staff')
const [newName, setNewName] = useState('')
const [createProviderId, setCreateProviderId] = useState('')
const [customRole, setCustomRole] = useState<BrowserOSCustomRoleInput>({
name: '',
shortDescription: '',
longDescription: '',
recommendedApps: [],
boundaries: createDefaultCustomRoleBoundaries(),
})
const [chatAgent, setChatAgent] = useState<AgentEntry | null>(null)
const [showTerminal, setShowTerminal] = useState(false)
const [error, setError] = useState<string | null>(null)
const compatibleProviders = getOpenClawSupportedProviders(providers)
const compatibleProviders = providers.filter(
(provider) => provider.apiKey && !OAUTH_ONLY_TYPES.has(provider.type),
)
const creationMode: AgentCreationMode =
selectedRoleValue === CUSTOM_ROLE_VALUE
? 'custom'
: selectedRoleValue === PLAIN_AGENT_VALUE
? 'plain'
: 'builtin'
const isCustomRole = creationMode === 'custom'
const isPlainAgent = creationMode === 'plain'
const selectedRole =
creationMode === 'builtin'
? (roles.find((role) => role.id === selectedRoleValue) ??
roles[0] ??
null)
: null
useEffect(() => {
if (compatibleProviders.length === 0) return
@@ -284,13 +355,48 @@ export const AgentsPage: FC = () => {
defaultProviderId,
])
useEffect(() => {
if (!createOpen || roles.length === 0) return
const defaultRole = roles.find((role) => role.id === 'chief-of-staff')
const nextRole = defaultRole ?? roles[0]
setSelectedRoleValue((current) => {
if (current === CUSTOM_ROLE_VALUE || current === PLAIN_AGENT_VALUE)
return current
const hasCurrent = roles.some((role) => role.id === current)
return hasCurrent ? current : nextRole.id
})
setNewName((current) => current || nextRole.defaultAgentName)
}, [createOpen, roles])
useEffect(() => {
if (!createOpen) return
setNewName((current) => current || 'agent')
}, [createOpen])
if (isCustomRole) {
setNewName(
(current) =>
current || customRole.name.trim().toLowerCase().replace(/\s+/g, '-'),
)
return
}
if (isPlainAgent) {
setNewName((current) => current || 'agent')
return
}
if (selectedRole) {
setNewName((current) => current || selectedRole.defaultAgentName)
}
}, [createOpen, isCustomRole, isPlainAgent, customRole.name, selectedRole])
const inlineError =
error ?? statusError?.message ?? agentsError?.message ?? null
error ??
statusError?.message ??
agentsError?.message ??
rolesError?.message ??
null
const gatewayUiState = useMemo(() => {
if (!status) {
@@ -356,10 +462,34 @@ export const AgentsPage: FC = () => {
(item) => item.id === createProviderId,
)
const normalizedName = newName.trim().toLowerCase().replace(/\s+/g, '-')
const customRolePayload = isCustomRole
? {
...customRole,
name: customRole.name.trim(),
shortDescription: customRole.shortDescription.trim(),
longDescription: customRole.longDescription.trim(),
}
: undefined
if (
isCustomRole &&
(!customRolePayload?.name ||
!customRolePayload.shortDescription ||
!customRolePayload.longDescription)
) {
setError(
'Custom roles require a role name, short description, and long description.',
)
return
}
if (creationMode === 'builtin' && !selectedRole) return
await runWithErrorHandling(async () => {
await createAgent({
name: normalizedName,
roleId: creationMode === 'builtin' ? selectedRole?.id : undefined,
customRole: isCustomRole ? customRolePayload : undefined,
providerType: provider?.type,
providerName: provider?.name,
baseUrl: provider?.baseUrl,
@@ -368,6 +498,13 @@ export const AgentsPage: FC = () => {
})
setCreateOpen(false)
setNewName('')
setCustomRole({
name: '',
shortDescription: '',
longDescription: '',
recommendedApps: [],
boundaries: createDefaultCustomRoleBoundaries(),
})
})
}
@@ -651,10 +788,20 @@ export const AgentsPage: FC = () => {
<CardTitle className="text-base">
{agent.name}
</CardTitle>
{agent.role && (
<Badge variant="secondary">
{agent.role.roleName}
</Badge>
)}
</div>
<p className="font-mono text-muted-foreground text-xs">
{agent.workspace}
</p>
{agent.role && (
<p className="text-muted-foreground text-xs">
{agent.role.shortDescription}
</p>
)}
</div>
</div>
<div className="flex items-center gap-1">
@@ -721,6 +868,246 @@ export const AgentsPage: FC = () => {
<DialogTitle>Create Agent</DialogTitle>
</DialogHeader>
<div className="space-y-4 py-2">
<div className="space-y-2">
<label className="font-medium text-sm" htmlFor="agent-role">
Agent Role
</label>
<Select
value={selectedRoleValue}
onValueChange={(value) => {
if (value === CUSTOM_ROLE_VALUE) {
setSelectedRoleValue(CUSTOM_ROLE_VALUE)
setNewName(
customRole.name
.trim()
.toLowerCase()
.replace(/\s+/g, '-') || 'custom-agent',
)
return
}
if (value === PLAIN_AGENT_VALUE) {
setSelectedRoleValue(PLAIN_AGENT_VALUE)
setNewName('agent')
return
}
const role = roles.find((item) => item.id === value)
if (!role) return
setSelectedRoleValue(role.id)
setNewName(role.defaultAgentName)
}}
disabled={rolesLoading}
>
<SelectTrigger id="agent-role">
<SelectValue
placeholder={
rolesLoading ? 'Loading roles...' : 'Select a role'
}
/>
</SelectTrigger>
<SelectContent>
{roles.map((role) => (
<SelectItem key={role.id} value={role.id}>
{role.name}
</SelectItem>
))}
<SelectItem value={PLAIN_AGENT_VALUE}>Plain Agent</SelectItem>
<SelectItem value={CUSTOM_ROLE_VALUE}>Custom Role</SelectItem>
</SelectContent>
</Select>
{selectedRole && !isCustomRole && (
<Card>
<CardContent className="space-y-3 py-4">
<div>
<div className="font-medium text-sm">
{selectedRole.name}
</div>
<p className="text-muted-foreground text-xs">
{selectedRole.shortDescription}
</p>
</div>
<div>
<div className="font-medium text-xs">
Recommended Apps
</div>
<p className="text-muted-foreground text-xs">
{selectedRole.recommendedApps.join(', ')}
</p>
</div>
<div>
<div className="font-medium text-xs">
Default Boundaries
</div>
<ul className="space-y-1 text-muted-foreground text-xs">
{selectedRole.boundaries.map((boundary) => (
<li key={boundary.key}>
{boundary.label}: {boundary.defaultMode}
</li>
))}
</ul>
</div>
</CardContent>
</Card>
)}
{isPlainAgent && (
<Card>
<CardContent className="space-y-2 py-4">
<div className="font-medium text-sm">Plain Agent</div>
<p className="text-muted-foreground text-xs">
No role bootstrap or defaults. Intended for temporary
development and testing only.
</p>
</CardContent>
</Card>
)}
</div>
{isCustomRole && (
<Card>
<CardContent className="space-y-4 py-4">
<div className="space-y-2">
<label
htmlFor="custom-role-name"
className="font-medium text-sm"
>
Custom Role Name
</label>
<Input
id="custom-role-name"
value={customRole.name}
onChange={(event) => {
const name = event.target.value
setCustomRole((current) => ({ ...current, name }))
setNewName(
name.trim().toLowerCase().replace(/\s+/g, '-') ||
'custom-agent',
)
}}
placeholder="Board Prep Operator"
/>
</div>
<div className="space-y-2">
<label
htmlFor="custom-role-short-description"
className="font-medium text-sm"
>
Short Description
</label>
<Input
id="custom-role-short-description"
value={customRole.shortDescription}
onChange={(event) =>
setCustomRole((current) => ({
...current,
shortDescription: event.target.value,
}))
}
placeholder="Prepares executive briefs and weekly follow-ups."
/>
</div>
<div className="space-y-2">
<label
htmlFor="custom-role-long-description"
className="font-medium text-sm"
>
Long Description
</label>
<Textarea
id="custom-role-long-description"
value={customRole.longDescription}
onChange={(event) =>
setCustomRole((current) => ({
...current,
longDescription: event.target.value,
}))
}
placeholder="Describe the role, purpose, and what kinds of outcomes this agent should produce."
rows={4}
/>
</div>
<div className="space-y-2">
<label
htmlFor="custom-role-apps"
className="font-medium text-sm"
>
Recommended Apps
</label>
<Input
id="custom-role-apps"
value={customRole.recommendedApps.join(', ')}
onChange={(event) =>
setCustomRole((current) => ({
...current,
recommendedApps: parseCommaSeparatedList(
event.target.value,
),
}))
}
placeholder="gmail, slack, notion"
/>
<p className="text-muted-foreground text-xs">
Comma-separated. Used as role guidance only in this
milestone.
</p>
</div>
<div className="space-y-3">
<div>
<div className="font-medium text-sm">
Boundary Defaults
</div>
<p className="text-muted-foreground text-xs">
Set the starting behavior for common high-impact
actions.
</p>
</div>
{customRole.boundaries.map((boundary) => (
<div
key={boundary.key}
className="grid gap-2 rounded-lg border p-3"
>
<div>
<div className="font-medium text-sm">
{boundary.label}
</div>
<p className="text-muted-foreground text-xs">
{boundary.description}
</p>
</div>
<Select
value={boundary.defaultMode}
onValueChange={(value) =>
setCustomRole((current) => ({
...current,
boundaries: current.boundaries.map((item) =>
item.key === boundary.key
? {
...item,
defaultMode:
value as BrowserOSRoleBoundary['defaultMode'],
}
: item,
),
}))
}
>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="allow">Allow</SelectItem>
<SelectItem value="ask">Ask</SelectItem>
<SelectItem value="block">Block</SelectItem>
</SelectContent>
</Select>
</div>
))}
</div>
</CardContent>
</Card>
)}
<div>
<label
htmlFor="agent-name"
@@ -754,8 +1141,10 @@ export const AgentsPage: FC = () => {
disabled={
!newName.trim() ||
creating ||
rolesLoading ||
!gatewayUiState.canManageAgents ||
compatibleProviders.length === 0
compatibleProviders.length === 0 ||
(creationMode === 'builtin' && !selectedRole)
}
className="w-full"
>

View File

@@ -1,23 +0,0 @@
import type { LlmProviderConfig, ProviderType } from '@/lib/llm-providers/types'

// Provider types OpenClaw can drive; all are API-key based (no OAuth-only
// providers appear here).
const OPENCLAW_SUPPORTED_PROVIDER_TYPES: ProviderType[] = [
  'openrouter',
  'openai',
  'anthropic',
  'moonshot',
]

// True when the provider type is in the OpenClaw allow-list above.
export function isOpenClawSupportedProviderType(
  providerType: ProviderType,
): boolean {
  return OPENCLAW_SUPPORTED_PROVIDER_TYPES.includes(providerType)
}

// Filter configured providers down to those usable with OpenClaw:
// must have an API key set AND be a supported provider type.
export function getOpenClawSupportedProviders(
  providers: LlmProviderConfig[],
): LlmProviderConfig[] {
  return providers.filter(
    (provider) =>
      !!provider.apiKey && isOpenClawSupportedProviderType(provider.type),
  )
}

View File

@@ -1,3 +1,7 @@
import type {
BrowserOSAgentRoleId,
BrowserOSCustomRoleInput,
} from '@browseros/shared/types/role-aware-agents'
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import { getAgentServerUrl } from '@/lib/browseros/helpers'
import { useAgentServerUrl } from '@/lib/browseros/useBrowserOSProviders'
@@ -7,6 +11,27 @@ export interface AgentEntry {
name: string
workspace: string
model?: unknown
role?: {
roleSource: 'builtin' | 'custom'
roleId?: BrowserOSAgentRoleId
roleName: string
shortDescription: string
}
}
export interface RoleTemplateSummary {
id: BrowserOSAgentRoleId
name: string
shortDescription: string
longDescription: string
recommendedApps: string[]
defaultAgentName: string
boundaries: Array<{
key: string
label: string
description: string
defaultMode: 'allow' | 'ask' | 'block'
}>
}
export interface OpenClawStatus {
@@ -36,6 +61,8 @@ export interface OpenClawStatus {
export interface OpenClawAgentMutationInput {
name: string
roleId?: BrowserOSAgentRoleId
customRole?: BrowserOSCustomRoleInput
providerType?: string
providerName?: string
baseUrl?: string
@@ -59,6 +86,7 @@ export function getModelDisplayName(model: unknown): string | undefined {
export const OPENCLAW_QUERY_KEYS = {
status: 'openclaw-status',
agents: 'openclaw-agents',
roles: 'openclaw-roles',
} as const
async function clawFetch<T>(
@@ -89,6 +117,16 @@ async function fetchOpenClawAgents(baseUrl: string): Promise<AgentEntry[]> {
return data.agents ?? []
}
// Fetch the available role templates from the OpenClaw gateway's /roles
// endpoint. Returns an empty list when the response omits `roles`.
async function fetchOpenClawRoles(
  baseUrl: string,
): Promise<RoleTemplateSummary[]> {
  const data = await clawFetch<{ roles: RoleTemplateSummary[] }>(
    baseUrl,
    '/roles',
  )
  return data.roles ?? []
}
async function invalidateOpenClawQueries(
queryClient: ReturnType<typeof useQueryClient>,
): Promise<void> {
@@ -141,6 +179,28 @@ export function useOpenClawAgents(enabled = true) {
}
}
// React Query hook exposing the OpenClaw role templates.
// Waits for the agent-server base URL to resolve before fetching; results
// are treated as fresh for 60s so repeated mounts don't refetch.
export function useOpenClawRoles() {
  const {
    baseUrl,
    isLoading: urlLoading,
    error: urlError,
  } = useAgentServerUrl()
  const query = useQuery<RoleTemplateSummary[], Error>({
    queryKey: [OPENCLAW_QUERY_KEYS.roles, baseUrl],
    // baseUrl is non-null whenever `enabled` is true, hence the cast.
    queryFn: () => fetchOpenClawRoles(baseUrl as string),
    enabled: !!baseUrl && !urlLoading,
    staleTime: 60_000,
  })
  return {
    roles: query.data ?? [],
    // Loading while either the URL or the roles request is in flight.
    loading: query.isLoading || urlLoading,
    // Surface the URL-resolution error when the query itself has none.
    error: query.error ?? urlError,
    refetch: query.refetch,
  }
}
export function useOpenClawMutations() {
const { baseUrl, isLoading: urlLoading } = useAgentServerUrl()
const queryClient = useQueryClient()

View File

@@ -7,9 +7,8 @@ import { PRODUCT_WEB_HOST } from './lib/constants/productWebHost'
// biome-ignore lint/style/noProcessEnv: build config file needs env access
const env = process.env
const apiUrl = new URL(
env.VITE_PUBLIC_BROWSEROS_API?.trim() || 'https://api.browseros.com',
)
// biome-ignore lint/style/noNonNullAssertion: required env var
const apiUrl = new URL(env.VITE_PUBLIC_BROWSEROS_API!)
const apiPattern = apiUrl.port
? `${apiUrl.hostname}:${apiUrl.port}`
: apiUrl.hostname

View File

@@ -1,100 +0,0 @@
{"site":"hn","url":"https://news.ycombinator.com/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"lobsters","url":"https://lobste.rs/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"tldr","url":"https://tldr.tech/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"theverge","url":"https://www.theverge.com/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"techcrunch","url":"https://techcrunch.com/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"arstechnica","url":"https://arstechnica.com/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"bbc_news","url":"https://www.bbc.com/news","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"npr","url":"https://www.npr.org/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"guardian","url":"https://www.theguardian.com/international","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"reuters","url":"https://www.reuters.com/","category":"news","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"mdn","url":"https://developer.mozilla.org/","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"react_dev","url":"https://react.dev/","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"nextjs_docs","url":"https://nextjs.org/docs","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"vue_docs","url":"https://vuejs.org/guide/introduction.html","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"svelte_docs","url":"https://svelte.dev/docs/introduction","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"tailwind_docs","url":"https://tailwindcss.com/docs","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"fastapi_docs","url":"https://fastapi.tiangolo.com/","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"django_docs","url":"https://docs.djangoproject.com/en/5.0/","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"rust_book","url":"https://doc.rust-lang.org/book/","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"go_pkg","url":"https://pkg.go.dev/","category":"docs","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"amazon_home","url":"https://www.amazon.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"amazon_book","url":"https://www.amazon.com/dp/0735619670","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"ebay_home","url":"https://www.ebay.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"etsy_home","url":"https://www.etsy.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"walmart_home","url":"https://www.walmart.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"bestbuy_home","url":"https://www.bestbuy.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"target_home","url":"https://www.target.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"ikea_home","url":"https://www.ikea.com/us/en/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"rei_home","url":"https://www.rei.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"costco_home","url":"https://www.costco.com/","category":"ecommerce","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"youtube","url":"https://www.youtube.com/","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"vimeo","url":"https://vimeo.com/","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"twitch_directory","url":"https://www.twitch.tv/directory","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"dailymotion","url":"https://www.dailymotion.com/","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"soundcloud","url":"https://soundcloud.com/discover","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"archive_org","url":"https://archive.org/","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"ted_talks","url":"https://www.ted.com/talks","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"khan_academy","url":"https://www.khanacademy.org/","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"coursera_home","url":"https://www.coursera.org/","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"edx_home","url":"https://www.edx.org/","category":"media","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"reddit_programming","url":"https://www.reddit.com/r/programming/","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"reddit_python","url":"https://www.reddit.com/r/python/","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"reddit_ml","url":"https://www.reddit.com/r/MachineLearning/","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"hn_newest","url":"https://news.ycombinator.com/newest","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"lobsters_recent","url":"https://lobste.rs/recent","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"mastodon_explore","url":"https://mastodon.social/explore","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"github_explore","url":"https://github.com/explore","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"github_topics","url":"https://github.com/topics","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"stackexchange_sites","url":"https://stackexchange.com/sites","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"stackoverflow_questions","url":"https://stackoverflow.com/questions","category":"social","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"wikipedia_main","url":"https://en.wikipedia.org/wiki/Main_Page","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"wikipedia_roman","url":"https://en.wikipedia.org/wiki/Roman_Empire","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"wiktionary","url":"https://en.wiktionary.org/wiki/Wiktionary:Main_Page","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"wikidata","url":"https://www.wikidata.org/wiki/Wikidata:Main_Page","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"wikivoyage","url":"https://en.wikivoyage.org/wiki/Main_Page","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"simple_wiki","url":"https://simple.wikipedia.org/wiki/Main_Page","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"gutenberg","url":"https://www.gutenberg.org/","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"openlibrary","url":"https://openlibrary.org/","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"scholar_react","url":"https://scholar.google.com/scholar?q=react+hooks","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"mit_ocw","url":"https://ocw.mit.edu/","category":"reference","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"google_react","url":"https://www.google.com/search?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"bing_react","url":"https://www.bing.com/search?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"ddg_react","url":"https://duckduckgo.com/?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"brave_react","url":"https://search.brave.com/search?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"ecosia_react","url":"https://www.ecosia.org/search?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"qwant_react","url":"https://www.qwant.com/?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"you_react","url":"https://you.com/search?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"perplexity_home","url":"https://www.perplexity.ai/","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"phind_home","url":"https://www.phind.com/","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"mojeek_react","url":"https://www.mojeek.com/search?q=react+hooks","category":"search","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"github_trending","url":"https://github.com/trending","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"github_pulls","url":"https://github.com/pulls?q=is%3Apr+is%3Aopen+author%3Atorvalds","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"github_issues","url":"https://github.com/issues?q=is%3Aopen+author%3Atorvalds","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"npm_react","url":"https://www.npmjs.com/package/react","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"npm_search_ai","url":"https://www.npmjs.com/search?q=ai+sdk","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"pypi_django","url":"https://pypi.org/project/django/","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"pypi_search","url":"https://pypi.org/search/?q=fastapi","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"dockerhub","url":"https://hub.docker.com/explore","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"crates_io","url":"https://crates.io/","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"packagist","url":"https://packagist.org/","category":"dashboard","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"medium_top","url":"https://medium.com/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"substack_top","url":"https://substack.com/discover","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"dev_to","url":"https://dev.to/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"hashnode_top","url":"https://hashnode.com/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"css_tricks","url":"https://css-tricks.com/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"smashing_mag","url":"https://www.smashingmagazine.com/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"a_list_apart","url":"https://alistapart.com/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"github_blog","url":"https://github.blog/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"mozilla_blog","url":"https://blog.mozilla.org/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"nytimes_open","url":"https://open.nytimes.com/","category":"longform","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"google_flights","url":"https://www.google.com/travel/flights","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"kayak_home","url":"https://www.kayak.com/","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"airbnb_lisbon","url":"https://www.airbnb.com/s/Lisbon--Portugal","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"booking_lisbon","url":"https://www.booking.com/searchresults.html?ss=Lisbon","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"expedia_home","url":"https://www.expedia.com/","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"opentable_lisbon","url":"https://www.opentable.com/lisbon-portugal-restaurants","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"openstreetmap","url":"https://www.openstreetmap.org/","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"weather_com","url":"https://weather.com/","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"accuweather","url":"https://www.accuweather.com/","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}
{"site":"wolframalpha","url":"https://www.wolframalpha.com/","category":"app","states":[{"kind":"initial"},{"kind":"scroll","pixels":800},{"kind":"scroll","pixels":1600},{"kind":"scroll","pixels":2400},{"kind":"scroll","pixels":3200}]}

View File

@@ -1,10 +0,0 @@
{"site":"hn","url":"https://news.ycombinator.com/","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"wikipedia","url":"https://en.wikipedia.org/wiki/Main_Page","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"github_trending","url":"https://github.com/trending","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"reddit","url":"https://www.reddit.com/r/programming/","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"mdn","url":"https://developer.mozilla.org/","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"arxiv","url":"https://arxiv.org/list/cs.CL/recent","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"stackoverflow","url":"https://stackoverflow.com/questions","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"ycombinator","url":"https://www.ycombinator.com/companies","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"npm","url":"https://www.npmjs.com/package/react","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}
{"site":"youtube","url":"https://www.youtube.com/","states":[{"kind":"initial"},{"kind":"scroll","pixels":600},{"kind":"scroll","pixels":1200}]}

View File

@@ -5,7 +5,6 @@
"type": "module",
"scripts": {
"eval": "bun --env-file=.env.development run src/index.ts",
"collect": "bun --env-file=.env.development run src/collect.ts",
"typecheck": "tsc --noEmit"
},
"dependencies": {

View File

@@ -1,108 +0,0 @@
#!/usr/bin/env bun
import { dirname, resolve } from 'node:path'
import { fileURLToPath } from 'node:url'
import { parseArgs } from 'node:util'
import { runCollection } from './runner/collection-runner'
// Usage text printed for --help and on argument errors.
// NOTE(review): keep the option list in sync with the parseArgs options in
// main(); column alignment here is cosmetic only.
const HELP = `
VL training-data collector
Usage:
  bun run collect --seeds <path.jsonl> [options]
Options:
  --seeds <path>        JSONL file with CollectionTarget entries (required)
  --out <dir>           Output directory (default: results/vl-data/<timestamp>)
  --workers <n>         Parallel workers (default: 1)
  --limit <n>           Stop after N targets (default: all)
  --headless            Run BrowserOS headless (default: false)
  --profile-seed <dir>  Seed each worker's user-data-dir from this snapshot.
                        Expected layout: <dir>/Default/<profile files>.
                        Use scripts/copy-browseros-profile.sh to build one.
                        When set, --use-mock-keychain is dropped so cookies
                        encrypted against the OS keychain still decrypt.
  -h, --help            Show this help
`
/**
 * CLI entry point: parse flags, resolve paths, run the collection, and
 * report validation results. Exits 0 on success (or --help), 1 on bad
 * arguments or validation errors.
 */
async function main() {
  const parsed = parseArgs({
    args: Bun.argv.slice(2),
    options: {
      seeds: { type: 'string' },
      out: { type: 'string' },
      workers: { type: 'string', default: '1' },
      limit: { type: 'string' },
      headless: { type: 'boolean', default: false },
      'profile-seed': { type: 'string' },
      help: { type: 'boolean', short: 'h', default: false },
    },
  })
  const flags = parsed.values
  if (flags.help) {
    console.log(HELP)
    process.exit(0)
  }
  if (!flags.seeds) {
    console.error('error: --seeds is required')
    console.log(HELP)
    process.exit(1)
  }
  const workerCount = parsePositiveInt(flags.workers, '--workers')
  let targetLimit: number | undefined
  if (flags.limit) {
    targetLimit = parsePositiveInt(flags.limit, '--limit')
  }
  const projectRoot = resolve(
    dirname(fileURLToPath(import.meta.url)),
    '../../..',
  )
  // Default output directory is timestamped under the project root.
  let outDir: string
  if (flags.out) {
    outDir = resolve(process.cwd(), flags.out)
  } else {
    outDir = resolve(projectRoot, `results/vl-data/${timestamp()}`)
  }
  const seedDir = flags['profile-seed']
  const profileSeed = seedDir ? resolve(process.cwd(), seedDir) : undefined
  const { writtenCount, errors } = await runCollection({
    seedsPath: resolve(process.cwd(), flags.seeds),
    outDir,
    projectRoot,
    workers: workerCount,
    limit: targetLimit,
    headless: flags.headless,
    profileSeed,
  })
  console.log(`\nWrote ${writtenCount} record(s) to ${outDir}`)
  if (errors.length > 0) {
    // Print at most 20 errors, then a summary count of the rest.
    console.error(`Validation failed with ${errors.length} error(s):`)
    for (const err of errors.slice(0, 20)) console.error(` ${err}`)
    if (errors.length > 20)
      console.error(` ... and ${errors.length - 20} more`)
    process.exit(1)
  }
  console.log('Validation passed.')
}
/** Local-time stamp `YYYY-MM-DD-HHMM`, used to name default output dirs. */
function timestamp(): string {
  const now = new Date()
  const two = (value: number) => value.toString().padStart(2, '0')
  const datePart = `${now.getFullYear()}-${two(now.getMonth() + 1)}-${two(now.getDate())}`
  return `${datePart}-${two(now.getHours())}${two(now.getMinutes())}`
}
/**
 * Parse a CLI flag value as a strictly positive base-10 integer.
 *
 * Exits the process with code 1 (after printing an error) when the value is
 * not a plain positive integer. Previously `Number.parseInt` silently
 * truncated inputs such as "3.5" or "12abc" to their leading digits; a
 * digits-only check now rejects those loudly.
 */
function parsePositiveInt(raw: string, flag: string): number {
  const n = Number.parseInt(raw, 10)
  // parseInt tolerates trailing garbage; require the raw text to be digits
  // only so malformed values fail instead of being quietly truncated.
  if (!/^[0-9]+$/.test(raw.trim()) || !Number.isFinite(n) || n < 1) {
    console.error(`error: ${flag} must be a positive integer (got "${raw}")`)
    process.exit(1)
  }
  return n
}
// Top-level error boundary: print a concise message and fail the process.
main().catch((err) => {
  console.error(err instanceof Error ? err.message : String(err))
  process.exit(1)
})

View File

@@ -1,84 +0,0 @@
import { randomUUID } from 'node:crypto'
import { mkdir, rename, writeFile } from 'node:fs/promises'
import { join, relative } from 'node:path'
import { VL_VIEWPORT_HEIGHT, VL_VIEWPORT_WIDTH } from '../constants'
import type { CollectedRecord } from '../types/collection-target'
// A record ready to be persisted; the writer assigns `id` and
// `screenshot_path` itself at write time.
export interface PreparedRecord
  extends Omit<CollectedRecord, 'id' | 'screenshot_path'> {}
// Identifier and on-disk paths produced by a successful write().
export interface WriteResult {
  id: string
  screenshotPath: string
  jsonPath: string
}
/**
 * Persists collected records as a screenshot PNG plus a JSON sidecar under
 * `outDir`, and tracks per-site record counts for the final manifest.
 *
 * All file writes go through writeAtomic (temp file + rename) so readers
 * never observe partially written files.
 */
export class RecordWriter {
  // Number of records written per site slug; feeds writeManifest().
  private readonly siteCounts = new Map<string, number>()
  constructor(
    private readonly outDir: string,
    private readonly projectRoot: string,
  ) {}
  /** Create the `screenshots/` and `raw/` output subdirectories. */
  async init(): Promise<void> {
    await mkdir(join(this.outDir, 'screenshots'), { recursive: true })
    await mkdir(join(this.outDir, 'raw'), { recursive: true })
  }
  /**
   * Write one record: decode the base64 PNG, then write the JSON sidecar
   * whose `screenshot_path` is relative to the project root.
   * Returns the generated id and both file paths.
   */
  async write(record: PreparedRecord, pngBase64: string): Promise<WriteResult> {
    // Short random suffix keeps ids unique without unwieldy full UUIDs.
    const shortUuid = randomUUID().replace(/-/g, '').slice(0, 8)
    const id = `${record.site}_${shortUuid}`
    const pngPath = join(this.outDir, 'screenshots', `${id}.png`)
    const jsonPath = join(this.outDir, 'raw', `${id}.json`)
    // temp + rename so a crash between png and json writes doesn't leave
    // orphan files that future validators would flag.
    await writeAtomic(pngPath, Buffer.from(pngBase64, 'base64'))
    const finalRecord: CollectedRecord = {
      ...record,
      id,
      screenshot_path: relative(this.projectRoot, pngPath),
    }
    await writeAtomic(jsonPath, `${JSON.stringify(finalRecord, null, 2)}\n`)
    this.siteCounts.set(
      record.site,
      (this.siteCounts.get(record.site) ?? 0) + 1,
    )
    return { id, screenshotPath: pngPath, jsonPath }
  }
  /**
   * Write `meta.json` summarizing the run: timestamp, collector tag, total
   * record count, per-site counts, and the capture viewport.
   */
  async writeManifest(collectedAt: Date, collectorTag: string): Promise<void> {
    const sites = [...this.siteCounts.entries()].map(([site, states]) => ({
      site,
      states,
    }))
    const manifest = {
      collected_at: collectedAt.toISOString(),
      collector: collectorTag,
      total_records: [...this.siteCounts.values()].reduce((a, b) => a + b, 0),
      sites,
      viewport: { width: VL_VIEWPORT_WIDTH, height: VL_VIEWPORT_HEIGHT },
    }
    await writeAtomic(
      join(this.outDir, 'meta.json'),
      `${JSON.stringify(manifest, null, 2)}\n`,
    )
  }
  /** Defensive copy of the per-site counts (callers cannot mutate state). */
  getSiteCounts(): Map<string, number> {
    return new Map(this.siteCounts)
  }
}
/**
 * Write `data` to `path` via a `.tmp` sibling followed by rename, so readers
 * never observe a partially written file.
 */
async function writeAtomic(
  path: string,
  data: string | Buffer | Uint8Array,
): Promise<void> {
  const stagingPath = `${path}.tmp`
  await writeFile(stagingPath, data)
  await rename(stagingPath, path)
}

View File

@@ -1,39 +0,0 @@
// One parsed accessibility-snapshot line: numeric backend id, role token,
// optional quoted name (empty string when absent), and the raw line text.
export interface ParsedSnapshotLine {
  backend_id: number
  role: string
  name: string
  snapshot_line: string
}
// Matches lines like `[123] button "Submit"`: captures the backend id, the
// role token, and an optional double-quoted name (backslash escapes allowed).
const LINE_RE = /^\[(\d+)\]\s+(\S+)(?:\s+"((?:[^"\\]|\\.)*)")?/
/**
 * Raised when a snapshot line cannot be parsed; carries the zero-based line
 * index and the offending line text for diagnostics.
 */
export class SnapshotParseError extends Error {
  public readonly lineIndex: number
  public readonly line: string

  constructor(message: string, lineIndex: number, line: string) {
    super(message)
    this.name = 'SnapshotParseError'
    this.lineIndex = lineIndex
    this.line = line
  }
}
/**
 * Parse an accessibility snapshot (one `[N] role "name"` entry per line)
 * into structured lines.
 *
 * Throws SnapshotParseError on the first line that does not match the
 * expected format.
 */
export function parseSnapshot(snapshot: string): ParsedSnapshotLine[] {
  const parsed: ParsedSnapshotLine[] = []
  const rawLines = snapshot.split('\n')
  for (let index = 0; index < rawLines.length; index++) {
    const rawLine = rawLines[index]
    const match = rawLine.match(LINE_RE)
    if (match === null) {
      throw new SnapshotParseError(
        `Snapshot line ${index + 1} does not match [N] role format`,
        index,
        rawLine,
      )
    }
    parsed.push({
      backend_id: Number.parseInt(match[1], 10),
      role: match[2],
      // The quoted-name capture group is optional; default to empty string.
      name: match[3] ?? '',
      snapshot_line: rawLine,
    })
  }
  return parsed
}

View File

@@ -1,94 +0,0 @@
import { readFile } from 'node:fs/promises'
import { z } from 'zod'
import {
type CollectionTarget,
CollectionTargetSchema,
} from '../types/collection-target'
/** Error raised when the seeds file cannot be read, parsed, or validated. */
export class TargetLoadError extends Error {
  constructor(
    message: string,
    public readonly path: string, // seeds file the failure refers to
    public readonly cause?: Error, // underlying I/O error, when applicable
  ) {
    super(message)
    this.name = 'TargetLoadError'
  }
}
/**
 * Load and validate a JSONL seeds file: one CollectionTarget per line.
 * Aggregates per-line parse/validation problems into a single
 * TargetLoadError instead of stopping at the first bad line, then rejects
 * duplicate site slugs.
 */
export async function loadCollectionTargets(
  path: string,
): Promise<CollectionTarget[]> {
  let content: string
  try {
    content = await readFile(path, 'utf-8')
  } catch (error) {
    throw new TargetLoadError(
      `Failed to read seeds file: ${path}`,
      path,
      error instanceof Error ? error : undefined,
    )
  }
  // Drop blank lines so trailing newlines / stray spacing don't break parsing.
  const lines = content
    .trim()
    .split('\n')
    .filter((line) => line.trim().length > 0)
  if (lines.length === 0) {
    throw new TargetLoadError('Seeds file is empty', path)
  }
  const targets: CollectionTarget[] = []
  const errors: Array<{ line: number; error: string }> = []
  for (let i = 0; i < lines.length; i++) {
    const lineNumber = i + 1 // 1-based for human-readable messages
    try {
      const parsed = JSON.parse(lines[i])
      targets.push(CollectionTargetSchema.parse(parsed))
    } catch (error) {
      // Distinguish malformed JSON from schema violations in the report.
      if (error instanceof SyntaxError) {
        errors.push({
          line: lineNumber,
          error: `Invalid JSON: ${error.message}`,
        })
      } else if (error instanceof z.ZodError) {
        const issues = error.issues
          .map((iss) => `${iss.path.join('.')}: ${iss.message}`)
          .join(', ')
        errors.push({ line: lineNumber, error: `Validation: ${issues}` })
      } else {
        errors.push({ line: lineNumber, error: `Unknown: ${String(error)}` })
      }
    }
  }
  if (errors.length > 0) {
    // Detail at most five problems to keep the error message readable.
    const summary = errors
      .slice(0, 5)
      .map((e) => ` Line ${e.line}: ${e.error}`)
      .join('\n')
    const more =
      errors.length > 5 ? `\n ... and ${errors.length - 5} more` : ''
    throw new TargetLoadError(
      `Failed to parse ${errors.length} target(s):\n${summary}${more}`,
      path,
    )
  }
  // Site slugs are used as record-id prefixes, so each must appear once.
  const seen = new Set<string>()
  const duplicates: string[] = []
  for (const t of targets) {
    if (seen.has(t.site)) duplicates.push(t.site)
    seen.add(t.site)
  }
  if (duplicates.length > 0) {
    throw new TargetLoadError(
      `Duplicate site slugs: ${duplicates.join(', ')}`,
      path,
    )
  }
  return targets
}

View File

@@ -1,125 +0,0 @@
import { access, readdir, readFile } from 'node:fs/promises'
import { basename, extname, isAbsolute, join, resolve } from 'node:path'
import {
type CollectedRecord,
CollectedRecordSchema,
} from '../types/collection-target'
/**
 * Validate every raw JSON record under `<outDir>/raw`. Returns
 * human-readable error strings; an empty array means the output set passed
 * all checks.
 */
export async function validateOutput(
  outDir: string,
  projectRoot: string = process.cwd(),
): Promise<string[]> {
  const rawDir = join(outDir, 'raw')
  let entries: string[]
  try {
    entries = await readdir(rawDir)
  } catch {
    return [`${rawDir}: directory not readable`]
  }
  const recordFiles = entries.filter((name) => name.endsWith('.json'))
  if (recordFiles.length === 0) {
    return [`${rawDir}: no .json records`]
  }
  const problems: string[] = []
  for (const name of recordFiles) {
    problems.push(...(await validateRecordFile(rawDir, name, projectRoot)))
  }
  return problems
}
/**
 * Validate one raw record file: read, JSON-parse, schema-check, then run
 * cross-field invariants (id/filename agreement, snapshot-vs-elements line
 * count, element checks, screenshot existence).
 *
 * Fix: every error message previously printed the literal `$(unknown)` —
 * the `filename` parameter was never interpolated, so failures could not be
 * traced back to the offending record. Messages now lead with the filename.
 */
async function validateRecordFile(
  rawDir: string,
  filename: string,
  projectRoot: string,
): Promise<string[]> {
  const errors: string[] = []
  const fullPath = join(rawDir, filename)
  let raw: string
  try {
    raw = await readFile(fullPath, 'utf-8')
  } catch (e) {
    return [`${filename}: cannot read: ${stringErr(e)}`]
  }
  let parsed: unknown
  try {
    parsed = JSON.parse(raw)
  } catch (e) {
    return [`${filename}: invalid JSON: ${stringErr(e)}`]
  }
  const result = CollectedRecordSchema.safeParse(parsed)
  if (!result.success) {
    const issues = result.error.issues
      .map((iss) => `${iss.path.join('.')}: ${iss.message}`)
      .join(', ')
    return [`${filename}: schema: ${issues}`]
  }
  const record = result.data
  // The record id must equal the filename stem (e.g. hn_abcdef12.json).
  if (record.id !== basename(filename, extname(filename))) {
    errors.push(`${filename}: id "${record.id}" does not match filename stem`)
  }
  // Core dataset invariant: one element per snapshot line.
  const lineCount = record.snapshot.split('\n').length
  if (lineCount !== record.elements.length) {
    errors.push(
      `${filename}: snapshot has ${lineCount} lines but elements has ${record.elements.length}`,
    )
  }
  errors.push(...validateElements(filename, record))
  errors.push(
    ...(await validateScreenshotExists(filename, record, projectRoot)),
  )
  return errors
}
/**
 * Per-element invariants: backend_ids are unique, each bbox is well-formed
 * (x1 <= x2 and y1 <= y2), each snapshot_line starts with its own [id]
 * token, and each snapshot_line actually appears in the record's snapshot.
 *
 * Fix: error messages previously printed the literal `$(unknown)` — the
 * `filename` parameter was never interpolated. Messages now lead with the
 * filename so failures are traceable.
 */
function validateElements(filename: string, record: CollectedRecord): string[] {
  const errors: string[] = []
  const seen = new Set<number>()
  for (const el of record.elements) {
    if (seen.has(el.backend_id)) {
      errors.push(`${filename}: duplicate backend_id ${el.backend_id}`)
    }
    seen.add(el.backend_id)
    if (el.bbox[0] > el.bbox[2] || el.bbox[1] > el.bbox[3]) {
      errors.push(
        `${filename}: backend_id ${el.backend_id} has bad bbox ${JSON.stringify(el.bbox)}`,
      )
    }
    if (!el.snapshot_line.startsWith(`[${el.backend_id}]`)) {
      errors.push(
        `${filename}: backend_id ${el.backend_id} snapshot_line does not start with [${el.backend_id}]`,
      )
    }
    if (!record.snapshot.includes(el.snapshot_line)) {
      errors.push(
        `${filename}: snapshot_line for [${el.backend_id}] not found in snapshot`,
      )
    }
  }
  return errors
}
/**
 * Check that the record's screenshot file exists on disk. Relative
 * screenshot paths are resolved against `projectRoot`.
 *
 * Fix: the error message previously printed the literal `$(unknown)` —
 * the `filename` parameter was never interpolated. It now prefixes the
 * message so the failing record is identifiable.
 */
async function validateScreenshotExists(
  filename: string,
  record: CollectedRecord,
  projectRoot: string,
): Promise<string[]> {
  const screenshotPath = isAbsolute(record.screenshot_path)
    ? record.screenshot_path
    : resolve(projectRoot, record.screenshot_path)
  try {
    await access(screenshotPath)
    return []
  } catch {
    return [`${filename}: screenshot_path missing on disk (${screenshotPath})`]
  }
}
/** Normalize an unknown thrown value into a printable message. */
function stringErr(e: unknown): string {
  if (e instanceof Error) return e.message
  return String(e)
}

View File

@@ -1,150 +0,0 @@
import type { Browser } from '@browseros/server/browser'
import {
LAYOUT_SETTLE_MS,
VL_VIEWPORT_HEIGHT,
VL_VIEWPORT_WIDTH,
} from '../constants'
import type {
CollectionState,
CollectionTarget,
ElementRecord,
} from '../types/collection-target'
import { sleep } from '../utils/sleep'
import type { RecordWriter } from './record-writer'
import { parseSnapshot } from './snapshot-parser'
/** Collaborators the collector needs; injected for testability. */
export interface VlCollectorDeps {
  browser: Browser // browser handle used for all page operations
  pageId: number // id of the page/tab this collector drives
  writer: RecordWriter // sink for captured records + screenshots
  log?: (msg: string) => void // optional progress logger
}
/**
 * Drives a single browser page through each of a target's states and emits
 * one (screenshot, snapshot, elements) record per successfully captured
 * state via the injected RecordWriter.
 */
export class VlCollector {
  constructor(private readonly deps: VlCollectorDeps) {}
  /**
   * Collect every state of `target` on the shared page and return the
   * number of records written. A failure in one state is logged and
   * skipped so it does not abort the remaining states.
   */
  async collect(target: CollectionTarget): Promise<number> {
    const { browser, pageId, writer, log } = this.deps
    // Fixed viewport so every record is captured at the same geometry.
    await browser.setViewport(pageId, VL_VIEWPORT_WIDTH, VL_VIEWPORT_HEIGHT, 1)
    await browser.goto(pageId, target.url)
    let written = 0
    for (const state of target.states) {
      try {
        await this.applyState(state)
        await sleep(LAYOUT_SETTLE_MS) // let layout settle before capturing
        const { record, pngBase64 } = await this.captureOne(target)
        const result = await writer.write(record, pngBase64)
        written++
        log?.(` captured ${result.id} (${state.kind})`)
      } catch (error) {
        log?.(` failed ${target.site} ${state.kind}: ${errorMessage(error)}`)
      }
    }
    return written
  }
  /** Mutate the page into the requested state before capture. */
  private async applyState(state: CollectionState): Promise<void> {
    const { browser, pageId } = this.deps
    switch (state.kind) {
      case 'initial':
        // Page as loaded — nothing to do.
        return
      case 'scroll':
        await browser.evaluate(pageId, `window.scrollTo(0, ${state.pixels})`)
        return
      case 'click_and_wait': {
        // Click the center of the element identified by backend_id, then
        // give the page state.wait_ms to react.
        const { x1, y1, x2, y2 } = await browser.getElementBbox(
          pageId,
          state.backend_id,
        )
        const cx = Math.floor((x1 + x2) / 2)
        const cy = Math.floor((y1 + y2) / 2)
        await browser.clickAt(pageId, cx, cy)
        await sleep(state.wait_ms)
        return
      }
      case 'evaluate':
        await browser.evaluate(pageId, state.expression)
        await sleep(state.wait_ms)
        return
    }
  }
  /**
   * Capture one record (without id/screenshot_path — the writer assigns
   * those) plus the base64 PNG screenshot for the current page state.
   */
  private async captureOne(target: CollectionTarget): Promise<{
    record: Omit<
      import('../types/collection-target').CollectedRecord,
      'id' | 'screenshot_path'
    >
    pngBase64: string
  }> {
    const { browser, pageId } = this.deps
    // Snapshot, scroll_y, url, and screenshot must reflect the same page state
    // — bbox resolution can take seconds on a busy page and would let scroll
    // drift, so capture all frozen-state fields adjacent in time.
    const rawSnapshot = await browser.snapshot(pageId)
    const snapshot = rawSnapshot.replace(/\n$/, '')
    const scrollY = toInt(
      (await browser.evaluate(pageId, 'window.scrollY')).value,
    )
    const resolvedUrl = coerceString(
      (await browser.evaluate(pageId, 'window.location.href')).value,
      target.url,
    )
    const screenshot = await browser.screenshot(pageId, {
      format: 'png',
      fullPage: false,
    })
    const parsed = parseSnapshot(snapshot)
    const elements: ElementRecord[] = []
    for (const line of parsed) {
      let bbox: [number, number, number, number]
      try {
        const box = await browser.getElementBbox(pageId, line.backend_id)
        bbox = [box.x1, box.y1, box.x2, box.y2]
      } catch {
        // Bbox resolution failed — record the all-zero sentinel bbox.
        bbox = [0, 0, 0, 0]
      }
      elements.push({
        backend_id: line.backend_id,
        role: line.role,
        name: line.name,
        bbox,
        snapshot_line: line.snapshot_line,
        in_viewport: overlapsViewport(bbox),
      })
    }
    return {
      record: {
        url: resolvedUrl,
        site: target.site,
        viewport: { width: VL_VIEWPORT_WIDTH, height: VL_VIEWPORT_HEIGHT },
        scroll_y: scrollY,
        snapshot,
        elements,
      },
      pngBase64: screenshot.data,
    }
  }
}
/**
 * True when the bbox intersects the fixed capture viewport. The all-zero
 * bbox is the "bbox resolution failed" sentinel and never counts as visible.
 */
function overlapsViewport(bbox: [number, number, number, number]): boolean {
  const [left, top, right, bottom] = bbox
  if (left === 0 && top === 0 && right === 0 && bottom === 0) {
    return false
  }
  const horizontal = left < VL_VIEWPORT_WIDTH && right > 0
  const vertical = top < VL_VIEWPORT_HEIGHT && bottom > 0
  return horizontal && vertical
}
/** Coerce an unknown value to a non-negative integer; non-finite → 0. */
function toInt(v: unknown): number {
  const numeric = typeof v === 'number' ? v : Number(v)
  if (!Number.isFinite(numeric)) return 0
  return Math.max(0, Math.floor(numeric))
}
/** Return `v` only when it is a non-empty string; otherwise the fallback. */
function coerceString(v: unknown, fallback: string): string {
  if (typeof v !== 'string') return fallback
  return v.length > 0 ? v : fallback
}
/** Printable message for an unknown thrown value. */
function errorMessage(e: unknown): string {
  if (e instanceof Error) return e.message
  return String(e)
}

View File

@@ -6,7 +6,3 @@ export const DEFAULT_TIMEOUT_MS = 30 * 60 * 1000 // 30 minutes
export const SCREENSHOT_TIMEOUT_MS = 65_000 // 65s — ensures we get extension's error (60s)
export const MAX_ACTIONS_PER_DELEGATION = 15
export const CLADO_REQUEST_TIMEOUT_MS = 120_000
// Fixed capture viewport for VL data collection — must stay in sync with the
// literal width/height in CollectedRecordSchema so every record validates.
export const VL_VIEWPORT_WIDTH = 1280
export const VL_VIEWPORT_HEIGHT = 800
// Grace period after a state mutation before capturing, letting layout settle.
export const LAYOUT_SETTLE_MS = 300

View File

@@ -14,7 +14,6 @@
*/
import {
copyFileSync,
existsSync,
mkdtempSync,
readFileSync,
@@ -53,19 +52,16 @@ export class BrowserOSAppManager {
private readonly workerIndex: number
private readonly loadExtensions: boolean
private readonly headless: boolean
private readonly profileSeed: string | null
constructor(
workerIndex: number = 0,
basePorts?: EvalPorts,
loadExtensions: boolean = false,
headless: boolean = false,
profileSeed: string | null = null,
) {
this.workerIndex = workerIndex
this.loadExtensions = loadExtensions
this.headless = headless
this.profileSeed = profileSeed
const base = basePorts ?? { cdp: 9010, server: 9110, extension: 9310 }
this.ports = {
cdp: base.cdp + workerIndex,
@@ -127,25 +123,16 @@ export class BrowserOSAppManager {
// Unique temp dir per worker per restart
this.tempDir = mkdtempSync('/tmp/browseros-eval-')
if (this.profileSeed) {
this.cloneProfileSeed(this.tempDir)
}
console.log(
` [W${this.workerIndex}] Ports: CDP=${cdp} Server=${server} Extension=${extension}${this.headless ? ' (headless)' : ''}`,
)
console.log(
` [W${this.workerIndex}] Profile: ${this.tempDir}${this.profileSeed ? ' (seeded)' : ''}`,
)
console.log(` [W${this.workerIndex}] Profile: ${this.tempDir}`)
// --- Chrome Launch (matches start.ts startManualBrowser) ---
// Drop --use-mock-keychain when a real profile seed is in use — otherwise
// cookies encrypted against the OS keychain (where the login sessions
// live) decrypt to garbage and the seed's logged-in state is lost.
const chromeArgs = [
'--no-first-run',
'--no-default-browser-check',
...(this.profileSeed ? [] : ['--use-mock-keychain']),
'--use-mock-keychain',
'--disable-browseros-server',
'--disable-browseros-extensions',
...(this.headless ? ['--headless=new'] : []),
@@ -209,43 +196,6 @@ export class BrowserOSAppManager {
console.log(` [W${this.workerIndex}] Server healthy`)
}
/**
 * Copy a seed Chrome profile into this worker's fresh temp dir so the
 * browser starts with the seed's data (e.g. logged-in sessions).
 * Copies `Default/` and `Local State`, then removes stale singleton files.
 * Throws when the seed lacks a `Default/` subfolder or the copy fails.
 */
private cloneProfileSeed(tempDir: string): void {
  const seed = this.profileSeed
  if (!seed) return
  const seedDefault = join(seed, 'Default')
  if (!existsSync(seedDefault)) {
    throw new Error(`Profile seed missing Default/ subfolder: ${seedDefault}`)
  }
  // APFS clone is O(1) on macOS; slower byte-copy on other filesystems.
  const result = spawnSync({
    cmd: ['cp', '-c', '-R', seedDefault, join(tempDir, 'Default')],
    stdout: 'ignore',
    stderr: 'pipe',
  })
  if (result.exitCode !== 0) {
    const stderr = result.stderr?.toString?.() ?? ''
    throw new Error(`Failed to clone profile seed: ${stderr.trim()}`)
  }
  // Local State travels alongside Default/; fall back to an empty JSON
  // object when the seed does not provide one.
  const seedLocalState = join(seed, 'Local State')
  const destLocalState = join(tempDir, 'Local State')
  if (existsSync(seedLocalState)) {
    copyFileSync(seedLocalState, destLocalState)
  } else {
    writeFileSync(destLocalState, '{}')
  }
  // Stale singleton files from the source instance would block Chrome.
  for (const f of ['SingletonLock', 'SingletonSocket', 'SingletonCookie']) {
    try {
      rmSync(join(tempDir, 'Default', f), { force: true })
    } catch {
      // ignore
    }
  }
}
private async waitForCdp(): Promise<boolean> {
const startTime = Date.now()
while (Date.now() - startTime < CDP_WAIT_TIMEOUT_MS) {

View File

@@ -1,171 +0,0 @@
import { execSync } from 'node:child_process'
import { existsSync } from 'node:fs'
import { join } from 'node:path'
import { Browser } from '@browseros/server/browser'
import { CdpBackend } from '@browseros/server/browser/backends/cdp'
import { RecordWriter } from '../collectors/record-writer'
import { loadCollectionTargets } from '../collectors/target-loader'
import { validateOutput } from '../collectors/validator'
import { VlCollector } from '../collectors/vl-collector'
import type { CollectionTarget } from '../types/collection-target'
import type { EvalPorts } from '../utils/dev-config'
import { BrowserOSAppManager } from './browseros-app-manager'
// Matches BrowserOSAppManager's default port bases; each worker's index is
// added as an offset so parallel workers never collide on a port.
const DEFAULT_BASE_PORTS: EvalPorts = {
  cdp: 9010,
  server: 9110,
  extension: 9310,
}
/** Options for a full collection run. */
export interface CollectionRunnerOptions {
  seedsPath: string // JSONL file of CollectionTargets, one per line
  outDir: string // destination for raw/, screenshots/, meta.json
  projectRoot: string // base dir for resolving relative screenshot paths
  workers: number // requested parallelism; clamped to [1, #targets]
  limit?: number // optional cap on how many targets to collect
  headless: boolean // run the browser headless
  profileSeed?: string // optional profile dir (must contain Default/) to clone
  basePorts?: EvalPorts // port bases; worker index is added as an offset
}
/**
 * Single-consumer-at-a-time work queue shared by all workers. `stop()`
 * makes subsequent `next()` calls return null so in-flight targets can
 * drain without new work being handed out.
 */
class TargetQueue {
  private cursor = 0
  private halted = false
  constructor(private readonly targets: CollectionTarget[]) {}
  /** Hand out the next target, or null once drained or stopped. */
  next(): CollectionTarget | null {
    if (this.halted) return null
    if (this.cursor >= this.targets.length) return null
    const target = this.targets[this.cursor]
    this.cursor += 1
    return target
  }
  /** Stop handing out further targets; in-flight work is unaffected. */
  stop(): void {
    this.halted = true
  }
}
/**
 * Run a full VL data-collection pass: load targets, fan them out across
 * worker-owned browser instances, write the manifest, and validate the
 * resulting output directory.
 *
 * Returns the total number of records written plus any validation errors.
 */
export async function runCollection(
  opts: CollectionRunnerOptions,
): Promise<{ writtenCount: number; errors: string[] }> {
  // Fail fast before spawning any browser when the seed profile is unusable.
  if (opts.profileSeed && !existsSync(join(opts.profileSeed, 'Default'))) {
    throw new Error(
      `profile seed missing Default/ subfolder: ${join(opts.profileSeed, 'Default')}`,
    )
  }
  const loaded = await loadCollectionTargets(opts.seedsPath)
  const targets = opts.limit ? loaded.slice(0, opts.limit) : loaded
  const startedAt = new Date()
  const writer = new RecordWriter(opts.outDir, opts.projectRoot)
  await writer.init()
  const queue = new TargetQueue(targets)
  const appManagers: BrowserOSAppManager[] = []
  // Never spawn more workers than targets; always spawn at least one.
  const workerCount = Math.max(1, Math.min(opts.workers, targets.length))
  const cleanupSignal = setupSignalHandlers(queue, appManagers)
  let writtenCount = 0
  try {
    const workers = Array.from({ length: workerCount }, (_, i) =>
      runWorker(i, queue, writer, opts, appManagers),
    )
    const counts = await Promise.all(workers)
    writtenCount = counts.reduce((a, b) => a + b, 0)
  } finally {
    // Always tear down every spawned browser, even on failure/interrupt;
    // allSettled tolerates managers already killed by the signal handler.
    await Promise.allSettled(appManagers.map((m) => m.killApp()))
    cleanupSignal()
  }
  await writer.writeManifest(startedAt, resolveCollectorTag())
  const errors = await validateOutput(opts.outDir, opts.projectRoot)
  return { writtenCount, errors }
}
/**
 * One worker's lifecycle: start a dedicated BrowserOS app instance, connect
 * over CDP, then drain targets from the shared queue until it is empty or
 * stopped. Returns the number of records this worker wrote.
 *
 * The manager is pushed into `appManagers` after a successful restart so
 * runCollection's finally block (and the signal handler) can kill it.
 */
async function runWorker(
  workerIndex: number,
  queue: TargetQueue,
  writer: RecordWriter,
  opts: CollectionRunnerOptions,
  appManagers: BrowserOSAppManager[],
): Promise<number> {
  const basePorts = opts.basePorts ?? DEFAULT_BASE_PORTS
  const appManager = new BrowserOSAppManager(
    workerIndex,
    basePorts,
    false,
    opts.headless,
    opts.profileSeed ?? null,
  )
  await appManager.restart()
  appManagers.push(appManager)
  const { cdp: cdpPort } = appManager.getPorts()
  const cdp = new CdpBackend({ port: cdpPort })
  await cdp.connect()
  const browser = new Browser(cdp)
  // Reuse the browser's initial page for every target this worker handles.
  const pages = await browser.listPages()
  const pageId = pages[0]?.pageId
  if (pageId === undefined) {
    throw new Error(`Worker ${workerIndex}: no initial page available`)
  }
  const collector = new VlCollector({
    browser,
    pageId,
    writer,
    log: (msg) => console.log(`worker ${workerIndex}:${msg}`),
  })
  let written = 0
  try {
    while (true) {
      const target = queue.next()
      if (!target) break
      console.log(
        `worker ${workerIndex}: collecting ${target.site} (${target.url})`,
      )
      written += await collector.collect(target)
    }
  } finally {
    // Drop the CDP connection even when a target throws; the app itself is
    // killed by runCollection.
    await cdp.disconnect().catch(() => {})
  }
  return written
}
/**
 * Install SIGINT/SIGTERM handlers that drain the queue gracefully instead
 * of killing workers mid-target. Returns a disposer that removes both
 * handlers. Repeated signals after the first are ignored.
 */
function setupSignalHandlers(
  queue: TargetQueue,
  appManagers: BrowserOSAppManager[],
): () => void {
  let alreadyHandled = false
  const handleSignal = () => {
    if (alreadyHandled) return
    alreadyHandled = true
    console.log('\nShutting down: draining in-flight targets...')
    queue.stop()
    // Kick the app managers so workers unblock quickly. The outer `finally`
    // in runCollection will also await killApp — allSettled tolerates the
    // double-kill cleanly.
    for (const manager of appManagers) {
      manager.killApp().catch(() => {})
    }
  }
  process.on('SIGINT', handleSignal)
  process.on('SIGTERM', handleSignal)
  return () => {
    process.off('SIGINT', handleSignal)
    process.off('SIGTERM', handleSignal)
  }
}
/**
 * Tag the dataset with the collector's short git revision, falling back to
 * a fixed tag when git is unavailable (e.g. running outside a checkout).
 */
function resolveCollectorTag(): string {
  const fallback = 'browseros-agent@unknown'
  try {
    const output = execSync('git rev-parse --short HEAD', {
      stdio: ['ignore', 'pipe', 'ignore'],
    })
    const sha = output.toString().trim()
    return sha.length > 0 ? `browseros-agent@${sha}` : fallback
  } catch {
    return fallback
  }
}

View File

@@ -1,59 +0,0 @@
import { z } from 'zod'
// A page state to capture: the initial load, a vertical scroll, a click on a
// specific element followed by a wait, or an arbitrary JS expression.
export const CollectionStateSchema = z.discriminatedUnion('kind', [
  z.object({ kind: z.literal('initial') }),
  z.object({
    kind: z.literal('scroll'),
    pixels: z.number().int().nonnegative(),
  }),
  z.object({
    kind: z.literal('click_and_wait'),
    backend_id: z.number().int().positive(),
    wait_ms: z.number().int().positive().default(1000),
  }),
  z.object({
    kind: z.literal('evaluate'),
    expression: z.string().min(1),
    wait_ms: z.number().int().nonnegative().default(300),
  }),
])
// One seeds-file entry: a site slug, its URL, and 1–10 states to capture.
export const CollectionTargetSchema = z.object({
  site: z.string().regex(/^[a-z0-9_]+$/, 'site must match [a-z0-9_]+'),
  url: z.string().url(),
  states: z.array(CollectionStateSchema).min(1).max(10),
  category: z.string().optional(),
})
// One element captured from the accessibility snapshot; bbox is [x1,y1,x2,y2].
export const ElementRecordSchema = z.object({
  backend_id: z.number().int(),
  role: z.string(),
  name: z.string(),
  bbox: z.tuple([
    z.number().int(),
    z.number().int(),
    z.number().int(),
    z.number().int(),
  ]),
  snapshot_line: z.string(),
  in_viewport: z.boolean(),
})
// A full captured record. The id is `<site>_<8 hex chars>`, and the viewport
// is pinned to the fixed 1280x800 collection geometry.
export const CollectedRecordSchema = z.object({
  id: z.string().regex(/^[a-z0-9_]+_[0-9a-f]{8}$/),
  url: z.string().url(),
  site: z.string(),
  viewport: z.object({
    width: z.literal(1280),
    height: z.literal(800),
  }),
  scroll_y: z.number().int().nonnegative(),
  screenshot_path: z.string(),
  snapshot: z.string(),
  elements: z.array(ElementRecordSchema),
})
export type CollectionState = z.infer<typeof CollectionStateSchema>
export type CollectionTarget = z.infer<typeof CollectionTargetSchema>
export type ElementRecord = z.infer<typeof ElementRecordSchema>
export type CollectedRecord = z.infer<typeof CollectedRecordSchema>

View File

@@ -1,16 +1,4 @@
// Config types
// Collection target + record types
export {
type CollectedRecord,
CollectedRecordSchema,
type CollectionState,
CollectionStateSchema,
type CollectionTarget,
CollectionTargetSchema,
type ElementRecord,
ElementRecordSchema,
} from './collection-target'
export {
type AgentConfig,
AgentConfigSchema,
@@ -56,6 +44,7 @@ export {
type UserMessage,
UserMessageSchema,
} from './message'
// Result types
export {
type AgentResult,

View File

@@ -1,108 +0,0 @@
import { afterEach, beforeEach, describe, expect, it } from 'bun:test'
import { mkdtemp, readdir, readFile, rm } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import { RecordWriter } from '../../src/collectors/record-writer'
import type { CollectedRecord } from '../../src/types/collection-target'
// 1x1 PNG — the smallest valid screenshot payload for fixtures.
const TINY_PNG_BASE64 =
  'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=='
/**
 * Minimal valid record fixture for the given site: a single button element
 * at the standard 1280x800 collection viewport. The id and screenshot_path
 * are intentionally omitted — RecordWriter assigns them.
 */
function makeRecord(
  site: string,
): Omit<CollectedRecord, 'id' | 'screenshot_path'> {
  const buttonLine = '[1] button "ok"'
  const element = {
    backend_id: 1,
    role: 'button',
    name: 'ok',
    bbox: [10, 20, 30, 40] as [number, number, number, number],
    snapshot_line: buttonLine,
    in_viewport: true,
  }
  return {
    url: 'https://example.com/',
    site,
    viewport: { width: 1280, height: 800 },
    scroll_y: 0,
    snapshot: buttonLine,
    elements: [element],
  }
}
// RecordWriter unit tests: id assignment, atomic writes (no leftover .tmp
// files), manifest contents, and id uniqueness. Each test runs against a
// fresh temp dir that doubles as both outDir and projectRoot.
describe('RecordWriter', () => {
  let outDir: string
  beforeEach(async () => {
    outDir = await mkdtemp(join(tmpdir(), 'vl-writer-'))
  })
  afterEach(async () => {
    await rm(outDir, { recursive: true, force: true })
  })
  it('writes screenshot + json with an assigned id', async () => {
    const writer = new RecordWriter(outDir, outDir)
    await writer.init()
    const result = await writer.write(makeRecord('hn'), TINY_PNG_BASE64)
    expect(result.id).toMatch(/^hn_[0-9a-f]{8}$/)
    const pngFiles = await readdir(join(outDir, 'screenshots'))
    expect(pngFiles).toEqual([`${result.id}.png`])
    const jsonText = await readFile(
      join(outDir, 'raw', `${result.id}.json`),
      'utf-8',
    )
    const record = JSON.parse(jsonText) as CollectedRecord
    expect(record.id).toBe(result.id)
    expect(record.screenshot_path).toContain(`screenshots/${result.id}.png`)
    expect(record.snapshot).toBe('[1] button "ok"')
  })
  it('writes atomically — no .tmp files remain after a successful write', async () => {
    const writer = new RecordWriter(outDir, outDir)
    await writer.init()
    await writer.write(makeRecord('hn'), TINY_PNG_BASE64)
    const pngFiles = await readdir(join(outDir, 'screenshots'))
    const jsonFiles = await readdir(join(outDir, 'raw'))
    for (const f of [...pngFiles, ...jsonFiles]) {
      expect(f.endsWith('.tmp')).toBe(false)
    }
  })
  it('writes manifest with site counts and collector tag', async () => {
    const writer = new RecordWriter(outDir, outDir)
    await writer.init()
    await writer.write(makeRecord('hn'), TINY_PNG_BASE64)
    await writer.write(makeRecord('hn'), TINY_PNG_BASE64)
    await writer.write(makeRecord('wiki'), TINY_PNG_BASE64)
    const collectedAt = new Date('2026-04-18T12:00:00Z')
    await writer.writeManifest(collectedAt, 'browseros-agent@abc123')
    const manifest = JSON.parse(
      await readFile(join(outDir, 'meta.json'), 'utf-8'),
    )
    expect(manifest.collector).toBe('browseros-agent@abc123')
    expect(manifest.collected_at).toBe('2026-04-18T12:00:00.000Z')
    expect(manifest.total_records).toBe(3)
    expect(manifest.viewport).toEqual({ width: 1280, height: 800 })
    const siteCounts = Object.fromEntries(
      (manifest.sites as Array<{ site: string; states: number }>).map((s) => [
        s.site,
        s.states,
      ]),
    )
    expect(siteCounts).toEqual({ hn: 2, wiki: 1 })
  })
  it('generates a fresh id per write even for the same site', async () => {
    const writer = new RecordWriter(outDir, outDir)
    await writer.init()
    const first = await writer.write(makeRecord('hn'), TINY_PNG_BASE64)
    const second = await writer.write(makeRecord('hn'), TINY_PNG_BASE64)
    expect(second.id).not.toBe(first.id)
    const files = await readdir(join(outDir, 'raw'))
    expect(files.length).toBe(2)
  })
})

View File

@@ -1,93 +0,0 @@
import { describe, expect, it } from 'bun:test'
import {
parseSnapshot,
SnapshotParseError,
} from '../../src/collectors/snapshot-parser'
// parseSnapshot unit tests: happy-path role/name extraction, empty and
// absent names, trailing attribute text, multi-line ordering, byte-for-byte
// snapshot_line preservation, and malformed-line errors.
describe('parseSnapshot', () => {
  it('parses a simple button line with name', () => {
    const result = parseSnapshot('[337] button "Search"')
    expect(result).toEqual([
      {
        backend_id: 337,
        role: 'button',
        name: 'Search',
        snapshot_line: '[337] button "Search"',
      },
    ])
  })
  it('parses an empty-name element (searchbox with no accessible name)', () => {
    const result = parseSnapshot('[22] searchbox ""')
    expect(result[0]).toEqual({
      backend_id: 22,
      role: 'searchbox',
      name: '',
      snapshot_line: '[22] searchbox ""',
    })
  })
  it('parses an element with no name at all', () => {
    const result = parseSnapshot('[5] checkbox')
    expect(result[0]).toEqual({
      backend_id: 5,
      role: 'checkbox',
      name: '',
      snapshot_line: '[5] checkbox',
    })
  })
  it('parses a searchbox with a value attribute', () => {
    const line = '[22] searchbox "" value="query"'
    const result = parseSnapshot(line)
    expect(result[0].backend_id).toBe(22)
    expect(result[0].role).toBe('searchbox')
    expect(result[0].name).toBe('')
    expect(result[0].snapshot_line).toBe(line)
  })
  it('parses a disabled link', () => {
    const line = '[18] link "past" (disabled)'
    const result = parseSnapshot(line)
    expect(result[0]).toEqual({
      backend_id: 18,
      role: 'link',
      name: 'past',
      snapshot_line: line,
    })
  })
  it('parses cursor-interactive "clickable" role', () => {
    const result = parseSnapshot('[99] clickable "Open menu"')
    expect(result[0].role).toBe('clickable')
    expect(result[0].name).toBe('Open menu')
  })
  it('parses a multi-line snapshot and preserves order', () => {
    const snapshot = [
      '[12] link "Hacker News"',
      '[14] link "new"',
      '[22] searchbox ""',
      '[25] button "Search"',
    ].join('\n')
    const result = parseSnapshot(snapshot)
    expect(result.length).toBe(4)
    expect(result.map((r) => r.backend_id)).toEqual([12, 14, 22, 25])
  })
  it('preserves snapshot_line byte-for-byte', () => {
    const line = '[100] button "foo\\"bar" (expanded, required)'
    const result = parseSnapshot(line)
    expect(result[0].snapshot_line).toBe(line)
  })
  it('throws SnapshotParseError for a line without [N] prefix', () => {
    expect(() => parseSnapshot('not a snapshot line')).toThrow(
      SnapshotParseError,
    )
  })
  it('throws SnapshotParseError for a line with wrong [N] format', () => {
    expect(() => parseSnapshot('[abc] button')).toThrow(SnapshotParseError)
  })
})

View File

@@ -1,122 +0,0 @@
import { afterEach, beforeEach, describe, expect, it } from 'bun:test'
import { mkdtemp, rm, writeFile } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import {
loadCollectionTargets,
TargetLoadError,
} from '../../src/collectors/target-loader'
// loadCollectionTargets unit tests: valid single/multi-target seeds, zod
// defaults, duplicate-slug rejection, per-line error reporting, slug
// validation, and the empty/missing-file failure modes. Each test writes a
// fresh JSONL seeds file into a temp dir.
describe('loadCollectionTargets', () => {
  let dir: string
  beforeEach(async () => {
    dir = await mkdtemp(join(tmpdir(), 'vl-seeds-'))
  })
  afterEach(async () => {
    await rm(dir, { recursive: true, force: true })
  })
  // Write one JSON document per line (JSONL) and return the file's path.
  async function writeSeeds(lines: string[]): Promise<string> {
    const path = join(dir, 'seeds.jsonl')
    await writeFile(path, lines.join('\n'))
    return path
  }
  it('loads a single valid target', async () => {
    const path = await writeSeeds([
      JSON.stringify({
        site: 'hn',
        url: 'https://news.ycombinator.com/',
        states: [{ kind: 'initial' }],
      }),
    ])
    const targets = await loadCollectionTargets(path)
    expect(targets.length).toBe(1)
    expect(targets[0].site).toBe('hn')
    expect(targets[0].states[0]).toEqual({ kind: 'initial' })
  })
  it('applies default wait_ms for click_and_wait state', async () => {
    const path = await writeSeeds([
      JSON.stringify({
        site: 'a',
        url: 'https://example.com/',
        states: [{ kind: 'click_and_wait', backend_id: 42 }],
      }),
    ])
    const [target] = await loadCollectionTargets(path)
    expect(target.states[0]).toEqual({
      kind: 'click_and_wait',
      backend_id: 42,
      wait_ms: 1000,
    })
  })
  it('loads multiple targets and preserves order', async () => {
    const path = await writeSeeds([
      JSON.stringify({
        site: 'a',
        url: 'https://a.example/',
        states: [{ kind: 'initial' }],
      }),
      JSON.stringify({
        site: 'b',
        url: 'https://b.example/',
        states: [{ kind: 'scroll', pixels: 500 }],
      }),
    ])
    const targets = await loadCollectionTargets(path)
    expect(targets.map((t) => t.site)).toEqual(['a', 'b'])
  })
  it('throws on duplicate site slugs', async () => {
    const path = await writeSeeds([
      JSON.stringify({
        site: 'dup',
        url: 'https://a.example/',
        states: [{ kind: 'initial' }],
      }),
      JSON.stringify({
        site: 'dup',
        url: 'https://b.example/',
        states: [{ kind: 'initial' }],
      }),
    ])
    await expect(loadCollectionTargets(path)).rejects.toThrow(TargetLoadError)
  })
  it('throws with line number on invalid JSON', async () => {
    const path = await writeSeeds([
      JSON.stringify({
        site: 'a',
        url: 'https://a.example/',
        states: [{ kind: 'initial' }],
      }),
      'not json',
    ])
    await expect(loadCollectionTargets(path)).rejects.toThrow(/Line 2/)
  })
  it('throws on invalid site slug (uppercase)', async () => {
    const path = await writeSeeds([
      JSON.stringify({
        site: 'BadSlug',
        url: 'https://a.example/',
        states: [{ kind: 'initial' }],
      }),
    ])
    await expect(loadCollectionTargets(path)).rejects.toThrow(/site/)
  })
  it('throws on empty file', async () => {
    const path = await writeSeeds([])
    await expect(loadCollectionTargets(path)).rejects.toThrow(/empty/)
  })
  it('throws on missing file', async () => {
    await expect(
      loadCollectionTargets(join(dir, 'nope.jsonl')),
    ).rejects.toThrow(TargetLoadError)
  })
})

View File

@@ -1,121 +0,0 @@
import { afterEach, beforeEach, describe, expect, it } from 'bun:test'
import { mkdir, mkdtemp, rm, writeFile } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import { validateOutput } from '../../src/collectors/validator'
import type { CollectedRecord } from '../../src/types/collection-target'
// 1x1 PNG — the smallest valid screenshot payload for fixtures.
const TINY_PNG_BASE64 =
  'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=='
/**
 * Lay out a fully valid output set in `outDir`: the raw/ and screenshots/
 * dirs, a real PNG, and a record whose snapshot lines and elements satisfy
 * every validator invariant. Tests then corrupt one field to trigger a
 * specific error.
 */
async function setupValidRecord(outDir: string, id: string = 'hn_abcdef12') {
  await mkdir(join(outDir, 'screenshots'), { recursive: true })
  await mkdir(join(outDir, 'raw'), { recursive: true })
  const pngPath = join(outDir, 'screenshots', `${id}.png`)
  await writeFile(pngPath, Buffer.from(TINY_PNG_BASE64, 'base64'))
  const record: CollectedRecord = {
    id,
    url: 'https://example.com/',
    site: 'hn',
    viewport: { width: 1280, height: 800 },
    scroll_y: 0,
    // Relative path — the validator resolves it against projectRoot.
    screenshot_path: `screenshots/${id}.png`,
    snapshot: '[1] button "a"\n[2] link "b"',
    elements: [
      {
        backend_id: 1,
        role: 'button',
        name: 'a',
        bbox: [0, 0, 10, 10],
        snapshot_line: '[1] button "a"',
        in_viewport: true,
      },
      {
        backend_id: 2,
        role: 'link',
        name: 'b',
        bbox: [20, 20, 30, 30],
        snapshot_line: '[2] link "b"',
        in_viewport: true,
      },
    ],
  }
  await writeFile(
    join(outDir, 'raw', `${id}.json`),
    JSON.stringify(record, null, 2),
  )
  return { record, id }
}
// validateOutput unit tests: a pristine record passes, then each test
// corrupts a single invariant (line count, duplicate id, bad bbox, missing
// screenshot, id/filename mismatch, empty raw dir) and asserts the matching
// error message appears.
describe('validateOutput', () => {
  let outDir: string
  beforeEach(async () => {
    outDir = await mkdtemp(join(tmpdir(), 'vl-validate-'))
  })
  afterEach(async () => {
    await rm(outDir, { recursive: true, force: true })
  })
  it('returns empty errors for a valid record', async () => {
    await setupValidRecord(outDir)
    const errors = await validateOutput(outDir, outDir)
    expect(errors).toEqual([])
  })
  it('flags mismatched snapshot vs elements count', async () => {
    const { id } = await setupValidRecord(outDir)
    const recordPath = join(outDir, 'raw', `${id}.json`)
    const r = JSON.parse(await Bun.file(recordPath).text()) as CollectedRecord
    r.snapshot = '[1] button "a"'
    await writeFile(recordPath, JSON.stringify(r, null, 2))
    const errors = await validateOutput(outDir, outDir)
    expect(errors.join(' ')).toMatch(/snapshot has 1 lines but elements has 2/)
  })
  it('flags duplicate backend_id', async () => {
    const { id } = await setupValidRecord(outDir)
    const recordPath = join(outDir, 'raw', `${id}.json`)
    const r = JSON.parse(await Bun.file(recordPath).text()) as CollectedRecord
    r.elements[1].backend_id = 1
    r.elements[1].snapshot_line = '[1] link "b"'
    r.snapshot = '[1] button "a"\n[1] link "b"'
    await writeFile(recordPath, JSON.stringify(r, null, 2))
    const errors = await validateOutput(outDir, outDir)
    expect(errors.join(' ')).toMatch(/duplicate backend_id 1/)
  })
  it('flags bad bbox (x1 > x2)', async () => {
    const { id } = await setupValidRecord(outDir)
    const recordPath = join(outDir, 'raw', `${id}.json`)
    const r = JSON.parse(await Bun.file(recordPath).text()) as CollectedRecord
    r.elements[0].bbox = [100, 0, 50, 10]
    await writeFile(recordPath, JSON.stringify(r, null, 2))
    const errors = await validateOutput(outDir, outDir)
    expect(errors.join(' ')).toMatch(/bad bbox/)
  })
  it('flags missing screenshot file', async () => {
    const { id } = await setupValidRecord(outDir)
    await rm(join(outDir, 'screenshots', `${id}.png`))
    const errors = await validateOutput(outDir, outDir)
    expect(errors.join(' ')).toMatch(/screenshot_path missing/)
  })
  it('flags id that does not match filename stem', async () => {
    const { id } = await setupValidRecord(outDir, 'hn_deadbeef')
    const recordPath = join(outDir, 'raw', `${id}.json`)
    const r = JSON.parse(await Bun.file(recordPath).text()) as CollectedRecord
    r.id = 'hn_11111111'
    await writeFile(recordPath, JSON.stringify(r, null, 2))
    const errors = await validateOutput(outDir, outDir)
    expect(errors.join(' ')).toMatch(/does not match filename stem/)
  })
  it('returns an error when raw dir has no json records', async () => {
    await mkdir(join(outDir, 'raw'), { recursive: true })
    await mkdir(join(outDir, 'screenshots'), { recursive: true })
    const errors = await validateOutput(outDir, outDir)
    expect(errors.join(' ')).toMatch(/no \.json records/)
  })
})

View File

@@ -1,6 +1,6 @@
{
"name": "@browseros/server",
"version": "0.0.88",
"version": "0.0.83",
"description": "BrowserOS server",
"type": "module",
"main": "./src/index.ts",
@@ -10,21 +10,9 @@
"scripts": {
"start": "bun --watch --env-file=.env.development src/index.ts",
"build": "bun ../../scripts/build/server.ts --target=all",
"test": "bun run test:all",
"test:all": "bun run ./tests/__helpers__/run-test-group.ts all",
"test:agent": "bun run ./tests/__helpers__/run-test-group.ts agent",
"test:api": "bun run ./tests/__helpers__/run-test-group.ts api",
"test:browser": "bun run ./tests/__helpers__/run-test-group.ts browser",
"test:cdp": "bun run test:browser",
"test:core": "bun run ./tests/__helpers__/run-test-group.ts core",
"test:integration": "bun run ./tests/__helpers__/run-test-group.ts integration",
"test:root": "bun run ./tests/__helpers__/run-test-group.ts root",
"test:sdk": "bun run ./tests/__helpers__/run-test-group.ts sdk",
"test:skills": "bun run ./tests/__helpers__/run-test-group.ts skills",
"test:tools": "bun run ./tests/__helpers__/run-test-group.ts tools",
"test:tools:acl": "bun run test:cleanup && bun --env-file=.env.development test ./tests/tools/acl-scorer.test.ts",
"test:tools:filesystem": "bun run test:cleanup && bun --env-file=.env.development test ./tests/tools/filesystem",
"test:tools:input": "bun run test:cleanup && bun --env-file=.env.development test ./tests/tools/input.test.ts",
"test:tools": "bun run test:cleanup && bun --env-file=.env.development test tests/tools",
"test:integration": "bun run test:cleanup && bun --env-file=.env.development test tests/server.integration.test.ts",
"test:sdk": "bun run test:cleanup && bun --env-file=.env.development test tests/sdk",
"test:cleanup": "./tests/__helpers__/cleanup.sh",
"typecheck": "tsc --noEmit",
"devtools": "bunx @ai-sdk/devtools"

View File

@@ -1,22 +1,23 @@
services:
openclaw-gateway:
# Pin away from latest because newer OpenClaw releases regress OpenRouter chat streams.
image: ${OPENCLAW_IMAGE:-ghcr.io/openclaw/openclaw:2026.4.12}
image: ${OPENCLAW_IMAGE:-ghcr.io/openclaw/openclaw:latest}
ports:
- "127.0.0.1:${OPENCLAW_GATEWAY_PORT:-18789}:18789"
env_file:
- ./.openclaw/.env
environment:
- HOME=/home/node
- OPENCLAW_HOME=/home/node
- OPENCLAW_STATE_DIR=/home/node/.openclaw
- OPENCLAW_GATEWAY_TOKEN=${OPENCLAW_GATEWAY_TOKEN:-}
- OPENCLAW_NO_RESPAWN=1
- NODE_COMPILE_CACHE=/var/tmp/openclaw-compile-cache
- NODE_ENV=production
- OPENCLAW_GATEWAY_TOKEN=${OPENCLAW_GATEWAY_TOKEN}
- OPENCLAW_GATEWAY_BIND=lan
- TZ=${TZ}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
- GEMINI_API_KEY=${GEMINI_API_KEY:-}
- OPENROUTER_API_KEY=${OPENROUTER_API_KEY:-}
- GROQ_API_KEY=${GROQ_API_KEY:-}
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
- MOONSHOT_API_KEY=${MOONSHOT_API_KEY:-}
volumes:
- ${OPENCLAW_HOST_HOME}:/home/node
- ${OPENCLAW_CONFIG_DIR}:/home/node/.openclaw
extra_hosts:
- "host.containers.internal:host-gateway"
command:

View File

@@ -9,10 +9,6 @@ import { LLM_PROVIDERS } from '@browseros/shared/schemas/llm'
import { createOpenRouter } from '@openrouter/ai-sdk-provider'
import type { LanguageModel } from 'ai'
import { createBrowserOSFetch } from '../lib/browseros-fetch'
import {
createMockBrowserOSLanguageModel,
shouldUseMockBrowserOSLLM,
} from '../lib/clients/llm/mock-language-model'
import { createCodexFetch } from '../lib/clients/oauth/codex-fetch'
import { createCopilotFetch } from '../lib/clients/oauth/copilot-fetch'
import { logger } from '../lib/logger'
@@ -223,9 +219,6 @@ const PROVIDER_FACTORIES: Record<string, ProviderFactory> = {
export function createLanguageModel(
config: ResolvedAgentConfig,
): LanguageModel {
if (shouldUseMockBrowserOSLLM(config)) {
return createMockBrowserOSLanguageModel()
}
const provider = config.provider as string
const factory = PROVIDER_FACTORIES[provider]
if (!factory) throw new Error(`Unknown provider: ${provider}`)

View File

@@ -8,6 +8,11 @@
*/
import { OPENCLAW_GATEWAY_PORT } from '@browseros/shared/constants/openclaw'
import { BROWSEROS_ROLE_TEMPLATES } from '@browseros/shared/constants/role-aware-agents'
import type {
BrowserOSAgentRoleId,
BrowserOSCustomRoleInput,
} from '@browseros/shared/types/role-aware-agents'
import { Hono } from 'hono'
import { stream } from 'hono/streaming'
import { logger } from '../../lib/logger'
@@ -17,14 +22,23 @@ import {
OpenClawInvalidAgentNameError,
OpenClawProtectedAgentError,
} from '../services/openclaw/errors'
import { isUnsupportedOpenClawProviderError } from '../services/openclaw/openclaw-provider-map'
import { getOpenClawService } from '../services/openclaw/openclaw-service'
function getCreateAgentValidationError(body: { name?: string }): string | null {
if (!body.name?.trim()) {
return 'Name is required'
}
return null
function isValidBoundaryMode(
value: unknown,
): value is BrowserOSCustomRoleInput['boundaries'][number]['defaultMode'] {
return value === 'allow' || value === 'ask' || value === 'block'
}
function isValidCustomRoleBoundary(value: unknown): boolean {
if (!value || typeof value !== 'object') return false
const boundary = value as Record<string, unknown>
return (
typeof boundary.key === 'string' &&
typeof boundary.label === 'string' &&
typeof boundary.description === 'string' &&
isValidBoundaryMode(boundary.defaultMode)
)
}
export function createOpenClawRoutes() {
@@ -75,9 +89,6 @@ export function createOpenClawRoutes() {
providerType: body.providerType,
providerName: body.providerName,
})
if (isUnsupportedOpenClawProviderError(err)) {
return c.json({ error: err.message }, 400)
}
if (message.includes('Podman is not available')) {
return c.json({ error: message }, 503)
}
@@ -143,23 +154,97 @@ export function createOpenClawRoutes() {
}
})
.get('/roles', async (c) => {
return c.json({
roles: BROWSEROS_ROLE_TEMPLATES.map((role) => ({
id: role.id,
name: role.name,
shortDescription: role.shortDescription,
longDescription: role.longDescription,
recommendedApps: role.recommendedApps,
boundaries: role.boundaries,
defaultAgentName: role.defaultAgentName,
})),
})
})
.post('/agents', async (c) => {
const body = await c.req.json<{
name: string
roleId?: BrowserOSAgentRoleId
customRole?: BrowserOSCustomRoleInput
providerType?: string
providerName?: string
baseUrl?: string
apiKey?: string
modelId?: string
}>()
const validationError = getCreateAgentValidationError(body)
if (validationError) {
return c.json({ error: validationError }, 400)
const name = body.name?.trim()
if (!name) {
return c.json({ error: 'Name is required' }, 400)
}
if (body.roleId && body.customRole) {
return c.json(
{ error: 'Provide either roleId or customRole, not both' },
400,
)
}
if (
body.customRole &&
(!body.customRole.name?.trim() ||
!body.customRole.shortDescription?.trim() ||
!body.customRole.longDescription?.trim())
) {
return c.json(
{
error:
'Custom roles require name, shortDescription, and longDescription',
},
400,
)
}
if (
body.customRole &&
(!Array.isArray(body.customRole.recommendedApps) ||
!Array.isArray(body.customRole.boundaries))
) {
return c.json(
{
error: 'Custom roles require recommendedApps and boundaries arrays',
},
400,
)
}
if (
body.customRole &&
!body.customRole.recommendedApps.every((app) => typeof app === 'string')
) {
return c.json(
{
error: 'Custom role recommendedApps must be an array of strings',
},
400,
)
}
if (
body.customRole &&
!body.customRole.boundaries.every(isValidCustomRoleBoundary)
) {
return c.json(
{
error:
'Custom role boundaries must include key, label, description, and a valid defaultMode',
},
400,
)
}
try {
const agent = await getOpenClawService().createAgent({
name: body.name.trim(),
name,
roleId: body.roleId,
customRole: body.customRole,
providerType: body.providerType,
providerName: body.providerName,
baseUrl: body.baseUrl,
@@ -174,9 +259,6 @@ export function createOpenClawRoutes() {
if (err instanceof OpenClawInvalidAgentNameError) {
return c.json({ error: err.message }, 400)
}
if (isUnsupportedOpenClawProviderError(err)) {
return c.json({ error: err.message }, 400)
}
const message = err instanceof Error ? err.message : String(err)
return c.json({ error: message }, 500)
}
@@ -241,9 +323,6 @@ export function createOpenClawRoutes() {
}
})
} catch (err) {
if (isUnsupportedOpenClawProviderError(err)) {
return c.json({ error: err.message }, 400)
}
const message = err instanceof Error ? err.message : String(err)
return c.json({ error: message }, 500)
}
@@ -273,17 +352,12 @@ export function createOpenClawRoutes() {
}
try {
const result = await getOpenClawService().updateProviderKeys(body)
await getOpenClawService().updateProviderKeys(body)
return c.json({
status: result.restarted ? 'restarting' : 'updated',
message: result.restarted
? 'Provider updated, restarting gateway'
: 'Provider updated without a restart',
status: 'restarting',
message: 'Provider updated, restarting gateway',
})
} catch (err) {
if (isUnsupportedOpenClawProviderError(err)) {
return c.json({ error: err.message }, 400)
}
const message = err instanceof Error ? err.message : String(err)
return c.json({ error: message }, 500)
}

View File

@@ -153,24 +153,6 @@ export class ContainerRuntime {
)
}
async runGatewaySetupCommand(
command: string[],
onLog?: LogFn,
): Promise<number> {
return this.compose(
[
'run',
'--rm',
'--no-deps',
'--entrypoint',
'node',
'openclaw-gateway',
...command.slice(1),
],
onLog,
)
}
tailGatewayLogs(onLine: LogFn): () => void {
return this.podman.tailContainerLogs(
OPENCLAW_GATEWAY_CONTAINER_NAME,

View File

@@ -0,0 +1,754 @@
/**
* @license
* Copyright 2025 BrowserOS
* SPDX-License-Identifier: AGPL-3.0-or-later
*
* WebSocket client for the OpenClaw Gateway protocol.
* Handles handshake (challenge → connect → hello-ok) with Ed25519 device
* identity signing, JSON-RPC over WS, and auto-reconnect.
* Used for agent CRUD and health — chat uses HTTP.
*/
import crypto from 'node:crypto'
import { mkdirSync, readFileSync, writeFileSync } from 'node:fs'
import { join } from 'node:path'
import { OPENCLAW_CONTAINER_HOME } from '@browseros/shared/constants/openclaw'
import { logger } from '../../../lib/logger'
// How long a single JSON-RPC request may stay unanswered before it is rejected.
const RPC_TIMEOUT_MS = 15_000
// Operator scopes requested during the WS handshake and written into the
// pre-seeded pairing record (see seedPairedDevice).
const SCOPES = [
  'operator.read',
  'operator.write',
  'operator.admin',
  'operator.approvals',
  'operator.pairing',
]
// Persisted Ed25519 device identity: PEM key pair plus the id derived from
// the raw public key (sha256 hex — see ensureClientIdentity).
interface DeviceIdentity {
  deviceId: string
  publicKeyPem: string
  privateKeyPem: string
}
// Bookkeeping for one in-flight RPC: promise callbacks plus its timeout timer.
interface PendingRequest {
  resolve: (value: unknown) => void
  reject: (reason: Error) => void
  timer: ReturnType<typeof setTimeout>
}
// Wire frame for the gateway WS protocol: requests, responses, and events.
interface WsFrame {
  type: 'req' | 'res' | 'event'
  id?: string
  method?: string
  params?: Record<string, unknown>
  ok?: boolean
  payload?: Record<string, unknown>
  error?: { message: string; code?: string }
  event?: string
}
// Connection lifecycle as tracked by GatewayClient.state.
export type GatewayClientConnectionState =
  | 'idle'
  | 'connecting'
  | 'connected'
  | 'closed'
  | 'failed'
// Error surfaced when the gateway rejects the connect handshake.
export interface GatewayHandshakeError {
  code?: string
  message: string
}
// Normalized streaming event emitted by GatewayClient.chatStream().
export interface OpenClawStreamEvent {
  type:
    | 'text-delta'
    | 'thinking'
    | 'tool-start'
    | 'tool-end'
    | 'tool-output'
    | 'lifecycle'
    | 'done'
    | 'error'
  data: Record<string, unknown>
}
// Agent entry as returned by the gateway's agents.* RPC methods.
export interface GatewayAgentEntry {
  agentId: string
  name: string
  workspace: string
  model?: string
}
// ── Device Identity Helpers ─────────────────────────────────────────
/**
 * Extracts the raw 32-byte Ed25519 public key from a SPKI PEM string.
 * Strips the PEM armor lines and whitespace, base64-decodes the DER body,
 * and skips the fixed 12-byte SPKI header that precedes the key material.
 */
function rawPublicKeyFromPem(pem: string): Buffer {
  const base64Body = pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, '')
  const spkiDer = Buffer.from(base64Body, 'base64')
  // Ed25519 SPKI structures carry a constant 12-byte prefix before the key.
  return spkiDer.subarray(12)
}
/**
 * Produces the v3 challenge response the gateway expects during the WS
 * handshake: an Ed25519 signature over a pipe-delimited payload binding the
 * device id, client role/scopes, timestamp, auth token, server nonce, and
 * platform.
 */
function signChallenge(
  device: DeviceIdentity,
  nonce: string,
  token: string,
): { signature: string; signedAt: number; publicKey: string } {
  const signedAt = Date.now()
  // Field order and delimiters are fixed by the gateway protocol — do not reformat.
  const payload = `v3|${device.deviceId}|cli|cli|operator|${SCOPES.join(',')}|${signedAt}|${token}|${nonce}|${process.platform}|`
  const signature = crypto
    .sign(
      null,
      Buffer.from(payload, 'utf-8'),
      crypto.createPrivateKey(device.privateKeyPem),
    )
    .toString('base64url')
  return {
    signature,
    signedAt,
    publicKey: rawPublicKeyFromPem(device.publicKeyPem).toString('base64url'),
  }
}
/**
 * Loads the persisted client Ed25519 identity, generating one on first use.
 * A fresh identity is written to client-identity.json (mode 0600) and
 * pre-seeded into the gateway's paired devices file so the gateway trusts it
 * on next boot. Must be called before compose up (or requires a restart
 * after).
 */
export function ensureClientIdentity(openclawDir: string): DeviceIdentity {
  const identityPath = join(openclawDir, 'client-identity.json')
  try {
    return JSON.parse(readFileSync(identityPath, 'utf-8'))
  } catch {
    // No stored identity yet — fall through and create one.
  }
  const keyPair = crypto.generateKeyPairSync('ed25519')
  const publicKeyPem = keyPair.publicKey
    .export({ type: 'spki', format: 'pem' })
    .toString()
  const privateKeyPem = keyPair.privateKey
    .export({ type: 'pkcs8', format: 'pem' })
    .toString()
  // The device id is the sha256 (hex) of the raw 32-byte public key.
  const rawKey = rawPublicKeyFromPem(publicKeyPem)
  const identity: DeviceIdentity = {
    deviceId: crypto.createHash('sha256').update(rawKey).digest('hex'),
    publicKeyPem,
    privateKeyPem,
  }
  writeFileSync(identityPath, JSON.stringify(identity, null, 2), {
    mode: 0o600,
  })
  seedPairedDevice(openclawDir, identity)
  logger.info('Generated client device identity and pre-seeded pairing')
  return identity
}
/**
 * Registers the given identity in the gateway's devices/paired.json so the
 * gateway accepts its signed handshake without an interactive pairing step.
 * Existing entries are preserved; the file is written with mode 0600.
 */
function seedPairedDevice(openclawDir: string, identity: DeviceIdentity): void {
  const devicesDir = join(openclawDir, 'devices')
  mkdirSync(devicesDir, { recursive: true })
  const pairedPath = join(devicesDir, 'paired.json')
  let registry: Record<string, unknown> = {}
  try {
    registry = JSON.parse(readFileSync(pairedPath, 'utf-8'))
  } catch {
    // No registry yet — start from an empty map.
  }
  const rawKey = rawPublicKeyFromPem(identity.publicKeyPem)
  registry[identity.deviceId] = {
    deviceId: identity.deviceId,
    publicKey: rawKey.toString('base64url'),
    platform: process.platform,
    clientId: 'cli',
    clientMode: 'cli',
    role: 'operator',
    roles: ['operator'],
    scopes: SCOPES,
    pairedAt: Date.now(),
    label: 'browseros-server',
  }
  writeFileSync(pairedPath, JSON.stringify(registry, null, 2), { mode: 0o600 })
}
// ── Gateway Client ──────────────────────────────────────────────────
/**
 * WebSocket client for the OpenClaw Gateway JSON-RPC protocol.
 *
 * Handles the challenge → connect handshake (signing the server nonce with
 * the on-disk Ed25519 device identity when available), request/response
 * correlation with per-request timeouts, agent CRUD and health RPCs, and
 * per-chat streaming over a dedicated short-lived connection.
 */
export class GatewayClient {
  private ws: WebSocket | null = null
  private _connected = false
  // In-flight RPCs keyed by request id; settled in resolvePendingRequest.
  private pendingRequests = new Map<string, PendingRequest>()
  // Identity loaded from client-identity.json; null when not yet generated.
  private device: DeviceIdentity | null = null
  private connectionState: GatewayClientConnectionState = 'idle'
  private lastHandshakeError: GatewayHandshakeError | null = null
  constructor(
    private readonly port: number,
    private readonly token: string,
    private readonly openclawDir: string,
    private readonly version = '1.0.0',
  ) {
    // Best-effort load of the identity written by ensureClientIdentity;
    // connect() still proceeds without it (token-only auth).
    try {
      const identityPath = join(this.openclawDir, 'client-identity.json')
      this.device = JSON.parse(readFileSync(identityPath, 'utf-8'))
    } catch {
      logger.warn('Client device identity not found, WS auth may fail')
    }
  }
  get isConnected(): boolean {
    return this._connected
  }
  get state(): GatewayClientConnectionState {
    return this.connectionState
  }
  get lastError(): GatewayHandshakeError | null {
    return this.lastHandshakeError
  }
  /**
   * Opens the WS and performs the handshake: waits for the gateway's
   * `connect.challenge` event, replies with a `connect` request carrying the
   * auth token (plus a device signature over the nonce when an identity is
   * loaded), and resolves once the gateway acknowledges. Rejects on handshake
   * refusal or socket error before the handshake completes.
   */
  async connect(): Promise<void> {
    return new Promise((resolve, reject) => {
      this.connectionState = 'connecting'
      this.lastHandshakeError = null
      logger.info('Connecting to OpenClaw Gateway WS', {
        port: this.port,
        hasDeviceIdentity: !!this.device,
      })
      // NOTE(review): the options cast suggests a Bun/undici-style WebSocket
      // that accepts headers as a second argument — confirm on the target runtime.
      this.ws = new WebSocket(`ws://127.0.0.1:${this.port}`, {
        headers: { Origin: `http://127.0.0.1:${this.port}` },
      } as unknown as string[])
      let handshakeComplete = false
      let connectReqId: string | null = null
      this.ws.onmessage = (event) => {
        const frame = GatewayClient.parseFrame(event.data)
        if (!frame) return
        if (!handshakeComplete) {
          if (frame.type === 'event' && frame.event === 'connect.challenge') {
            const nonce = (frame.payload as Record<string, unknown>)
              ?.nonce as string
            logger.info('Received OpenClaw Gateway challenge', {
              hasNonce: !!nonce,
              hasDeviceIdentity: !!this.device,
            })
            connectReqId = globalThis.crypto.randomUUID()
            const params: Record<string, unknown> = {
              minProtocol: 3,
              maxProtocol: 3,
              client: {
                id: 'cli',
                version: this.version,
                platform: process.platform,
                mode: 'cli',
              },
              role: 'operator',
              scopes: SCOPES,
              caps: [],
              commands: [],
              permissions: {},
              auth: { token: this.token },
              locale: 'en-US',
              userAgent: `browseros-server/${this.version}`,
            }
            // Device signature is optional: only attach it when both an
            // identity and a nonce are present.
            if (this.device && nonce) {
              const signed = signChallenge(this.device, nonce, this.token)
              params.device = {
                id: this.device.deviceId,
                publicKey: signed.publicKey,
                signature: signed.signature,
                signedAt: signed.signedAt,
                nonce,
              }
            }
            this.ws?.send(
              JSON.stringify({
                type: 'req',
                id: connectReqId,
                method: 'connect',
                params,
              }),
            )
            return
          }
          if (frame.type === 'res' && frame.id === connectReqId) {
            if (frame.ok) {
              handshakeComplete = true
              this._connected = true
              this.connectionState = 'connected'
              logger.info('Gateway WS connected')
              resolve()
            } else {
              const msg = frame.error?.message ?? 'Handshake failed'
              this.connectionState = 'failed'
              this.lastHandshakeError = {
                message: msg,
                code: frame.error?.code,
              }
              logger.error('Gateway WS handshake rejected', {
                error: msg,
                code: frame.error?.code,
              })
              reject(new Error(msg))
            }
            return
          }
          // Ignore unrelated frames until the handshake completes.
          return
        }
        this.resolvePendingRequest(frame)
      }
      this.ws.onerror = (err) => {
        logger.error('Gateway WS socket error', {
          error: err instanceof Error ? err.message : 'unknown',
          handshakeComplete,
        })
        // Only reject the connect() promise for pre-handshake errors;
        // later errors surface through onclose/pending-request rejection.
        if (!handshakeComplete) {
          this.connectionState = 'failed'
          reject(
            new Error(
              `WS connection error: ${err instanceof Error ? err.message : 'unknown'}`,
            ),
          )
        }
      }
      this.ws.onclose = () => {
        this._connected = false
        this.connectionState = 'closed'
        this.rejectAllPending('WebSocket closed')
        if (handshakeComplete) {
          logger.info('Gateway WS disconnected')
        }
        this.ws = null
      }
    })
  }
  /** Closes the socket and fails any in-flight RPCs. Safe to call repeatedly. */
  disconnect(): void {
    this._connected = false
    this.connectionState = 'closed'
    this.rejectAllPending('Client disconnecting')
    if (this.ws) {
      // Detach onclose so the handler installed in connect() does not run
      // against this deliberate shutdown.
      this.ws.onclose = null
      this.ws.close()
      this.ws = null
    }
  }
  // ── RPC ──────────────────────────────────────────────────────────────
  /**
   * Sends one JSON-RPC request frame and resolves with its response payload.
   * Rejects with `RPC timeout: <method>` after RPC_TIMEOUT_MS, or immediately
   * when not connected.
   */
  async rpc<T = Record<string, unknown>>(
    method: string,
    params: Record<string, unknown> = {},
  ): Promise<T> {
    if (!this._connected || !this.ws) {
      throw new Error('Gateway WS not connected')
    }
    const id = globalThis.crypto.randomUUID()
    return new Promise<T>((resolve, reject) => {
      const timer = setTimeout(() => {
        this.pendingRequests.delete(id)
        reject(new Error(`RPC timeout: ${method}`))
      }, RPC_TIMEOUT_MS)
      this.pendingRequests.set(id, {
        resolve: resolve as (value: unknown) => void,
        reject,
        timer,
      })
      this.ws?.send(JSON.stringify({ type: 'req', id, method, params }))
    })
  }
  // ── Agent Methods ────────────────────────────────────────────────────
  /** Lists agents via `agents.list`, normalized to GatewayAgentEntry. */
  async listAgents(): Promise<GatewayAgentEntry[]> {
    const result = await this.rpc<{
      agents: Array<{
        id: string
        name?: string
        workspace: string
        model?: string
      }>
    }>('agents.list')
    return (result.agents ?? []).map((a) => ({
      agentId: a.id,
      name: a.name ?? a.id,
      workspace: a.workspace,
      model: a.model,
    }))
  }
  /**
   * Creates an agent via `agents.create`. Falls back to the input values when
   * the gateway response omits fields.
   */
  async createAgent(input: {
    name: string
    workspace: string
    model?: string
  }): Promise<GatewayAgentEntry> {
    const result = await this.rpc<{
      agentId?: string
      id?: string
      name?: string
      workspace?: string
      model?: string
    }>('agents.create', input)
    return {
      agentId: result.agentId ?? result.id ?? input.name,
      name: result.name ?? input.name,
      workspace: result.workspace ?? input.workspace,
      model: result.model ?? input.model,
    }
  }
  /** Deletes an agent via `agents.delete`. */
  async deleteAgent(agentId: string): Promise<void> {
    await this.rpc('agents.delete', { id: agentId })
  }
  // ── Health ───────────────────────────────────────────────────────────
  /** Returns the gateway's `health` RPC payload as-is. */
  async getHealth(): Promise<Record<string, unknown>> {
    return this.rpc('health')
  }
  // ── Chat Stream ─────────────────────────────────────────────────────
  /**
   * Sends one chat message and returns a stream of normalized events.
   * Opens a dedicated GatewayClient connection per chat so stream events do
   * not interleave with this client's RPC traffic; the stream's cancel()
   * aborts the session and tears the extra connection down.
   */
  chatStream(
    agentId: string,
    sessionKey: string,
    message: string,
  ): ReadableStream<OpenClawStreamEvent> {
    if (!this._connected) {
      throw new Error('Gateway WS not connected')
    }
    const fullSessionKey = `agent:${agentId}:browseros-${sessionKey}`
    const idempotencyKey = globalThis.crypto.randomUUID()
    const streamClient = new GatewayClient(
      this.port,
      this.token,
      this.openclawDir,
      this.version,
    )
    return new ReadableStream<OpenClawStreamEvent>({
      start: async (controller) => {
        try {
          await streamClient.connect()
        } catch (error) {
          controller.enqueue({
            type: 'error',
            data: {
              message:
                error instanceof Error
                  ? error.message
                  : 'Gateway WS not connected',
            },
          })
          controller.close()
          return
        }
        const ws = streamClient.ws
        if (!ws) {
          controller.enqueue({
            type: 'error',
            data: { message: 'Gateway WS not connected' },
          })
          controller.close()
          return
        }
        const subscribeId = globalThis.crypto.randomUUID()
        const agentReqId = globalThis.crypto.randomUUID()
        let finished = false
        // Idempotent terminator: emits an optional final event, closes the
        // stream, and disconnects the dedicated client exactly once.
        const finish = (event?: OpenClawStreamEvent) => {
          if (finished) return
          finished = true
          if (event) controller.enqueue(event)
          controller.close()
          streamClient.disconnect()
        }
        // Replaces the handler installed by streamClient.connect(); this
        // dedicated connection carries no further RPCs of its own.
        ws.onmessage = (event) => {
          const frame = GatewayClient.parseFrame(event.data)
          if (!frame) return
          if (
            this.handleChatStreamControlFrame(
              frame,
              subscribeId,
              agentReqId,
              finish,
            )
          ) {
            return
          }
          this.handleChatStreamEventFrame(frame, controller, finish)
        }
        ws.onclose = () => {
          if (finished) return
          finish({
            type: 'error',
            data: { message: 'Gateway WS disconnected' },
          })
        }
        ws.onerror = () => {
          if (finished) return
          finish({
            type: 'error',
            data: { message: 'Gateway WS connection error' },
          })
        }
        // Subscribe to the session's event feed, then kick off the agent run.
        ws.send(
          JSON.stringify({
            type: 'req',
            id: subscribeId,
            method: 'sessions.subscribe',
            params: { sessionKey: fullSessionKey },
          }),
        )
        ws.send(
          JSON.stringify({
            type: 'req',
            id: agentReqId,
            method: 'agent',
            params: {
              message,
              sessionKey: fullSessionKey,
              idempotencyKey,
            },
          }),
        )
      },
      cancel: () => {
        // Best-effort abort of the server-side session before disconnecting.
        if (streamClient.ws?.readyState === WebSocket.OPEN) {
          streamClient.ws.send(
            JSON.stringify({
              type: 'req',
              id: globalThis.crypto.randomUUID(),
              method: 'sessions.abort',
              params: { sessionKey: fullSessionKey },
            }),
          )
        }
        streamClient.disconnect()
      },
    })
  }
  // ── Helpers ──────────────────────────────────────────────────────────
  /** Maps an agent name to its container workspace path ('main' is special-cased). */
  static agentWorkspace(name: string): string {
    return name === 'main'
      ? `${OPENCLAW_CONTAINER_HOME}/workspace`
      : `${OPENCLAW_CONTAINER_HOME}/workspace-${name}`
  }
  /** Parses an incoming WS message (string or binary) into a frame; null on bad JSON. */
  private static parseFrame(data: unknown): WsFrame | null {
    try {
      return JSON.parse(
        typeof data === 'string'
          ? data
          : new TextDecoder().decode(data as ArrayBuffer),
      ) as WsFrame
    } catch {
      return null
    }
  }
  /** Fails every in-flight RPC with the given reason and clears their timers. */
  private rejectAllPending(reason: string): void {
    for (const [id, pending] of this.pendingRequests) {
      clearTimeout(pending.timer)
      pending.reject(new Error(reason))
      this.pendingRequests.delete(id)
    }
  }
  /** Settles the pending RPC matching a response frame, if any. */
  private resolvePendingRequest(frame: WsFrame): void {
    if (frame.type !== 'res' || !frame.id) return
    const pending = this.pendingRequests.get(frame.id)
    if (!pending) return
    this.pendingRequests.delete(frame.id)
    clearTimeout(pending.timer)
    if (frame.ok) {
      pending.resolve(frame.payload)
    } else {
      pending.reject(new Error(frame.error?.message ?? 'RPC error'))
    }
  }
  /**
   * Handles response frames for the stream's own subscribe/agent requests.
   * Returns true when the frame was one of ours (consumed); a failed response
   * terminates the stream with an error event.
   */
  private handleChatStreamControlFrame(
    frame: WsFrame,
    subscribeId: string,
    agentReqId: string,
    finish: (event?: OpenClawStreamEvent) => void,
  ): boolean {
    if (frame.type !== 'res' || !frame.id) return false
    if (frame.id !== subscribeId && frame.id !== agentReqId) return false
    if (!frame.ok) {
      finish({
        type: 'error',
        data: {
          message: frame.error?.message ?? 'RPC error',
          code: frame.error?.code,
        },
      })
    }
    return true
  }
  /** Dispatches gateway event frames to the per-event-type stream handlers. */
  private handleChatStreamEventFrame(
    frame: WsFrame,
    controller: ReadableStreamDefaultController<OpenClawStreamEvent>,
    finish: (event?: OpenClawStreamEvent) => void,
  ): void {
    if (frame.type !== 'event' || !frame.event || !frame.payload) return
    switch (frame.event) {
      case 'agent':
        this.handleAgentStreamEvent(frame.payload, controller)
        return
      case 'session.tool':
        this.handleSessionToolStreamEvent(frame.payload, controller)
        return
      case 'session.message':
        this.handleSessionMessageStreamEvent(frame.payload, controller)
        return
      case 'chat':
        this.handleChatCompletionEvent(frame.payload, finish)
        return
      default:
        return
    }
  }
  /**
   * Maps 'agent' events onto stream events: assistant deltas → text-delta,
   * item start/end → tool-start/tool-end, lifecycle → lifecycle.
   */
  private handleAgentStreamEvent(
    payload: Record<string, unknown>,
    controller: ReadableStreamDefaultController<OpenClawStreamEvent>,
  ): void {
    const streamType = payload.stream as string | undefined
    const data = payload.data as Record<string, unknown> | undefined
    if (streamType === 'assistant' && data?.delta) {
      controller.enqueue({
        type: 'text-delta',
        data: { text: data.delta },
      })
      return
    }
    if (streamType === 'item' && data) {
      const phase = data.phase as string | undefined
      if (phase === 'start') {
        controller.enqueue({
          type: 'tool-start',
          data: {
            toolCallId: data.toolCallId ?? data.id,
            toolName: data.name ?? data.title,
            kind: data.kind,
          },
        })
        return
      }
      if (phase === 'end') {
        controller.enqueue({
          type: 'tool-end',
          data: {
            toolCallId: data.toolCallId ?? data.id,
            status: data.status,
            durationMs: data.durationMs,
          },
        })
        return
      }
    }
    if (streamType === 'lifecycle') {
      controller.enqueue({
        type: 'lifecycle',
        data: { phase: data?.phase ?? payload.phase },
      })
    }
  }
  /** Emits a tool-output event for 'session.tool' frames in the result phase. */
  private handleSessionToolStreamEvent(
    payload: Record<string, unknown>,
    controller: ReadableStreamDefaultController<OpenClawStreamEvent>,
  ): void {
    const toolData = (payload.data as Record<string, unknown>) ?? payload
    const phase = (toolData.phase as string) ?? (payload.phase as string)
    if (phase !== 'result') return
    controller.enqueue({
      type: 'tool-output',
      data: {
        toolCallId: toolData.toolCallId,
        isError: toolData.isError ?? false,
        meta: toolData.meta,
      },
    })
  }
  /**
   * Emits a thinking event per 'thinking' content block in assistant
   * 'session.message' frames. The text may live under `thinking`, `content`,
   * or `text` depending on gateway version.
   */
  private handleSessionMessageStreamEvent(
    payload: Record<string, unknown>,
    controller: ReadableStreamDefaultController<OpenClawStreamEvent>,
  ): void {
    const message = payload.message as Record<string, unknown> | undefined
    if (message?.role !== 'assistant') return
    const content = message.content as
      | Array<Record<string, unknown>>
      | undefined
    if (!content) return
    for (const block of content) {
      if (block.type !== 'thinking') continue
      const text =
        (block.thinking as string) ??
        (block.content as string) ??
        (block.text as string) ??
        ''
      if (!text) continue
      controller.enqueue({
        type: 'thinking',
        data: { text },
      })
    }
  }
  /** Terminates the stream with a done event when a 'chat' frame reaches 'final'. */
  private handleChatCompletionEvent(
    payload: Record<string, unknown>,
    finish: (event?: OpenClawStreamEvent) => void,
  ): void {
    if ((payload.state as string | undefined) !== 'final') return
    finish({
      type: 'done',
      data: { text: (payload.text as string) ?? '' },
    })
  }
}

View File

@@ -1,407 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
import { OPENCLAW_CONTAINER_HOME } from '@browseros/shared/constants/openclaw'
// Callback for streaming container stdout/stderr lines back to the caller.
type LogFn = (line: string) => void
// Minimal executor surface this client needs to run commands in the container.
interface ContainerExecutor {
  execInContainer(command: string[], onLog?: LogFn): Promise<number>
}
// One entry for `config set --batch-json`: a config path and its new value.
export interface OpenClawConfigBatchEntry {
  path: string
  value: unknown
}
// Agent record shape as emitted by `openclaw agents list --json`.
interface RawAgentRecord {
  id: string
  name?: string
  workspace: string
  model?: string
}
// Normalized agent record returned by OpenClawCliClient.
export interface OpenClawAgentRecord {
  agentId: string
  name: string
  workspace: string
  model?: string
}
/**
 * Thin wrapper around the `openclaw` CLI executed inside the gateway
 * container. Every method shells out through the injected ContainerExecutor
 * and parses the CLI's (log-interleaved) stdout for JSON payloads.
 */
export class OpenClawCliClient {
  constructor(private readonly executor: ContainerExecutor) {}
  /**
   * Runs `openclaw onboard`, translating each provided option to its CLI
   * flag. Undefined options are omitted so the CLI's own defaults apply.
   */
  async runOnboard(
    input: {
      acceptRisk?: boolean
      authChoice?: string
      customBaseUrl?: string
      customCompatibility?: 'anthropic' | 'openai-completions'
      customModelId?: string
      customProviderId?: string
      gatewayAuth?: 'none' | 'password' | 'token'
      gatewayBind?: 'auto' | 'custom' | 'lan' | 'loopback' | 'tailnet'
      gatewayPort?: number
      gatewayToken?: string
      gatewayTokenRefEnv?: string
      installDaemon?: boolean
      mode?: 'local' | 'remote'
      nonInteractive?: boolean
      reset?: boolean
      resetScope?: 'config' | 'config+creds+sessions' | 'full'
      secretInputMode?: 'plain' | 'ref'
      skipHealth?: boolean
      workspace?: string
    } = {},
  ): Promise<void> {
    const args = ['onboard']
    if (input.nonInteractive) {
      args.push('--non-interactive')
    }
    if (input.mode) {
      args.push('--mode', input.mode)
    }
    if (input.workspace) {
      args.push('--workspace', input.workspace)
    }
    if (input.reset) {
      args.push('--reset')
    }
    if (input.resetScope) {
      args.push('--reset-scope', input.resetScope)
    }
    if (input.authChoice) {
      args.push('--auth-choice', input.authChoice)
    }
    if (input.secretInputMode) {
      args.push('--secret-input-mode', input.secretInputMode)
    }
    if (input.customBaseUrl) {
      args.push('--custom-base-url', input.customBaseUrl)
    }
    if (input.customModelId) {
      args.push('--custom-model-id', input.customModelId)
    }
    if (input.customProviderId) {
      args.push('--custom-provider-id', input.customProviderId)
    }
    if (input.customCompatibility) {
      args.push('--custom-compatibility', input.customCompatibility)
    }
    if (input.gatewayAuth) {
      args.push('--gateway-auth', input.gatewayAuth)
    }
    if (input.gatewayToken) {
      args.push('--gateway-token', input.gatewayToken)
    }
    if (input.gatewayTokenRefEnv) {
      args.push('--gateway-token-ref-env', input.gatewayTokenRefEnv)
    }
    if (input.gatewayPort) {
      args.push('--gateway-port', String(input.gatewayPort))
    }
    if (input.gatewayBind) {
      args.push('--gateway-bind', input.gatewayBind)
    }
    // Tri-state: true / false map to explicit flags, undefined means
    // "let the CLI decide".
    if (input.installDaemon === true) {
      args.push('--install-daemon')
    } else if (input.installDaemon === false) {
      args.push('--no-install-daemon')
    }
    if (input.skipHealth) {
      args.push('--skip-health')
    }
    if (input.acceptRisk) {
      args.push('--accept-risk')
    }
    await this.runCommand(args)
  }
  /** Sets one config value via `config set`. Non-strings are JSON-encoded. */
  async setConfig(path: string, value: unknown): Promise<void> {
    await this.runCommand(['config', 'set', path, formatConfigValue(value)])
  }
  /** Sets multiple config values atomically via `config set --batch-json`. */
  async setConfigBatch(entries: OpenClawConfigBatchEntry[]): Promise<void> {
    await this.runCommand([
      'config',
      'set',
      '--batch-json',
      JSON.stringify(entries),
    ])
  }
  /** Reads one config value; returns the raw text when no JSON is found. */
  async getConfig(path: string): Promise<unknown> {
    const output = await this.runCommand(['config', 'get', path])
    return parseConfigValue(output)
  }
  /** Runs `config validate --json` and returns its parsed report. */
  async validateConfig(): Promise<unknown> {
    const output = await this.runCommand(['config', 'validate', '--json'])
    return parseConfigValue(output)
  }
  /** Sets the gateway's default model via `models set`. */
  async setDefaultModel(model: string): Promise<void> {
    await this.runCommand(['models', 'set', model])
  }
  /** Lists agents, normalizing both array and `{agents: [...]}` CLI outputs. */
  async listAgents(): Promise<OpenClawAgentRecord[]> {
    const records = await this.runAgentListCommand()
    const agents = Array.isArray(records) ? records : (records.agents ?? [])
    return agents.map((record) => ({
      agentId: record.id,
      name: record.name ?? record.id,
      workspace: record.workspace,
      model: record.model,
    }))
  }
  /**
   * Creates an agent with a deterministic workspace path, then re-lists to
   * return the canonical record (the create command's output is not relied on).
   * Throws when the created agent does not appear in the follow-up listing.
   */
  async createAgent(input: {
    name: string
    model?: string
  }): Promise<OpenClawAgentRecord> {
    const workspace = this.agentWorkspace(input.name)
    const args = ['agents', 'add', input.name, '--workspace', workspace]
    if (input.model) {
      args.push('--model', input.model)
    }
    args.push('--non-interactive', '--json')
    await this.runCommand(args)
    const agents = await this.listAgents()
    const agent = agents.find((entry) => entry.agentId === input.name)
    if (!agent) {
      throw new Error(`Created agent ${input.name} was not found in agent list`)
    }
    return agent
  }
  /** Deletes an agent without confirmation (`--force`). */
  async deleteAgent(agentId: string): Promise<void> {
    await this.runCommand(['agents', 'delete', agentId, '--force', '--json'])
  }
  /** Cheap liveness probe: succeeds iff `agents list` runs and parses. */
  async probe(): Promise<void> {
    await this.listAgents()
  }
  /** Maps an agent name to its container workspace path ('main' is special-cased). */
  private agentWorkspace(name: string): string {
    return name === 'main'
      ? `${OPENCLAW_CONTAINER_HOME}/workspace`
      : `${OPENCLAW_CONTAINER_HOME}/workspace-${name}`
  }
  /**
   * Runs one CLI invocation in the container, collecting output lines.
   * Returns the trimmed combined output; throws (with that output as detail)
   * on a non-zero exit code.
   */
  private async runCommand(args: string[]): Promise<string> {
    const output: string[] = []
    const command = ['node', 'dist/index.js', ...args]
    const exitCode = await this.executor.execInContainer(command, (line) => {
      output.push(line)
    })
    if (exitCode !== 0) {
      const detail = output.join('\n').trim()
      throw new Error(
        detail || `OpenClaw command failed (${args.slice(0, 2).join(' ')})`,
      )
    }
    return output.join('\n').trim()
  }
  /** Runs `agents list --json` and parses its JSON payload out of the output. */
  private async runAgentListCommand(): Promise<
    RawAgentRecord[] | { agents?: RawAgentRecord[] }
  > {
    const output = await this.runCommand(['agents', 'list', '--json'])
    return parseAgentListOutput(output)
  }
}
/** Renders a config value for the CLI: strings pass through, everything else
 * is JSON-encoded. */
function formatConfigValue(value: unknown): string {
  return typeof value === 'string' ? value : JSON.stringify(value)
}
/** Extracts the JSON payload from `config get` output, falling back to the
 * raw text when no parseable JSON candidate is found. */
function parseConfigValue(output: string): unknown {
  const value = selectConfigJson<unknown>(output)
  return value ?? output
}
/**
 * Locates and parses the agent-list JSON in CLI output (an array or an
 * `{agents}` wrapper). Throws with a truncated excerpt when nothing matches.
 */
function parseAgentListOutput(
  output: string,
): RawAgentRecord[] | { agents?: RawAgentRecord[] } {
  const match = parseFirstMatchingJson<
    RawAgentRecord[] | { agents?: RawAgentRecord[] }
  >(output, isAgentListPayload)
  if (match === null) {
    throw new Error(
      `Failed to parse OpenClaw JSON output: ${output.slice(0, 200)}`,
    )
  }
  return match
}
/**
 * Returns the first JSON candidate in the output that parses and (when a
 * predicate is given) satisfies it; null when no candidate qualifies.
 */
function parseFirstMatchingJson<T>(
  output: string,
  predicate?: (value: unknown) => boolean,
): T | null {
  for (const candidate of collectJsonCandidates(output)) {
    const value = tryParseJson<T>(candidate)
    if (value === null) continue
    if (!predicate || predicate(value)) return value
  }
  return null
}
/**
 * Pick the config payload from mixed CLI output: longest parseable JSON
 * candidate that is not a structured log line (earliest wins on ties).
 */
function selectConfigJson<T>(output: string): T | null {
  let best: { text: string; value: T } | null = null
  for (const candidate of collectJsonCandidates(output)) {
    const value = tryParseJson<T>(candidate)
    if (value === null) continue
    if (isStructuredLogPayload(value)) continue
    if (best === null || candidate.length > best.text.length) {
      best = { text: candidate, value }
    }
  }
  return best === null ? null : best.value
}
/**
 * Gather every plausible JSON fragment from CLI output: the whole text,
 * each non-empty line, and every balanced `{...}` / `[...]` substring.
 */
function collectJsonCandidates(output: string): string[] {
  const candidates: string[] = [output.trim()]
  for (const rawLine of output.split(/\r?\n/)) {
    const line = rawLine.trim()
    if (line) {
      candidates.push(line)
    }
  }
  for (let at = 0; at < output.length; at += 1) {
    const char = output[at]
    if (char !== '{' && char !== '[') continue
    const fragment = extractJsonSubstring(output, at)
    if (fragment) {
      candidates.push(fragment)
    }
  }
  return candidates
}
/**
 * Extract a balanced JSON-like substring starting at `startIndex` (which must
 * point at `{` or `[`). Tracks nesting with a stack of expected closers and
 * skips bracket characters inside string literals (honoring backslash
 * escapes). Returns the full substring when the opening bracket is balanced,
 * or null when the text ends before it closes.
 */
function extractJsonSubstring(
output: string,
startIndex: number,
): string | null {
const opening = output[startIndex]
const closing = opening === '{' ? '}' : ']'
const stack: string[] = [closing]
let inString = false
let escaped = false
for (let index = startIndex + 1; index < output.length; index += 1) {
const char = output[index]
if (inString) {
// Inside a string: only a closing quote (not escaped) ends it.
if (escaped) {
escaped = false
continue
}
if (char === '\\') {
escaped = true
continue
}
if (char === '"') {
inString = false
}
continue
}
if (char === '"') {
inString = true
continue
}
if (char === '{') {
stack.push('}')
continue
}
if (char === '[') {
stack.push(']')
continue
}
// Mismatched closers are ignored; only the expected closer pops the stack.
const expectedClosing = stack[stack.length - 1]
if (char === expectedClosing) {
stack.pop()
if (stack.length === 0) {
return output.slice(startIndex, index + 1)
}
}
}
// Ran off the end of the text without balancing the opener.
return null
}
/** Parse JSON leniently: blank input or a parse failure yields null. */
function tryParseJson<T>(value: string): T | null {
  const text = value.trim()
  if (text === '') return null
  try {
    return JSON.parse(text) as T
  } catch {
    return null
  }
}
/** Accept either a bare array of agent records or `{ agents?: [...] }`. */
function isAgentListPayload(
  value: unknown,
): value is RawAgentRecord[] | { agents?: RawAgentRecord[] } {
  if (Array.isArray(value)) {
    return value.every(isRawAgentRecord)
  }
  if (!isPlainObject(value) || !('agents' in value)) {
    return false
  }
  const agents = (value as { agents?: unknown }).agents
  if (agents === undefined) return true
  return Array.isArray(agents) && agents.every(isRawAgentRecord)
}
/** Structural check: `id` and `workspace` are required strings; `name`/`model` optional strings. */
function isRawAgentRecord(value: unknown): value is RawAgentRecord {
  if (!isPlainObject(value)) return false
  if (typeof value.id !== 'string') return false
  if (typeof value.workspace !== 'string') return false
  const nameOk = value.name === undefined || typeof value.name === 'string'
  const modelOk = value.model === undefined || typeof value.model === 'string'
  return nameOk && modelOk
}
/** True for non-null, non-array objects. */
function isPlainObject(value: unknown): value is Record<string, unknown> {
  if (value === null || Array.isArray(value)) return false
  return typeof value === 'object'
}
/** Detect structured log lines (`level` plus `message`/`msg`) so they are not mistaken for payloads. */
function isStructuredLogPayload(value: unknown): boolean {
  if (!isPlainObject(value)) return false
  if (typeof value.level !== 'string') return false
  return typeof value.message === 'string' || typeof value.msg === 'string'
}

View File

@@ -0,0 +1,279 @@
/**
* @license
* Copyright 2025 BrowserOS
* SPDX-License-Identifier: AGPL-3.0-or-later
*
* Pure functions for building OpenClaw bootstrap configuration.
* Config is write-once at setup — agent CRUD uses WS RPC, not config edits.
*/
import {
OPENCLAW_CONTAINER_HOME,
OPENCLAW_GATEWAY_PORT,
} from '@browseros/shared/constants/openclaw'
import { DEFAULT_PORTS } from '@browseros/shared/constants/ports'
const OPENCLAW_IMAGE = 'ghcr.io/openclaw/openclaw:latest'
export const PROVIDER_ENV_MAP: Record<string, string> = {
anthropic: 'ANTHROPIC_API_KEY',
openai: 'OPENAI_API_KEY',
google: 'GEMINI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
moonshot: 'MOONSHOT_API_KEY',
groq: 'GROQ_API_KEY',
mistral: 'MISTRAL_API_KEY',
}
export interface OpenClawProviderInput {
providerType?: string
providerName?: string
baseUrl?: string
modelId?: string
apiKey?: string
}
export interface BootstrapConfigInput {
gatewayPort: number
gatewayToken: string
browserosServerPort?: number
providerType?: string
providerName?: string
baseUrl?: string
modelId?: string
}
export interface EnvFileInput {
image?: string
port?: number
token: string
configDir: string
timezone?: string
providerKeys?: Record<string, string>
}
export interface ResolvedProviderConfig {
model?: string
providerKeys: Record<string, string>
models?: {
mode: 'merge'
providers: Record<string, Record<string, unknown>>
}
}
/** True when the provider is one of the built-ins with a known API-key env var. */
function hasBuiltinProvider(providerType?: string): providerType is string {
  if (!providerType) return false
  return providerType in PROVIDER_ENV_MAP
}
/**
 * OpenRouter's public slugs use dots for version numbers
 * (e.g. `anthropic/claude-haiku-4.5`), but openclaw's model registry expects
 * dashes (`claude-haiku-4-5`). Passing the dotted form makes openclaw fail
 * the registry lookup silently and the agent turn completes with zero
 * payloads. Rewrite dots to dashes for openrouter model ids only.
 */
function normalizeBuiltinModelId(
  providerType: string,
  modelId: string,
): string {
  if (providerType !== 'openrouter') {
    return modelId
  }
  return modelId.split('.').join('-')
}
/**
 * Derive a stable slug for a custom provider from its name, base URL, or type
 * (first non-empty wins). Lowercased, protocol stripped, non-alphanumerics
 * collapsed to dashes, edge dashes trimmed; falls back to 'custom-provider'.
 */
export function deriveOpenClawProviderId(providerInput: {
  providerType?: string
  providerName?: string
  baseUrl?: string
}): string {
  const raw =
    providerInput.providerName?.trim() ||
    providerInput.baseUrl?.trim() ||
    providerInput.providerType?.trim() ||
    'custom-provider'
  let slug = raw.toLowerCase()
  slug = slug.replace(/^https?:\/\//, '')
  slug = slug.replace(/[^a-z0-9]+/g, '-')
  slug = slug.replace(/^-|-$/g, '')
  return slug === '' ? 'custom-provider' : slug
}
/** Map a provider slug to its env-var name, e.g. `my-provider` -> `MY_PROVIDER_API_KEY`. */
export function deriveOpenClawApiKeyEnvVar(providerId: string): string {
  const envBase = providerId.replace(/-/g, '_').toUpperCase()
  return `${envBase}_API_KEY`
}
export function resolveProviderConfig(
input: OpenClawProviderInput,
): ResolvedProviderConfig {
if (!input.providerType) {
return { providerKeys: {} }
}
if (hasBuiltinProvider(input.providerType)) {
const providerKeys: Record<string, string> = {}
if (input.apiKey) {
providerKeys[PROVIDER_ENV_MAP[input.providerType]] = input.apiKey
}
const normalizedModelId = input.modelId
? normalizeBuiltinModelId(input.providerType, input.modelId)
: undefined
return {
providerKeys,
model: normalizedModelId
? `${input.providerType}/${normalizedModelId}`
: undefined,
}
}
if (!input.baseUrl) {
return { providerKeys: {} }
}
const providerId = deriveOpenClawProviderId(input)
const apiKeyEnvVar = deriveOpenClawApiKeyEnvVar(providerId)
const providerKeys: Record<string, string> = {}
if (input.apiKey) {
providerKeys[apiKeyEnvVar] = input.apiKey
}
const providerConfig: Record<string, unknown> = {
baseUrl: input.baseUrl,
apiKey: `\${${apiKeyEnvVar}}`,
api: 'openai-completions',
}
if (input.modelId) {
providerConfig.models = [{ id: input.modelId, name: input.modelId }]
}
return {
providerKeys,
model: input.modelId ? `${providerId}/${input.modelId}` : undefined,
models: {
mode: 'merge',
providers: {
[providerId]: providerConfig,
},
},
}
}
/**
 * Build the write-once OpenClaw bootstrap config object. Combines the
 * resolved provider (model ref + custom provider entries) with fixed
 * gateway/tool/hook/MCP settings. `browserosServerPort` defaults to the
 * shared server port and is used for the MCP endpoint URL.
 */
export function buildBootstrapConfig(
input: BootstrapConfigInput,
): Record<string, unknown> {
const serverPort = input.browserosServerPort ?? DEFAULT_PORTS.server
const provider = resolveProviderConfig(input)
// Per-agent defaults; the primary model is set only when a provider resolved one.
const defaults: Record<string, unknown> = {
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
timeoutSeconds: 4200,
thinkingDefault: 'adaptive',
}
if (provider.model) {
defaults.model = { primary: provider.model }
}
const config: Record<string, unknown> = {
gateway: {
mode: 'local',
port: input.gatewayPort,
bind: 'lan',
auth: { mode: 'token', token: input.gatewayToken },
reload: { mode: 'restart' },
controlUi: {
allowInsecureAuth: true,
allowedOrigins: [
`http://127.0.0.1:${input.gatewayPort}`,
`http://localhost:${input.gatewayPort}`,
],
},
http: {
endpoints: {
chatCompletions: { enabled: true },
},
},
},
agents: { defaults },
tools: {
profile: 'full',
web: {
search: { provider: 'duckduckgo', enabled: true },
},
exec: {
host: 'gateway',
security: 'full',
ask: 'off',
},
},
cron: { enabled: true },
hooks: {
internal: {
enabled: true,
entries: {
'boot-md': { enabled: true },
'bootstrap-extra-files': { enabled: true },
'session-memory': { enabled: true },
},
},
},
// BrowserOS server is reached from inside the container via the podman host alias.
mcp: {
servers: {
browseros: {
url: `http://host.containers.internal:${serverPort}/mcp`,
transport: 'streamable-http',
},
},
},
approvals: {
exec: { enabled: false },
},
skills: {
install: { nodeManager: 'bun' },
},
}
// Custom providers contribute a merge-mode models section.
if (provider.models) {
config.models = provider.models
}
if (process.env.NODE_ENV === 'development') {
config.logging = { level: 'debug', consoleLevel: 'debug' }
}
return config
}
/**
 * Render the container env file: image/port/token/config-dir/timezone lines
 * followed by any provider API-key lines, newline-terminated.
 */
export function buildEnvFile(input: EnvFileInput): string {
  const timezone =
    input.timezone ?? Intl.DateTimeFormat().resolvedOptions().timeZone
  const lines: string[] = [
    `OPENCLAW_IMAGE=${input.image ?? OPENCLAW_IMAGE}`,
    `OPENCLAW_GATEWAY_PORT=${input.port ?? OPENCLAW_GATEWAY_PORT}`,
    `OPENCLAW_GATEWAY_TOKEN=${input.token}`,
    `OPENCLAW_CONFIG_DIR=${input.configDir}`,
    `TZ=${timezone}`,
  ]
  for (const [key, value] of Object.entries(input.providerKeys ?? {})) {
    lines.push(`${key}=${value}`)
  }
  return `${lines.join('\n')}\n`
}
/** Convenience accessor: just the API-key env map from the resolved provider config. */
export function resolveProviderKeys(
  input: OpenClawProviderInput,
): Record<string, string> {
  const { providerKeys } = resolveProviderConfig(input)
  return providerKeys
}
/** Convenience accessor: just the model ref from the resolved provider config. */
export function resolveProviderModel(
  input: OpenClawProviderInput,
): string | undefined {
  const { model } = resolveProviderConfig(input)
  return model
}

View File

@@ -1,96 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
import { join } from 'node:path'
import { OPENCLAW_GATEWAY_PORT } from '@browseros/shared/constants/openclaw'
// Pin away from latest because newer OpenClaw releases regress OpenRouter chat streams.
const OPENCLAW_IMAGE = 'ghcr.io/openclaw/openclaw:2026.4.12'
const STATE_DIR_NAME = '.openclaw'
/** Root of OpenClaw's persisted state under the BrowserOS openclaw dir. */
export function getOpenClawStateDir(openclawDir: string): string {
return join(openclawDir, STATE_DIR_NAME)
}
/** Path of the bootstrap `openclaw.json` inside the state dir. */
export function getOpenClawStateConfigPath(openclawDir: string): string {
return join(getOpenClawStateDir(openclawDir), 'openclaw.json')
}
/** Path of the container `.env` file inside the state dir. */
export function getOpenClawStateEnvPath(openclawDir: string): string {
return join(getOpenClawStateDir(openclawDir), '.env')
}
/** Host path of an agent's workspace; the `main` agent owns the bare `workspace` dir. */
export function getHostWorkspaceDir(
  openclawDir: string,
  agentName: string,
): string {
  const dirName =
    agentName === 'main' ? 'workspace' : `workspace-${agentName}`
  return join(getOpenClawStateDir(openclawDir), dirName)
}
/**
 * Render the compose env file. The gateway token line is appended only when
 * provided; output is newline-terminated.
 */
export function buildComposeEnvFile(input: {
  hostHome: string
  image?: string
  port?: number
  timezone?: string
  gatewayToken?: string
}): string {
  const timezone =
    input.timezone ?? Intl.DateTimeFormat().resolvedOptions().timeZone
  const lines: string[] = [
    `OPENCLAW_IMAGE=${input.image ?? OPENCLAW_IMAGE}`,
    `OPENCLAW_GATEWAY_PORT=${input.port ?? OPENCLAW_GATEWAY_PORT}`,
    `OPENCLAW_HOST_HOME=${input.hostHome}`,
    `TZ=${timezone}`,
  ]
  if (input.gatewayToken) {
    lines.push(`OPENCLAW_GATEWAY_TOKEN=${input.gatewayToken}`)
  }
  lines.push('')
  return lines.join('\n')
}
/**
 * Merge `KEY=value` updates into existing env-file content. Existing keys are
 * replaced in place, new keys appended; `changed` reflects both explicit
 * edits and any normalization drift from the original content.
 */
export function mergeEnvContent(
  current: string,
  updates: Record<string, string>,
): { changed: boolean; content: string } {
  const entries = Object.entries(updates)
  if (entries.length === 0) {
    return { changed: false, content: normalizeEnvContent(current) }
  }
  const lines =
    current === '' ? [] : current.replace(/\r\n/g, '\n').split('\n')
  let mutated = false
  for (const [key, value] of entries) {
    const replacement = `${key}=${value}`
    const existing = lines.findIndex((line) => line.startsWith(`${key}=`))
    if (existing === -1) {
      lines.push(replacement)
      mutated = true
    } else if (lines[existing] !== replacement) {
      lines[existing] = replacement
      mutated = true
    }
  }
  const content = normalizeEnvContent(lines.join('\n'))
  return {
    changed: mutated || content !== normalizeEnvContent(current),
    content,
  }
}
/** Canonical env-file form: trimmed and newline-terminated, or empty. */
function normalizeEnvContent(content: string): string {
  const body = content.trim()
  if (body === '') return ''
  return `${body}\n`
}

View File

@@ -1,255 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
import { createParser, type EventSourceMessage } from 'eventsource-parser'
import type { OpenClawStreamEvent } from './openclaw-types'
export interface OpenClawChatRequest {
agentId: string
sessionKey: string
message: string
signal?: AbortSignal
}
/**
 * Streams chat completions from a local OpenClaw gateway over its
 * OpenAI-compatible `/v1/chat/completions` endpoint (SSE).
 */
export class OpenClawHttpChatClient {
constructor(
private readonly port: number,
// Token is fetched lazily per request so rotations are picked up.
private readonly getToken: () => Promise<string>,
) {}
/**
 * Start a streaming chat turn and return a stream of parsed OpenClaw events.
 * Throws if the HTTP response has no body.
 */
async streamChat(
input: OpenClawChatRequest,
): Promise<ReadableStream<OpenClawStreamEvent>> {
const response = await this.fetchChat(input)
const body = response.body
if (!body) {
throw new Error('OpenClaw chat response had no body')
}
return createEventStream(body, input.signal)
}
/** POST the chat request; non-OK responses raise with the response text as detail. */
private async fetchChat(input: OpenClawChatRequest): Promise<Response> {
const token = await this.getToken()
const response = await fetch(
`http://127.0.0.1:${this.port}/v1/chat/completions`,
{
method: 'POST',
headers: {
Authorization: `Bearer ${token}`,
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: resolveAgentModel(input.agentId),
stream: true,
messages: [{ role: 'user', content: input.message }],
// Session routing key: lets the gateway pin the conversation per agent+session.
user: `browseros:${input.agentId}:${input.sessionKey}`,
}),
signal: input.signal,
},
)
if (response.ok) {
return response
}
const detail = await response.text()
throw new Error(
detail || `OpenClaw chat failed with status ${response.status}`,
)
}
}
/** Gateway model name for an agent: `main` is the bare `openclaw` model. */
function resolveAgentModel(agentId: string): string {
  if (agentId === 'main') return 'openclaw'
  return `openclaw/${agentId}`
}
/**
 * Wrap the raw SSE byte stream in a ReadableStream of parsed OpenClaw events.
 * Pumping starts immediately and runs detached; errors surface as 'error'
 * events on the returned stream rather than rejected promises.
 */
function createEventStream(
body: ReadableStream<Uint8Array>,
signal?: AbortSignal,
): ReadableStream<OpenClawStreamEvent> {
return new ReadableStream<OpenClawStreamEvent>({
start(controller) {
void pumpChatEvents(body, controller, signal)
},
})
}
/**
 * Pump SSE bytes from `body` through the eventsource parser and forward
 * parsed OpenClaw events into `controller`. Honors `signal` for cancellation.
 *
 * Bug fix: the abort and catch paths previously called `controller.close()`
 * while leaving `done` false, so the `finally` block closed the controller a
 * second time — closing an already-closed ReadableStream controller throws a
 * TypeError, which escaped as an unhandled rejection (the caller `void`s this
 * promise). Both paths now mark `done` before closing so the controller is
 * closed exactly once.
 */
async function pumpChatEvents(
  body: ReadableStream<Uint8Array>,
  controller: ReadableStreamDefaultController<OpenClawStreamEvent>,
  signal?: AbortSignal,
): Promise<void> {
  const reader = body.getReader()
  const decoder = new TextDecoder()
  let text = ''
  // True once the controller has been closed; guards every close/enqueue.
  let done = false
  const parser = createParser({
    onEvent(message) {
      if (done) return
      const nextText = updateAccumulatedText(message, text)
      done = handleMessage(message, controller, nextText, done)
      if (!done) {
        text = nextText
      }
    },
  })
  try {
    while (true) {
      if (signal?.aborted) {
        await reader.cancel()
        // Mark finished BEFORE closing so `finally` does not double-close.
        done = true
        controller.close()
        return
      }
      const { done: streamDone, value } = await reader.read()
      if (streamDone) break
      parser.feed(decoder.decode(value, { stream: true }))
    }
  } catch (error) {
    if (!done) {
      controller.enqueue({
        type: 'error',
        data: {
          message: error instanceof Error ? error.message : String(error),
        },
      })
      // Same double-close hazard as the abort path: mark before closing.
      done = true
      controller.close()
    }
  } finally {
    if (!done) {
      controller.close()
    }
    reader.releaseLock()
  }
}
/**
 * Process one SSE message: terminal `[DONE]` or a finish_reason ends the
 * stream; unparseable chunks emit an error and close. Returns the new `done`
 * state.
 */
function handleMessage(
  message: EventSourceMessage,
  controller: ReadableStreamDefaultController<OpenClawStreamEvent>,
  text: string,
  done: boolean,
): boolean {
  if (message.data === '[DONE]') {
    return finishStream(controller, text, done)
  }
  const chunk = parseChunk(message.data)
  if (chunk === null) {
    controller.enqueue({
      type: 'error',
      data: { message: 'Failed to parse OpenClaw chat stream chunk' },
    })
    controller.close()
    return true
  }
  const events = mapChunkToEvents(chunk)
  for (const event of events) {
    controller.enqueue(event)
  }
  if (hasFinishReason(chunk)) {
    return finishStream(controller, text, done)
  }
  return false
}
/** Append every delta fragment in the chunk to the accumulated text. */
function updateAccumulatedText(
  message: EventSourceMessage,
  text: string,
): string {
  const chunk = parseChunk(message.data)
  if (!chunk) return text
  const appended = readChoices(chunk)
    .map((choice) => readDeltaText(choice))
    .join('')
  return text + appended
}
/**
 * Terminate the stream once: emit 'done' with the accumulated text, or an
 * error when the agent produced only whitespace. Always returns true.
 */
function finishStream(
  controller: ReadableStreamDefaultController<OpenClawStreamEvent>,
  text: string,
  done: boolean,
): boolean {
  if (done) return true
  if (text.trim() === '') {
    controller.enqueue({
      type: 'error',
      data: {
        message: "Agent couldn't generate a response. Please try again.",
      },
    })
  } else {
    controller.enqueue({
      type: 'done',
      data: { text },
    })
  }
  controller.close()
  return true
}
/** One text-delta event per non-empty delta fragment in the chunk. */
function mapChunkToEvents(
  chunk: Record<string, unknown>,
): OpenClawStreamEvent[] {
  const events: OpenClawStreamEvent[] = []
  for (const choice of readChoices(chunk)) {
    const text = readDeltaText(choice)
    if (text === '') continue
    events.push({ type: 'text-delta', data: { text } })
  }
  return events
}
/** True when any choice in the chunk carries a non-empty finish_reason. */
function hasFinishReason(chunk: Record<string, unknown>): boolean {
  for (const choice of readChoices(chunk)) {
    if (readFinishReason(choice) !== null) return true
  }
  return false
}
/** Extract the object entries of `chunk.choices`; anything else yields []. */
function readChoices(
  chunk: Record<string, unknown>,
): Array<Record<string, unknown>> {
  const { choices } = chunk
  if (!Array.isArray(choices)) return []
  const result: Array<Record<string, unknown>> = []
  for (const choice of choices) {
    if (choice && typeof choice === 'object') {
      result.push(choice as Record<string, unknown>)
    }
  }
  return result
}
/** Pull `choice.delta.content` when it is a string; '' otherwise. */
function readDeltaText(choice: Record<string, unknown>): string {
  const { delta } = choice
  if (!delta || typeof delta !== 'object') return ''
  const { content } = delta as Record<string, unknown>
  if (typeof content !== 'string') return ''
  return content
}
/** Non-empty string finish_reason, else null. */
function readFinishReason(choice: Record<string, unknown>): string | null {
  const reason = choice.finish_reason
  if (typeof reason !== 'string' || reason === '') return null
  return reason
}
/** Tolerant JSON parse of an SSE data payload; null on failure. */
function parseChunk(data: string): Record<string, unknown> | null {
  let parsed: unknown
  try {
    parsed = JSON.parse(data)
  } catch {
    return null
  }
  return parsed as Record<string, unknown>
}

View File

@@ -1,96 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
export const SUPPORTED_OPENCLAW_PROVIDERS = [
'openrouter',
'openai',
'anthropic',
'moonshot',
] as const
export type SupportedOpenClawProvider =
(typeof SUPPORTED_OPENCLAW_PROVIDERS)[number]
const PROVIDER_ENV_VARS: Record<SupportedOpenClawProvider, string> = {
anthropic: 'ANTHROPIC_API_KEY',
moonshot: 'MOONSHOT_API_KEY',
openai: 'OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
}
/** Raised when a provider type has no OpenClaw mapping; `name` is set so cross-realm checks work. */
export class UnsupportedOpenClawProviderError extends Error {
constructor(providerType: string) {
super(`Unsupported OpenClaw provider: ${providerType}`)
this.name = 'UnsupportedOpenClawProviderError'
}
}
/** Detects the error by instance or by name (covers serialization/realm boundaries). */
export function isUnsupportedOpenClawProviderError(
  error: unknown,
): error is UnsupportedOpenClawProviderError {
  if (error instanceof UnsupportedOpenClawProviderError) return true
  if (!(error instanceof Error)) return false
  return error.name === 'UnsupportedOpenClawProviderError'
}
/** Narrowing membership check against the supported-provider list. */
export function isSupportedOpenClawProvider(
  providerType: string,
): providerType is SupportedOpenClawProvider {
  const supported: readonly string[] = SUPPORTED_OPENCLAW_PROVIDERS
  return supported.includes(providerType)
}
/** Pass through undefined/empty; return a supported type or throw. */
export function assertSupportedOpenClawProvider(
  providerType?: string,
): SupportedOpenClawProvider | undefined {
  if (!providerType) return undefined
  if (isSupportedOpenClawProvider(providerType)) {
    return providerType
  }
  throw new UnsupportedOpenClawProviderError(providerType)
}
/** `<provider>/<model>` ref, or undefined when no model is configured. */
export function buildOpenClawModelRef(
  providerType: SupportedOpenClawProvider,
  modelId?: string,
): string | undefined {
  if (!modelId) return undefined
  return `${providerType}/${modelId}`
}
/** API-key env-var name for a supported provider (static lookup). */
export function getOpenClawProviderEnvVar(
providerType: SupportedOpenClawProvider,
): string {
return PROVIDER_ENV_VARS[providerType]
}
/**
 * Validate the provider type and resolve the env values + model ref it needs.
 * Throws UnsupportedOpenClawProviderError for unknown types; returns an empty
 * env map when no provider is configured.
 */
export function resolveSupportedOpenClawProvider(input: {
  providerType?: string
  providerName?: string
  baseUrl?: string
  apiKey?: string
  modelId?: string
}): {
  envValues: Record<string, string>
  model?: string
  providerType?: SupportedOpenClawProvider
} {
  const providerType = assertSupportedOpenClawProvider(input.providerType)
  if (providerType === undefined) {
    return { envValues: {} }
  }
  const envValues: Record<string, string> = {}
  if (input.apiKey) {
    envValues[getOpenClawProviderEnvVar(providerType)] = input.apiKey
  }
  return {
    envValues,
    model: buildOpenClawModelRef(providerType, input.modelId),
    providerType,
  }
}

View File

@@ -1,18 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
/** One event in an OpenClaw chat stream; `data` shape varies per event type. */
export interface OpenClawStreamEvent {
// 'text-delta': incremental answer text; 'done'/'error' are terminal;
// tool-* and 'lifecycle' report agent-side activity.
type:
| 'text-delta'
| 'thinking'
| 'tool-start'
| 'tool-end'
| 'tool-output'
| 'lifecycle'
| 'done'
| 'error'
data: Record<string, unknown>
}

View File

@@ -8,33 +8,10 @@
* On Linux, machine operations are no-ops since Podman runs natively.
*/
import { existsSync } from 'node:fs'
import { join } from 'node:path'
const isLinux = process.platform === 'linux'
const PODMAN_BUNDLE_PATH = ['bin', 'third_party', 'podman'] as const
export type LogFn = (msg: string) => void
/** Platform-specific podman binary name (`.exe` on Windows). */
function getPodmanBinaryName(platform: NodeJS.Platform): string {
  if (platform === 'win32') return 'podman.exe'
  return 'podman'
}
/**
 * Locate the podman binary bundled under the app resources dir, or null when
 * no resources dir is given or the binary does not exist on disk.
 */
export function resolveBundledPodmanPath(
  resourcesDir?: string,
  platform: NodeJS.Platform = process.platform,
): string | null {
  if (!resourcesDir) return null
  const binaryName = getPodmanBinaryName(platform)
  const candidate = join(resourcesDir, ...PODMAN_BUNDLE_PATH, binaryName)
  if (!existsSync(candidate)) return null
  return candidate
}
export class PodmanRuntime {
private podmanPath: string
private machineReady = false
@@ -99,9 +76,9 @@ export class PodmanRuntime {
'machine',
'init',
'--cpus',
'8',
'2',
'--memory',
'8096',
'2048',
'--disk-size',
'10',
],
@@ -266,19 +243,6 @@ export class PodmanRuntime {
let runtime: PodmanRuntime | null = null
/**
 * (Re)configure the module-level PodmanRuntime singleton. Binary resolution
 * order: explicit path, bundled binary under resources, then PATH `podman`.
 */
export function configurePodmanRuntime(config: {
resourcesDir?: string
podmanPath?: string
}): PodmanRuntime {
const podmanPath =
config.podmanPath ??
resolveBundledPodmanPath(config.resourcesDir) ??
'podman'
runtime = new PodmanRuntime({ podmanPath })
return runtime
}
export function getPodmanRuntime(): PodmanRuntime {
if (!runtime) runtime = new PodmanRuntime()
return runtime

View File

@@ -0,0 +1,200 @@
import {
type BROWSEROS_ROLE_TEMPLATES,
getBrowserOSRoleTemplate,
} from '@browseros/shared/constants/role-aware-agents'
import type {
BrowserOSAgentRoleId,
BrowserOSAgentRoleSummary,
BrowserOSCustomRoleInput,
BrowserOSRoleTemplate,
} from '@browseros/shared/types/role-aware-agents'
type RoleTemplate = (typeof BROWSEROS_ROLE_TEMPLATES)[number]
interface BootstrapRenderableRole {
name: string
shortDescription: string
longDescription: string
recommendedApps: string[]
boundaries: BrowserOSRoleTemplate['boundaries']
bootstrap: BrowserOSRoleTemplate['bootstrap']
}
export interface RoleBootstrapFiles {
'AGENTS.md': string
'SOUL.md': string
'TOOLS.md': string
'.browseros-role.json': string
}
/** Look up a built-in role template by id; throws on unknown ids. */
export function resolveRoleTemplate(
  roleId: BrowserOSAgentRoleId,
): RoleTemplate {
  const template = getBrowserOSRoleTemplate(roleId)
  if (template) {
    return template
  }
  throw new Error(`Unknown BrowserOS role: ${roleId}`)
}
/**
 * Render the per-agent bootstrap files for a role (builtin or custom):
 * the three markdown files plus a JSON marker recording role provenance.
 */
export function buildRoleBootstrapFiles(input: {
  role: BrowserOSRoleTemplate | BrowserOSCustomRoleInput
  agentName: string
}): RoleBootstrapFiles {
  const normalizedRole = normalizeRoleForBootstrap(input.role)
  const roleId = 'id' in input.role ? input.role.id : undefined
  const marker = {
    version: 1,
    roleSource: roleId ? 'builtin' : 'custom',
    roleId,
    roleName: normalizedRole.name,
    shortDescription: normalizedRole.shortDescription,
    createdBy: 'browseros',
    agentName: input.agentName,
  }
  return {
    'AGENTS.md': normalizedRole.bootstrap.agentsMd,
    'SOUL.md': normalizedRole.bootstrap.soulMd,
    'TOOLS.md': normalizedRole.bootstrap.toolsMd,
    '.browseros-role.json': `${JSON.stringify(marker, null, 2)}\n`,
  }
}
/** Compact summary of a role for listings; source is inferred from the `id` field. */
export function toRoleSummary(
  role: BrowserOSRoleTemplate | BrowserOSCustomRoleInput,
): BrowserOSAgentRoleSummary {
  const normalized = normalizeRoleForBootstrap(role)
  return {
    roleSource: 'id' in role ? 'builtin' : 'custom',
    roleId: 'id' in role ? role.id : undefined,
    roleName: normalized.name,
    shortDescription: normalized.shortDescription,
  }
}
/**
 * Sanitize a custom role: keep only string app names and a real boundaries
 * array, and fill any missing/blank bootstrap documents from the default
 * markdown builders.
 */
export function normalizeCustomRole(
  role: BrowserOSCustomRoleInput,
): BootstrapRenderableRole {
  const recommendedApps: string[] = []
  if (Array.isArray(role.recommendedApps)) {
    for (const app of role.recommendedApps) {
      if (typeof app === 'string') {
        recommendedApps.push(app)
      }
    }
  }
  const boundaries = Array.isArray(role.boundaries) ? role.boundaries : []
  const agentsMd =
    role.bootstrap?.agentsMd?.trim() ||
    buildAgentsMd({
      name: role.name,
      longDescription: role.longDescription,
      boundaries,
    })
  const soulMd =
    role.bootstrap?.soulMd?.trim() ||
    buildSoulMd({
      name: role.name,
      shortDescription: role.shortDescription,
      longDescription: role.longDescription,
    })
  const toolsMd =
    role.bootstrap?.toolsMd?.trim() ||
    buildToolsMd({
      boundaries,
      recommendedApps,
    })
  return {
    name: role.name,
    shortDescription: role.shortDescription,
    longDescription: role.longDescription,
    recommendedApps,
    boundaries,
    bootstrap: { agentsMd, soulMd, toolsMd },
  }
}
/** Builtin templates are already renderable; custom input is normalized first. */
function normalizeRoleForBootstrap(
  role: BrowserOSRoleTemplate | BrowserOSCustomRoleInput,
): BootstrapRenderableRole {
  if ('id' in role) {
    return role
  }
  return normalizeCustomRole(role)
}
/** Default AGENTS.md for a role; one operating-rule bullet per boundary. */
function buildAgentsMd(input: {
  name: string
  longDescription: string
  boundaries: BrowserOSRoleTemplate['boundaries']
}): string {
  const ruleLines: string[] = []
  for (const boundary of input.boundaries) {
    ruleLines.push(
      `- ${boundary.label}: ${boundary.description} Default mode: ${boundary.defaultMode}.`,
    )
  }
  const boundaryLines = ruleLines.join('\n')
  return `# ${input.name}
You are the ${input.name} specialist for this workspace.
## Core Purpose
${input.longDescription}
## Operating Rules
${boundaryLines}
## Default Output Style
- concise
- action-oriented
- explicit about blockers and approvals
`
}
/** Default SOUL.md for a role: tone/posture framing built from the role descriptions. */
function buildSoulMd(input: {
name: string
shortDescription: string
longDescription: string
}): string {
return `# Operating Style
You act like a trusted ${input.name}.
## Working Posture
- calm
- structured
- direct
- explicit about tradeoffs
## Role Framing
${input.shortDescription}
${input.longDescription}
`
}
/** Default TOOLS.md for a role: recommended apps plus per-boundary defaults. */
function buildToolsMd(input: {
  boundaries: BrowserOSRoleTemplate['boundaries']
  recommendedApps: string[]
}): string {
  const boundaryLines = input.boundaries
    .map((boundary) => `- ${boundary.label}: ${boundary.defaultMode}`)
    .join('\n')
  let appsLine = 'No specific apps configured yet.'
  if (input.recommendedApps.length > 0) {
    appsLine = input.recommendedApps.join(', ')
  }
  return `# Tooling Guidelines
- Use BrowserOS MCP for browser and connected SaaS tasks.
- Prefer read, summarize, and draft flows.
- Keep outputs in the workspace when possible so work remains inspectable.
## Recommended Apps
${appsLine}
## Boundary Defaults
${boundaryLines}
`
}

View File

@@ -517,45 +517,15 @@ export class Browser {
return null
}
/**
 * Pick the window a new page should open in. Visible pages keep the caller's
 * windowId. Hidden pages reuse the requested window only when it exists and
 * is itself hidden; otherwise a fresh hidden window is created.
 */
private async resolveWindowIdForNewPage(opts?: {
hidden?: boolean
windowId?: number
}): Promise<number | undefined> {
if (!opts?.hidden) {
return opts?.windowId
}
if (opts.windowId !== undefined) {
const windows = await this.listWindows()
const targetWindow = windows.find(
(window) => window.windowId === opts.windowId,
)
if (targetWindow && !targetWindow.isVisible) {
return targetWindow.windowId
}
// A visible window cannot host a hidden page; warn and fall through.
if (targetWindow?.isVisible) {
logger.warn(
'Requested hidden page target window is visible, creating a new hidden window instead',
{
requestedWindowId: opts.windowId,
},
)
}
}
const hiddenWindow = await this.createWindow({ hidden: true })
return hiddenWindow.windowId
}
async newPage(
url: string,
opts?: { hidden?: boolean; background?: boolean; windowId?: number },
): Promise<number> {
const windowId = await this.resolveWindowIdForNewPage(opts)
const createResult = await this.cdp.Browser.createTab({
url,
...(opts?.hidden !== undefined && { hidden: opts.hidden }),
...(opts?.background !== undefined && { background: opts.background }),
...(windowId !== undefined && { windowId }),
...(opts?.windowId !== undefined && { windowId: opts.windowId }),
})
const tabId = (createResult.tab as TabInfo).tabId
@@ -583,7 +553,7 @@ export class Browser {
loadProgress: tabInfo.loadProgress,
isPinned: tabInfo.isPinned,
isHidden: tabInfo.isHidden,
windowId: tabInfo.windowId ?? windowId,
windowId: tabInfo.windowId,
index: tabInfo.index,
groupId: tabInfo.groupId,
})
@@ -911,39 +881,6 @@ export class Browser {
}
}
/**
 * Override the page's device metrics (CSS pixels) via CDP emulation.
 * Throws on non-positive dimensions. Undo with clearViewport().
 */
async setViewport(
page: number,
width: number,
height: number,
deviceScaleFactor: number = 1,
): Promise<void> {
if (width <= 0 || height <= 0) {
throw new Error(
`Invalid viewport: width=${width} height=${height} must both be > 0`,
)
}
const session = await this.resolveSession(page)
await session.Emulation.setDeviceMetricsOverride({
width,
height,
deviceScaleFactor,
mobile: false,
})
}
/** Remove any device-metrics override set by setViewport(). */
async clearViewport(page: number): Promise<void> {
const session = await this.resolveSession(page)
await session.Emulation.clearDeviceMetricsOverride()
}
/** Axis-aligned bounding box (viewport pixels) for a backend DOM node on the given page. */
async getElementBbox(
page: number,
backendNodeId: number,
): Promise<elements.Bbox> {
const session = await this.resolveSession(page)
return elements.getElementBbox(session, backendNodeId)
}
async evaluate(
page: number,
expression: string,

View File

@@ -6,36 +6,6 @@ function quadCenter(q: number[]): { x: number; y: number } {
return { x, y }
}
export interface Bbox {
x1: number
y1: number
x2: number
y2: number
}
/**
 * Axis-aligned bounds of a CDP quad (8 numbers: four x,y corner pairs).
 * Mins are floored, maxes ceiled so the box fully covers the quad.
 * Throws when any coordinate is non-finite.
 */
function quadBounds(q: number[]): Bbox {
  const xs = [q[0], q[2], q[4], q[6]]
  const ys = [q[1], q[3], q[5], q[7]]
  const minX = Math.min(...xs)
  const minY = Math.min(...ys)
  const maxX = Math.max(...xs)
  const maxY = Math.max(...ys)
  const allFinite = [minX, minY, maxX, maxY].every((v) => Number.isFinite(v))
  if (!allFinite) {
    throw new Error('Quad contains non-finite coordinates')
  }
  return {
    x1: Math.floor(minX),
    y1: Math.floor(minY),
    x2: Math.ceil(maxX),
    y2: Math.ceil(maxY),
  }
}
/** 3-tier fallback: getContentQuads -> getBoxModel -> getBoundingClientRect */
export async function getElementCenter(
session: ProtocolApi,
@@ -81,56 +51,6 @@ export async function getElementCenter(
return { x: rect.x + rect.w / 2, y: rect.y + rect.h / 2 }
}
/** Axis-aligned bbox in viewport pixels via the same 3-tier fallback as getElementCenter. */
/** Axis-aligned bbox in viewport pixels via the same 3-tier fallback as getElementCenter. */
export async function getElementBbox(
session: ProtocolApi,
backendNodeId: number,
): Promise<Bbox> {
// Tier 1: content quads (accounts for transforms/fragmentation).
try {
const quadsResult = await session.DOM.getContentQuads({ backendNodeId })
if (quadsResult.quads?.length) {
const q = quadsResult.quads[0] as unknown as number[]
if (q && q.length >= 8) return quadBounds(q)
}
} catch {
// fall through
}
// Tier 2: box model content quad.
try {
const boxResult = await session.DOM.getBoxModel({ backendNodeId })
const content = boxResult.model?.content as unknown as number[] | undefined
if (content && content.length >= 8) return quadBounds(content)
} catch {
// fall through
}
// Tier 3: evaluate getBoundingClientRect in-page on the resolved node.
const resolved = await session.DOM.resolveNode({ backendNodeId })
const objectId = resolved.object?.objectId
if (!objectId) {
throw new Error(
'Could not resolve element — it may have been removed from the page.',
)
}
const boundsResult = await session.Runtime.callFunctionOn({
functionDeclaration:
'function(){var r=this.getBoundingClientRect();return{x:r.left,y:r.top,w:r.width,h:r.height}}',
objectId,
returnByValue: true,
})
const rect = boundsResult.result?.value as
| { x: number; y: number; w: number; h: number }
| undefined
if (!rect) throw new Error('Could not get element bounds.')
// Floor/ceil so the returned box fully covers the rect, matching quadBounds.
return {
x1: Math.floor(rect.x),
y1: Math.floor(rect.y),
x2: Math.ceil(rect.x + rect.w),
y2: Math.ceil(rect.y + rect.h),
}
}
export async function scrollIntoView(
session: ProtocolApi,
backendNodeId: number,

View File

@@ -6,19 +6,8 @@ import { PATHS } from '@browseros/shared/constants/paths'
import type { ServerDiscoveryConfig } from '@browseros/shared/types/server-config'
import { logger } from './logger'
const DEV_BROWSEROS_DIR_NAME = '.browseros-dev'
/** BrowserOS home directory; development builds use a separate dotdir. */
export function getBrowserosDir(): string {
  const isDevelopment = process.env.NODE_ENV === 'development'
  const dirName = isDevelopment
    ? DEV_BROWSEROS_DIR_NAME
    : PATHS.BROWSEROS_DIR_NAME
  return join(homedir(), dirName)
}
export function logDevelopmentBrowserosDir(): void {
if (process.env.NODE_ENV !== 'development') return
logger.info(`Using development BrowserOS directory: ${getBrowserosDir()}`)
return join(homedir(), PATHS.BROWSEROS_DIR_NAME)
}
export function getMemoryDir(): string {
@@ -68,7 +57,6 @@ export function removeServerConfigSync(): void {
}
export async function ensureBrowserosDir(): Promise<void> {
logDevelopmentBrowserosDir()
await mkdir(getMemoryDir(), { recursive: true })
await mkdir(getSkillsDir(), { recursive: true })
await mkdir(getBuiltinSkillsDir(), { recursive: true })

View File

@@ -11,10 +11,6 @@ import { INLINED_ENV } from '../../../env'
import { logger } from '../../logger'
import { fetchBrowserOSConfig, getLLMConfigFromProvider } from '../gateway'
import { getOAuthTokenManager } from '../oauth'
import {
resolveMockBrowserOSConfig,
shouldUseMockBrowserOSLLM,
} from './mock-language-model'
import type { ResolvedLLMConfig } from './types'
export async function resolveLLMConfig(
@@ -53,9 +49,6 @@ export async function resolveLLMConfig(
// BrowserOS gateway: fetch config from remote service
if (config.provider === LLM_PROVIDERS.BROWSEROS) {
if (shouldUseMockBrowserOSLLM(config)) {
return resolveMockBrowserOSConfig(config, browserosId)
}
return resolveBrowserOSConfig(config, browserosId)
}

View File

@@ -1,83 +0,0 @@
import type {
LanguageModelV3GenerateResult,
LanguageModelV3StreamPart,
LanguageModelV3Usage,
} from '@ai-sdk/provider'
import { LLM_PROVIDERS, type LLMConfig } from '@browseros/shared/schemas/llm'
import { type LanguageModel, simulateReadableStream } from 'ai'
import { MockLanguageModelV3 } from 'ai/test'
import type { ResolvedLLMConfig } from './types'
// Sentinel model id and canned response text used whenever the mock
// BrowserOS LLM is active (see shouldUseMockBrowserOSLLM).
export const MOCK_BROWSEROS_MODEL_ID = 'browseros-test-mock'
export const MOCK_BROWSEROS_RESPONSE_TEXT = 'Mock BrowserOS test response.'

// Fixed token-usage accounting attached to every mock generation so tests
// observe deterministic numbers: 1 input token, 4 output text tokens.
const MOCK_USAGE: LanguageModelV3Usage = {
  inputTokens: {
    total: 1,
    noCache: 1,
    cacheRead: undefined,
    cacheWrite: undefined,
  },
  outputTokens: {
    total: 4,
    text: 4,
    reasoning: undefined,
  },
}
// Build the canned non-streaming generation result served by the mock model:
// a single text part with the fixed response and the fixed usage numbers.
function createMockResult(): LanguageModelV3GenerateResult {
  const text = MOCK_BROWSEROS_RESPONSE_TEXT
  return {
    content: [{ type: 'text', text }],
    finishReason: { unified: 'stop', raw: 'stop' },
    usage: MOCK_USAGE,
    warnings: [],
  }
}
// True when this process has opted into the mock BrowserOS LLM via the
// BROWSEROS_USE_MOCK_LLM environment flag (exact string 'true').
export function isMockBrowserOSLLMEnabled(): boolean {
  const flag = process.env.BROWSEROS_USE_MOCK_LLM
  return flag === 'true'
}
// A config should route to the mock only when it targets the BrowserOS
// provider AND the process-level mock flag is set.
export function shouldUseMockBrowserOSLLM(
  config: Pick<LLMConfig, 'provider'>,
): boolean {
  if (config.provider !== LLM_PROVIDERS.BROWSEROS) {
    return false
  }
  return isMockBrowserOSLLMEnabled()
}
// Resolve an LLM config for the mock path: keep the caller's settings,
// default the model to the mock sentinel id, and report OpenAI as the
// upstream provider.
export function resolveMockBrowserOSConfig(
  config: LLMConfig,
  browserosId?: string,
): ResolvedLLMConfig {
  const model = config.model ?? MOCK_BROWSEROS_MODEL_ID
  return {
    ...config,
    model,
    browserosId,
    upstreamProvider: LLM_PROVIDERS.OPENAI,
  }
}
// Construct a MockLanguageModelV3 that always yields the canned response:
// non-streaming calls return createMockResult(), streaming calls replay a
// fixed start/delta/end/finish sequence via simulateReadableStream.
export function createMockBrowserOSLanguageModel(): LanguageModel {
  const textId = 'text-1'
  const streamParts: LanguageModelV3StreamPart[] = [
    { type: 'text-start', id: textId },
    {
      type: 'text-delta',
      id: textId,
      delta: MOCK_BROWSEROS_RESPONSE_TEXT,
    },
    { type: 'text-end', id: textId },
    {
      type: 'finish',
      finishReason: { unified: 'stop', raw: 'stop' },
      usage: MOCK_USAGE,
    },
  ]
  const model = new MockLanguageModelV3({
    doGenerate: async () => createMockResult(),
    doStream: async () => ({
      stream: simulateReadableStream({ chunks: streamParts }),
    }),
  })
  return model as LanguageModel
}

View File

@@ -21,10 +21,6 @@ import { logger } from '../../logger'
import { createOpenRouterCompatibleFetch } from '../../openrouter-fetch'
import { createCodexFetch } from '../oauth/codex-fetch'
import { createCopilotFetch } from '../oauth/copilot-fetch'
import {
createMockBrowserOSLanguageModel,
shouldUseMockBrowserOSLLM,
} from './mock-language-model'
import type { ResolvedLLMConfig } from './types'
type ProviderFactory = (config: ResolvedLLMConfig) => LanguageModel
@@ -199,9 +195,6 @@ const PROVIDER_FACTORIES: Record<string, ProviderFactory> = {
}
export function createLLMProvider(config: ResolvedLLMConfig): LanguageModel {
if (shouldUseMockBrowserOSLLM(config)) {
return createMockBrowserOSLanguageModel()
}
const factory = PROVIDER_FACTORIES[config.provider]
if (!factory) throw new Error(`Unknown provider: ${config.provider}`)
return factory(config)

View File

@@ -13,11 +13,7 @@ import fs from 'node:fs'
import path from 'node:path'
import { EXIT_CODES } from '@browseros/shared/constants/exit-codes'
import { createHttpServer } from './api/server'
import {
configureOpenClawService,
getOpenClawService,
} from './api/services/openclaw/openclaw-service'
import { configurePodmanRuntime } from './api/services/openclaw/podman-runtime'
import { getOpenClawService } from './api/services/openclaw/openclaw-service'
import { CdpBackend } from './browser/backends/cdp'
import { Browser } from './browser/browser'
import type { ServerConfig } from './config'
@@ -59,9 +55,6 @@ export class Application {
resourcesDir: path.resolve(this.config.resourcesDir),
})
configurePodmanRuntime({
resourcesDir: path.resolve(this.config.resourcesDir),
})
await this.initCoreServices()
if (!this.config.cdpPort) {
@@ -126,10 +119,7 @@ export class Application {
this.logStartupSummary()
startSkillSync()
configureOpenClawService({
browserosServerPort: this.config.serverPort,
resourcesDir: path.resolve(this.config.resourcesDir),
})
getOpenClawService(this.config.serverPort)
.tryAutoStart()
.catch((err) =>
logger.warn('OpenClaw auto-start failed', {

View File

@@ -5,64 +5,19 @@ interface SemanticScore {
backend: string
}
interface EmbeddingOutput {
tolist: () => number[][]
dispose?: () => void
}
interface FeatureExtractionPipeline {
(
texts: string[],
options: { pooling: string; normalize: boolean },
): Promise<EmbeddingOutput>
dispose?: () => Promise<void>
}
type FeatureExtractionPipeline = (
texts: string[],
options: { pooling: string; normalize: boolean },
) => Promise<{ tolist: () => number[][] }>
let pipelineInstance: FeatureExtractionPipeline | null = null
const LOAD_RETRY_MS = 60_000
let lastLoadFailedAt = 0
let cleanupListener: (() => void) | null = null
function getModelName(): string {
return process.env.ACL_EMBEDDING_MODEL ?? 'Xenova/bge-small-en-v1.5'
}
function isSemanticDisabled(): boolean {
return process.env.ACL_EMBEDDING_DISABLE === 'true'
}
export async function disposeSemanticPipeline(): Promise<void> {
const current = pipelineInstance
pipelineInstance = null
if (cleanupListener) {
process.removeListener('beforeExit', cleanupListener)
cleanupListener = null
}
if (!current?.dispose) {
return
}
try {
await current.dispose()
} catch (error) {
logger.warn('ACL embedding model disposal failed', {
error: error instanceof Error ? error.message : String(error),
})
}
}
function registerPipelineCleanup(): void {
if (cleanupListener) {
return
}
cleanupListener = () => {
// beforeExit cannot await async cleanup, so explicit disposal is still
// required anywhere teardown must be deterministic.
void disposeSemanticPipeline()
}
process.once('beforeExit', cleanupListener)
}
async function ensurePipeline(): Promise<FeatureExtractionPipeline | null> {
if (pipelineInstance) return pipelineInstance
if (lastLoadFailedAt > 0 && Date.now() - lastLoadFailedAt < LOAD_RETRY_MS) {
@@ -75,7 +30,6 @@ async function ensurePipeline(): Promise<FeatureExtractionPipeline | null> {
dtype: 'fp32',
})
pipelineInstance = extractor as unknown as FeatureExtractionPipeline
registerPipelineCleanup()
lastLoadFailedAt = 0
logger.info('ACL embedding model loaded', { model: getModelName() })
return pipelineInstance
@@ -110,7 +64,6 @@ export async function computeSemanticSimilarity(
right: string,
): Promise<SemanticScore> {
if (!left || !right) return { score: 0, backend: 'none' }
if (isSemanticDisabled()) return { score: 0, backend: 'disabled' }
const extractor = await ensurePipeline()
if (!extractor) return { score: 0, backend: 'error' }
@@ -121,7 +74,6 @@ export async function computeSemanticSimilarity(
normalize: true,
})
const embeddings = output.tolist()
output.dispose?.()
const score = cosineSimilarity(embeddings[0], embeddings[1])
return {
score: Math.max(0, Math.min(score, 1)),

View File

@@ -1,37 +0,0 @@
import { resolve } from 'node:path'
// Score one ACL fixture end-to-end and print the decision as JSON on stdout.
// Usage: <script> <fixture-name>, where the fixture JSON lives under
// tests/__fixtures__/acl/<fixture-name>.json.
async function main(): Promise<void> {
  const fixtureName = process.argv[2]
  if (!fixtureName) {
    throw new Error('Fixture name is required')
  }
  // Silence logging and force-enable embeddings BEFORE the modules below are
  // dynamically imported — presumably they read these env vars at load time
  // (TODO confirm against acl-scorer / acl-embeddings).
  process.env.LOG_LEVEL = 'silent'
  delete process.env.ACL_EMBEDDING_DISABLE
  const [{ scoreFixture }, { disposeSemanticPipeline }] = await Promise.all([
    import('../../src/tools/acl/acl-scorer'),
    import('../../src/tools/acl/acl-embeddings'),
  ])
  const fixturePath = resolve(
    import.meta.dir,
    `../__fixtures__/acl/${fixtureName}.json`,
  )
  const fixture = await Bun.file(fixturePath).json()
  const decision = await scoreFixture(
    fixture.tool_name,
    fixture.page_url,
    fixture.element,
    fixture.rules,
  )
  // Explicit disposal gives deterministic teardown of the embedding pipeline.
  await disposeSemanticPipeline()
  process.stdout.write(JSON.stringify(decision))
}
// Top-level entry: surface any failure on stderr and flag a non-zero exit
// without aborting pending stdout writes.
main().catch((error) => {
  const details =
    error instanceof Error ? (error.stack ?? error.message) : String(error)
  console.error(details)
  process.exitCode = 1
})

View File

@@ -1,146 +0,0 @@
import { spawnSync } from 'node:child_process'
import { existsSync, mkdirSync, readdirSync } from 'node:fs'
import { dirname, resolve } from 'node:path'
// Repo layout anchors: this script lives two levels below the project root.
const projectRoot = resolve(import.meta.dir, '..', '..')
const testsRoot = resolve(projectRoot, 'tests')
const cleanupScript = resolve(testsRoot, '__helpers__/cleanup.sh')
// Groups run in this order first; any other directory groups sort after them.
const preferredDirectoryGroups = [
  'agent',
  'api',
  'skills',
  'tools',
  'browser',
  'sdk',
]
// tests/ subdirectories that are support material, not runnable groups.
const ignoredDirectories = new Set(['__fixtures__', '__helpers__'])
// Root-level files excluded from the "root" group (run via "integration").
const rootGroupExclusions = new Set(['server.integration.test.ts'])
// Matches *.test.* / *.spec.* with js/ts/jsx/tsx (and cjs/mjs-style) suffixes.
const testFilePattern = /\.(test|spec)\.[cm]?[jt]sx?$/
// Comparator ordering group names by their index in preferredDirectoryGroups;
// names not listed there rank after every preferred one and fall back to
// alphabetical order among themselves.
function compareGroupNames(left: string, right: string): number {
  const rankOf = (name: string): number => {
    const index = preferredDirectoryGroups.indexOf(name)
    return index === -1 ? preferredDirectoryGroups.length : index
  }
  const delta = rankOf(left) - rankOf(right)
  return delta !== 0 ? delta : left.localeCompare(right)
}
// Enumerate runnable group directories under tests/, skipping fixture and
// helper directories, sorted into the preferred execution order.
function listDirectoryGroups(): string[] {
  const groupNames: string[] = []
  for (const entry of readdirSync(testsRoot, { withFileTypes: true })) {
    if (entry.isDirectory() && !ignoredDirectories.has(entry.name)) {
      groupNames.push(entry.name)
    }
  }
  return groupNames.sort(compareGroupNames)
}
// Collect top-level test files directly under tests/ (excluding the
// integration suite) as relative bun-test targets, alphabetically sorted.
function listRootTestTargets(): string[] {
  const targets: string[] = []
  for (const entry of readdirSync(testsRoot, { withFileTypes: true })) {
    if (!entry.isFile()) continue
    if (!testFilePattern.test(entry.name)) continue
    if (rootGroupExclusions.has(entry.name)) continue
    targets.push(`./tests/${entry.name}`)
  }
  return targets.sort((a, b) => a.localeCompare(b))
}
// All atomic groups: directory groups, plus "integration" when the
// integration file exists, plus "root" when loose test files exist.
function listAllGroups(): string[] {
  const groups = listDirectoryGroups().slice()
  if (existsSync(resolve(testsRoot, 'server.integration.test.ts'))) {
    groups.push('integration')
  }
  if (listRootTestTargets().length !== 0) {
    groups.push('root')
  }
  return groups
}
// Every name accepted on the CLI (composites, aliases, atomic groups),
// alphabetized for the error message.
function listAvailableGroupNames(): string[] {
  const names = ['all', 'core', 'cdp'].concat(listAllGroups())
  names.sort((a, b) => a.localeCompare(b))
  return names
}
// Expand a composite group into its member groups, or null when the name is
// not a composite ("all" = everything, "core" = fast non-browser suites).
function getCompositeGroupMembers(group: string): string[] | null {
  switch (group) {
    case 'all':
      return listAllGroups()
    case 'core':
      return ['agent', 'api', 'skills', 'root']
    default:
      return null
  }
}
// Resolve an atomic group name to the bun-test path targets it runs.
// Returns [] when the group has nothing runnable on disk.
function getAtomicGroupTargets(group: string): string[] {
  if (group === 'cdp') {
    // "cdp" is an alias for the browser-backed suite.
    return getAtomicGroupTargets('browser')
  }
  if (group === 'integration') {
    const hasIntegration = existsSync(
      resolve(testsRoot, 'server.integration.test.ts'),
    )
    return hasIntegration ? ['./tests/server.integration.test.ts'] : []
  }
  if (group === 'root') {
    return listRootTestTargets()
  }
  return existsSync(resolve(testsRoot, group)) ? [`./tests/${group}`] : []
}
// Run a command from the project root with inherited stdio, announcing it
// with a label first. Throws on spawn failure; otherwise returns the exit
// status (1 when the status is unavailable).
function runCommand(cmd: string[], label: string): number {
  console.log(`\n==> ${label}`)
  const [program, ...args] = cmd
  const outcome = spawnSync(program, args, {
    cwd: projectRoot,
    env: process.env,
    stdio: 'inherit',
  })
  if (outcome.error) {
    throw outcome.error
  }
  return outcome.status ?? 1
}
// Execute a single atomic group: clean shared test resources, then invoke
// bun test over the group's targets, optionally emitting a JUnit report
// when BROWSEROS_JUNIT_PATH is set. Throws for unknown group names.
function runAtomicGroup(group: string): number {
  const targets = getAtomicGroupTargets(group)
  if (targets.length === 0) {
    const available = listAvailableGroupNames().join(', ')
    throw new Error(
      `Unknown test group "${group}". Available groups: ${available}`,
    )
  }
  runCommand(['bash', cleanupScript], `Cleaning up test resources for ${group}`)
  const cmd = [process.execPath, '--env-file=.env.development', 'test']
  const junitPath = process.env.BROWSEROS_JUNIT_PATH?.trim()
  if (junitPath) {
    const outputPath = resolve(projectRoot, junitPath)
    mkdirSync(dirname(outputPath), { recursive: true })
    cmd.push('--reporter=junit', `--reporter-outfile=${outputPath}`)
  }
  return runCommand([...cmd, ...targets], `Running ${group} tests`)
}
function runGroup(group: string): number {
const compositeMembers = getCompositeGroupMembers(group)
if (compositeMembers) {
let exitCode = 0
for (const member of compositeMembers) {
const status = runGroup(member)
if (status !== 0 && exitCode === 0) {
exitCode = status
}
}
return exitCode
}
return runAtomicGroup(group)
}
// CLI entry: first positional argument names the group; defaults to the
// "all" composite. Exit code is the (first) failing suite's status.
const requestedGroup = process.argv[2] ?? 'all'
process.exit(runGroup(requestedGroup))

View File

@@ -79,11 +79,7 @@ export async function spawnServer(config: ServerConfig): Promise<ServerState> {
],
{
stdio: ['ignore', 'pipe', 'pipe'],
env: {
...globalThis.process.env,
NODE_ENV: 'test',
BROWSEROS_USE_MOCK_LLM: 'true',
},
env: { ...globalThis.process.env, NODE_ENV: 'test' },
},
)

View File

@@ -1168,9 +1168,8 @@ describe('compaction E2E — pruning and output reduction', () => {
{ role: 'user', content: 'x'.repeat(3000) },
]
const estimated = estimateTokensForThreshold(messages, config)
expect(estimated).toBe(
Math.ceil(1000 * config.safetyMultiplier) + config.fixedOverhead,
)
// 3000 chars / 3 = 1000 tokens, * 1.3 = 1300, + 12000 = 13300
expect(estimated).toBe(Math.ceil(1000 * 1.3) + 12_000)
})
})

View File

@@ -19,7 +19,7 @@ afterEach(() => {
})
describe('createKlavisRoutes', () => {
it('normalizes string integrations into unauthenticated entries', async () => {
it('normalizes string integrations into authenticated entries', async () => {
globalThis.fetch = (async () =>
Response.json({
integrations: ['Google Docs', 'Slack'],
@@ -32,8 +32,8 @@ describe('createKlavisRoutes', () => {
assert.strictEqual(response.status, 200)
assert.deepStrictEqual(body, {
integrations: [
{ name: 'Google Docs', is_authenticated: false },
{ name: 'Slack', is_authenticated: false },
{ name: 'Google Docs', is_authenticated: true },
{ name: 'Slack', is_authenticated: true },
],
count: 2,
})

View File

@@ -1,215 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*/
import { afterEach, describe, expect, it, mock } from 'bun:test'
import { UnsupportedOpenClawProviderError } from '../../../src/api/services/openclaw/openclaw-provider-map'
describe('createOpenClawRoutes', () => {
afterEach(() => {
mock.restore()
})
it('preserves BrowserOS SSE framing and session headers for chat', async () => {
const actualOpenClawService = await import(
'../../../src/api/services/openclaw/openclaw-service'
)
const chatStream = mock(
async () =>
new ReadableStream({
start(controller) {
controller.enqueue({
type: 'text-delta',
data: { text: 'Hello' },
})
controller.enqueue({
type: 'done',
data: { text: 'Hello' },
})
controller.close()
},
}),
)
mock.module('../../../src/api/services/openclaw/openclaw-service', () => ({
...actualOpenClawService,
getOpenClawService: () =>
({
chatStream,
}) as never,
}))
const { createOpenClawRoutes } = await import(
'../../../src/api/routes/openclaw'
)
const route = createOpenClawRoutes()
const response = await route.request('/agents/research/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: 'hi',
sessionKey: 'session-123',
}),
})
expect(response.status).toBe(200)
expect(response.headers.get('Content-Type')).toContain('text/event-stream')
expect(response.headers.get('X-Session-Key')).toBe('session-123')
expect(chatStream).toHaveBeenCalledWith('research', 'session-123', 'hi')
expect(await response.text()).toBe(
'data: {"type":"text-delta","data":{"text":"Hello"}}\n\n' +
'data: {"type":"done","data":{"text":"Hello"}}\n\n' +
'data: [DONE]\n\n',
)
})
it('returns 400 for unsupported provider payloads', async () => {
const actualOpenClawService = await import(
'../../../src/api/services/openclaw/openclaw-service'
)
const updateProviderKeys = mock(async () => {
throw new UnsupportedOpenClawProviderError('google')
})
mock.module('../../../src/api/services/openclaw/openclaw-service', () => ({
...actualOpenClawService,
getOpenClawService: () =>
({
updateProviderKeys,
}) as never,
}))
const { createOpenClawRoutes } = await import(
'../../../src/api/routes/openclaw'
)
const route = createOpenClawRoutes()
const response = await route.request('/providers', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
providerType: 'google',
apiKey: 'google-key',
}),
})
expect(response.status).toBe(400)
expect(updateProviderKeys).toHaveBeenCalledWith({
providerType: 'google',
apiKey: 'google-key',
})
expect(await response.json()).toEqual({
error: 'Unsupported OpenClaw provider: google',
})
})
it('returns a non-restarting response when only the default model changes', async () => {
const actualOpenClawService = await import(
'../../../src/api/services/openclaw/openclaw-service'
)
const updateProviderKeys = mock(async () => ({
restarted: false,
modelUpdated: true,
}))
mock.module('../../../src/api/services/openclaw/openclaw-service', () => ({
...actualOpenClawService,
getOpenClawService: () =>
({
updateProviderKeys,
}) as never,
}))
const { createOpenClawRoutes } = await import(
'../../../src/api/routes/openclaw'
)
const route = createOpenClawRoutes()
const response = await route.request('/providers', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
providerType: 'openai',
apiKey: 'sk-test',
modelId: 'gpt-5.4-mini',
}),
})
expect(response.status).toBe(200)
expect(updateProviderKeys).toHaveBeenCalledWith({
providerType: 'openai',
apiKey: 'sk-test',
modelId: 'gpt-5.4-mini',
})
expect(await response.json()).toEqual({
status: 'updated',
message: 'Provider updated without a restart',
})
})
it('does not expose a roles route', async () => {
const { createOpenClawRoutes } = await import(
'../../../src/api/routes/openclaw'
)
const route = createOpenClawRoutes()
const response = await route.request('/roles')
expect(response.status).toBe(404)
})
it('ignores role fields when creating agents', async () => {
const actualOpenClawService = await import(
'../../../src/api/services/openclaw/openclaw-service'
)
const createAgent = mock(async () => ({
agentId: 'research',
name: 'research',
workspace: '/home/node/.openclaw/workspace-research',
}))
mock.module('../../../src/api/services/openclaw/openclaw-service', () => ({
...actualOpenClawService,
getOpenClawService: () =>
({
createAgent,
}) as never,
}))
const { createOpenClawRoutes } = await import(
'../../../src/api/routes/openclaw'
)
const route = createOpenClawRoutes()
const response = await route.request('/agents', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
name: 'research',
roleId: 'chief-of-staff',
customRole: {
name: 'Ignored',
shortDescription: 'Ignored',
longDescription: 'Ignored',
recommendedApps: [],
boundaries: [],
},
providerType: 'openai',
apiKey: 'sk-test',
modelId: 'gpt-5.4-mini',
}),
})
expect(response.status).toBe(201)
expect(createAgent).toHaveBeenCalledWith({
name: 'research',
providerType: 'openai',
providerName: undefined,
baseUrl: undefined,
apiKey: 'sk-test',
modelId: 'gpt-5.4-mini',
})
})
})

View File

@@ -1,412 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*/
import { describe, expect, it, mock } from 'bun:test'
import { OPENCLAW_CONTAINER_HOME } from '@browseros/shared/constants/openclaw'
import { OpenClawCliClient } from '../../../../src/api/services/openclaw/openclaw-cli-client'
describe('OpenClawCliClient', () => {
it('passes real non-interactive onboarding flags through to the upstream cli', async () => {
const execInContainer = mock(async (command: string[]) => {
expect(command).toEqual([
'node',
'dist/index.js',
'onboard',
'--non-interactive',
'--mode',
'local',
'--auth-choice',
'skip',
'--gateway-auth',
'token',
'--gateway-port',
'18789',
'--gateway-bind',
'lan',
'--no-install-daemon',
'--skip-health',
'--accept-risk',
])
return 0
})
const client = new OpenClawCliClient({ execInContainer })
await client.runOnboard({
nonInteractive: true,
mode: 'local',
authChoice: 'skip',
gatewayAuth: 'token',
gatewayPort: 18789,
gatewayBind: 'lan',
installDaemon: false,
skipHealth: true,
acceptRisk: true,
})
})
it('uses batch mode for grouped config writes', async () => {
const execInContainer = mock(async (command: string[]) => {
expect(command).toEqual([
'node',
'dist/index.js',
'config',
'set',
'--batch-json',
'[{"path":"gateway.mode","value":"local"},{"path":"gateway.http.endpoints.chatCompletions.enabled","value":true}]',
])
return 0
})
const client = new OpenClawCliClient({ execInContainer })
await client.setConfigBatch([
{
path: 'gateway.mode',
value: 'local',
},
{
path: 'gateway.http.endpoints.chatCompletions.enabled',
value: true,
},
])
})
it('runs upstream CLI commands without appending a gateway token flag', async () => {
const execInContainer = mock(
async (command: string[], onLog?: (line: string) => void) => {
if (command[2] === 'agents' && command[3] === 'list') {
onLog?.(
JSON.stringify([
{
id: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
model: 'openrouter/anthropic/claude-sonnet-4.5',
},
]),
)
}
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const agents = await client.listAgents()
expect(execInContainer.mock.calls[0]?.[0]).toEqual([
'node',
'dist/index.js',
'agents',
'list',
'--json',
])
expect(agents[0]?.model).toBe('openrouter/anthropic/claude-sonnet-4.5')
})
it('derives the workspace when creating an agent', async () => {
let callIndex = 0
const execInContainer = mock(
async (command: string[], onLog?: (line: string) => void) => {
callIndex += 1
if (callIndex === 1) {
expect(command).toEqual([
'node',
'dist/index.js',
'agents',
'add',
'research',
'--workspace',
`${OPENCLAW_CONTAINER_HOME}/workspace-research`,
'--model',
'openai/gpt-5.4-mini',
'--non-interactive',
'--json',
])
return 0
}
onLog?.(
JSON.stringify([
{
id: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
},
{
id: 'research',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-research`,
model: 'openai/gpt-5.4-mini',
},
]),
)
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const agent = await client.createAgent({
name: 'research',
model: 'openai/gpt-5.4-mini',
})
expect(execInContainer).toHaveBeenCalledTimes(2)
expect(agent).toEqual({
agentId: 'research',
name: 'research',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-research`,
model: 'openai/gpt-5.4-mini',
})
})
it('parses agent lists from mixed log and JSON output', async () => {
const execInContainer = mock(
async (_command: string[], onLog?: (line: string) => void) => {
onLog?.('starting agent listing')
onLog?.(
JSON.stringify([
{
id: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
},
]),
)
onLog?.('done')
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const agents = await client.listAgents()
expect(agents).toEqual([
{
agentId: 'main',
name: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
},
])
})
it('parses pretty-printed JSON surrounded by logs', async () => {
const execInContainer = mock(
async (_command: string[], onLog?: (line: string) => void) => {
onLog?.('starting agent listing')
onLog?.('[')
onLog?.(' {')
onLog?.(' "id": "main",')
onLog?.(` "workspace": "${OPENCLAW_CONTAINER_HOME}/workspace",`)
onLog?.(' "model": "openrouter/anthropic/claude-sonnet-4.5"')
onLog?.(' }')
onLog?.(']')
onLog?.('done')
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const agents = await client.listAgents()
expect(agents).toEqual([
{
agentId: 'main',
name: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
model: 'openrouter/anthropic/claude-sonnet-4.5',
},
])
})
it('skips structured JSON logs before the real agent list payload', async () => {
const execInContainer = mock(
async (_command: string[], onLog?: (line: string) => void) => {
onLog?.(
JSON.stringify({
level: 'info',
message: 'agent list requested',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
}),
)
onLog?.(
JSON.stringify([
{
id: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
model: 'openrouter/anthropic/claude-sonnet-4.5',
},
]),
)
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const agents = await client.listAgents()
expect(agents).toEqual([
{
agentId: 'main',
name: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
model: 'openrouter/anthropic/claude-sonnet-4.5',
},
])
})
it('preserves exit details when the CLI fails', async () => {
const execInContainer = mock(
async (_command: string[], onLog?: (line: string) => void) => {
onLog?.('agent already exists')
return 1
},
)
const client = new OpenClawCliClient({ execInContainer })
await expect(client.listAgents()).rejects.toThrow('agent already exists')
})
it('parses config get output from mixed logs and pretty-printed JSON', async () => {
const execInContainer = mock(
async (command: string[], onLog?: (line: string) => void) => {
if (command[2] === 'config' && command[3] === 'get') {
onLog?.('reading config')
onLog?.('{')
onLog?.(' "gateway": {')
onLog?.(' "mode": "local"')
onLog?.(' }')
onLog?.('}')
onLog?.('done')
}
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const config = await client.getConfig('gateway')
expect(config).toEqual({
gateway: {
mode: 'local',
},
})
})
it('skips structured JSON log lines before config get payloads', async () => {
const execInContainer = mock(
async (command: string[], onLog?: (line: string) => void) => {
if (command[2] === 'config' && command[3] === 'get') {
onLog?.(
JSON.stringify({
level: 'info',
message: 'reading config',
}),
)
onLog?.('{')
onLog?.(' "gateway": {')
onLog?.(' "mode": "local"')
onLog?.(' }')
onLog?.('}')
}
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const config = await client.getConfig('gateway')
expect(config).toEqual({
gateway: {
mode: 'local',
},
})
})
it('skips structured JSON log lines before config validate payloads', async () => {
const execInContainer = mock(
async (command: string[], onLog?: (line: string) => void) => {
if (command[2] === 'config' && command[3] === 'validate') {
onLog?.(
JSON.stringify({
level: 'info',
message: 'validating config',
}),
)
onLog?.(
JSON.stringify({
ok: true,
warnings: [],
}),
)
}
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const result = await client.validateConfig()
expect(result).toEqual({
ok: true,
warnings: [],
})
})
it('keeps the config get payload when a structured JSON log follows it', async () => {
const execInContainer = mock(
async (command: string[], onLog?: (line: string) => void) => {
if (command[2] === 'config' && command[3] === 'get') {
onLog?.('{')
onLog?.(' "gateway": {')
onLog?.(' "mode": "local"')
onLog?.(' }')
onLog?.('}')
onLog?.(
JSON.stringify({
level: 'info',
message: 'config fetched',
}),
)
}
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const config = await client.getConfig('gateway')
expect(config).toEqual({
gateway: {
mode: 'local',
},
})
})
it('keeps the config validate payload when a structured JSON log follows it', async () => {
const execInContainer = mock(
async (command: string[], onLog?: (line: string) => void) => {
if (command[2] === 'config' && command[3] === 'validate') {
onLog?.(
JSON.stringify({
ok: true,
warnings: [],
}),
)
onLog?.(
JSON.stringify({
level: 'info',
message: 'config validated',
}),
)
}
return 0
},
)
const client = new OpenClawCliClient({ execInContainer })
const result = await client.validateConfig()
expect(result).toEqual({
ok: true,
warnings: [],
})
})
})

View File

@@ -1,32 +0,0 @@
import { afterEach, describe, expect, it } from 'bun:test'
import { mkdtemp, rm } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import {
resolveComposeResourcePath,
SOURCE_COMPOSE_RESOURCE,
} from '../../../../src/api/services/openclaw/openclaw-service'
// resolveComposeResourcePath: the packaged resourcesDir copy of the compose
// file wins when present; otherwise the source-tree copy is returned.
describe('resolveComposeResourcePath', () => {
  // Per-test temp directory, removed in afterEach so runs stay isolated.
  let tempDir: string | null = null
  afterEach(async () => {
    if (tempDir) {
      await rm(tempDir, { recursive: true, force: true })
      tempDir = null
    }
  })
  it('prefers the packaged resourcesDir copy when present', async () => {
    tempDir = await mkdtemp(join(tmpdir(), 'openclaw-compose-resource-'))
    const resourcesDir = join(tempDir, 'resources')
    const composePath = join(resourcesDir, 'openclaw-compose.yml')
    // Minimal valid compose content — presumably Bun.write creates the
    // parent directories as needed (verify if this test flakes).
    await Bun.write(composePath, 'services:\n')
    expect(resolveComposeResourcePath(resourcesDir)).toBe(composePath)
  })
  it('falls back to the source tree when no packaged copy exists', () => {
    expect(resolveComposeResourcePath(undefined)).toBe(SOURCE_COMPOSE_RESOURCE)
  })
})

View File

@@ -1,28 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*/
import { describe, expect, it } from 'bun:test'
import { buildComposeEnvFile } from '../../../../src/api/services/openclaw/openclaw-env'
// buildComposeEnvFile: verifies the OPENCLAW_IMAGE line written into the
// compose env file — pinned default tag, with an explicit override honored.
describe('buildComposeEnvFile', () => {
  it('pins the default OpenClaw image to 2026.4.12', () => {
    expect(
      buildComposeEnvFile({
        hostHome: '/tmp/openclaw-home',
        timezone: 'UTC',
      }),
    ).toContain('OPENCLAW_IMAGE=ghcr.io/openclaw/openclaw:2026.4.12')
  })
  it('respects an explicit image override', () => {
    expect(
      buildComposeEnvFile({
        hostHome: '/tmp/openclaw-home',
        timezone: 'UTC',
        image: 'ghcr.io/openclaw/openclaw:custom',
      }),
    ).toContain('OPENCLAW_IMAGE=ghcr.io/openclaw/openclaw:custom')
  })
})

View File

@@ -1,240 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*/
import { afterEach, describe, expect, it, mock } from 'bun:test'
import { OpenClawHttpChatClient } from '../../../../src/api/services/openclaw/openclaw-http-chat-client'
describe('OpenClawHttpChatClient', () => {
const originalFetch = globalThis.fetch
afterEach(() => {
globalThis.fetch = originalFetch
})
it('maps chat completion deltas into BrowserOS stream events', async () => {
const fetchMock = mock((_url: string | URL, _init?: RequestInit) =>
Promise.resolve(
new Response(
new ReadableStream({
start(controller) {
const encoder = new TextEncoder()
controller.enqueue(
encoder.encode(
'data: {"choices":[{"delta":{"content":"Hello"}}]}\n\n',
),
)
controller.enqueue(
encoder.encode(
'data: {"choices":[{"delta":{"content":" world"}}]}\n\n',
),
)
controller.enqueue(
encoder.encode(
'data: {"choices":[{"delta":{},"finish_reason":"stop"}]}\n\n',
),
)
controller.enqueue(encoder.encode('data: [DONE]\n\n'))
controller.close()
},
}),
{
status: 200,
headers: { 'Content-Type': 'text/event-stream' },
},
),
),
)
globalThis.fetch = fetchMock as typeof globalThis.fetch
const client = new OpenClawHttpChatClient(
18789,
async () => 'gateway-token',
)
const stream = await client.streamChat({
agentId: 'research',
sessionKey: 'session-123',
message: 'hi',
})
const events = await readEvents(stream)
const call = fetchMock.mock.calls[0]
expect(call?.[0]).toBe('http://127.0.0.1:18789/v1/chat/completions')
expect(call?.[1]).toMatchObject({
method: 'POST',
headers: {
Authorization: 'Bearer gateway-token',
'Content-Type': 'application/json',
},
})
expect(JSON.parse(String(call?.[1]?.body))).toEqual({
model: 'openclaw/research',
stream: true,
messages: [{ role: 'user', content: 'hi' }],
user: 'browseros:research:session-123',
})
expect(events).toEqual([
{ type: 'text-delta', data: { text: 'Hello' } },
{ type: 'text-delta', data: { text: ' world' } },
{ type: 'done', data: { text: 'Hello world' } },
])
})
it('uses openclaw for the main agent', async () => {
const fetchMock = mock(() =>
Promise.resolve(
new Response(
new ReadableStream({
start(controller) {
controller.close()
},
}),
{
status: 200,
headers: { 'Content-Type': 'text/event-stream' },
},
),
),
)
globalThis.fetch = fetchMock as typeof globalThis.fetch
const client = new OpenClawHttpChatClient(
18789,
async () => 'gateway-token',
)
await client.streamChat({
agentId: 'main',
sessionKey: 'session-123',
message: 'hi',
})
const body = JSON.parse(String(fetchMock.mock.calls[0]?.[1]?.body)) as {
model: string
}
expect(body.model).toBe('openclaw')
})
it('throws on non-success HTTP responses', async () => {
globalThis.fetch = mock(() =>
Promise.resolve(new Response('Unauthorized', { status: 401 })),
) as typeof globalThis.fetch
const client = new OpenClawHttpChatClient(
18789,
async () => 'gateway-token',
)
await expect(
client.streamChat({
agentId: 'research',
sessionKey: 'session-123',
message: 'hi',
}),
).rejects.toThrow('Unauthorized')
})
it('surfaces an error when OpenClaw finishes without assistant text', async () => {
  // The stream finishes (finish_reason: stop) without ever producing a
  // content delta, then sends the [DONE] sentinel.
  globalThis.fetch = mock(() =>
    Promise.resolve(
      new Response(
        new ReadableStream({
          start(controller) {
            const encoder = new TextEncoder()
            const frames = [
              'data: {"choices":[{"delta":{},"finish_reason":"stop"}]}\n\n',
              'data: [DONE]\n\n',
            ]
            for (const frame of frames) {
              controller.enqueue(encoder.encode(frame))
            }
            controller.close()
          },
        }),
        {
          status: 200,
          headers: { 'Content-Type': 'text/event-stream' },
        },
      ),
    ),
  ) as typeof globalThis.fetch
  const client = new OpenClawHttpChatClient(
    18789,
    async () => 'gateway-token',
  )
  const stream = await client.streamChat({
    agentId: 'main',
    sessionKey: 'session-123',
    message: 'hi',
  })
  // An empty completion must surface as a single user-facing error event.
  const events = await readEvents(stream)
  expect(events).toEqual([
    {
      type: 'error',
      data: {
        message: "Agent couldn't generate a response. Please try again.",
      },
    },
  ])
})
// One network read can carry several SSE frames. This test batches three
// frames into a single enqueue: a valid "Hello" delta, a frame whose data
// payload is not JSON, and a valid " world" delta. The client must surface
// the first delta, then emit a parse error and stop — the " world" delta
// after the malformed frame must never be delivered.
it('stops processing batched SSE events after a malformed chunk closes the stream', async () => {
  const fetchMock = mock(() =>
    Promise.resolve(
      new Response(
        new ReadableStream({
          start(controller) {
            const encoder = new TextEncoder()
            // All three frames arrive in one chunk; the middle one is malformed.
            controller.enqueue(
              encoder.encode(
                'data: {"choices":[{"delta":{"content":"Hello"}}]}\n\n' +
                  'data: not-json\n\n' +
                  'data: {"choices":[{"delta":{"content":" world"}}]}\n\n',
              ),
            )
            controller.close()
          },
        }),
        {
          status: 200,
          headers: { 'Content-Type': 'text/event-stream' },
        },
      ),
    ),
  )
  globalThis.fetch = fetchMock as typeof globalThis.fetch
  const client = new OpenClawHttpChatClient(
    18789,
    async () => 'gateway-token',
  )
  const stream = await client.streamChat({
    agentId: 'research',
    sessionKey: 'session-123',
    message: 'hi',
  })
  // Only the pre-error delta and the parse error are expected.
  await expect(readEvents(stream)).resolves.toEqual([
    { type: 'text-delta', data: { text: 'Hello' } },
    {
      type: 'error',
      data: { message: 'Failed to parse OpenClaw chat stream chunk' },
    },
  ])
})
})
/**
 * Drains a chat event stream into an array.
 *
 * Reads the stream to completion through its default reader and collects
 * every event in arrival order. The reader's lock is held until the stream
 * reports done.
 */
async function readEvents(
  stream: ReadableStream<{ type: string; data: Record<string, unknown> }>,
): Promise<Array<{ type: string; data: Record<string, unknown> }>> {
  const collected: Array<{ type: string; data: Record<string, unknown> }> = []
  const reader = stream.getReader()
  let chunk = await reader.read()
  while (!chunk.done) {
    collected.push(chunk.value)
    chunk = await reader.read()
  }
  return collected
}

View File

@@ -1,692 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*/
import { afterEach, describe, expect, it, mock } from 'bun:test'
import { existsSync } from 'node:fs'
import { mkdir, mkdtemp, readFile, rm, writeFile } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import { OPENCLAW_CONTAINER_HOME } from '@browseros/shared/constants/openclaw'
import {
resolveSupportedOpenClawProvider,
UnsupportedOpenClawProviderError,
} from '../../../../src/api/services/openclaw/openclaw-provider-map'
import { OpenClawService } from '../../../../src/api/services/openclaw/openclaw-service'
type MutableOpenClawService = OpenClawService & {
openclawDir: string
token: string
restart: ReturnType<typeof mock>
runtime: {
ensureReady?: () => Promise<void>
isPodmanAvailable?: () => Promise<boolean>
getMachineStatus?: () => Promise<{ initialized: boolean; running: boolean }>
isReady: () => Promise<boolean>
copyComposeFile?: (_source: string) => Promise<void>
writeEnvFile?: (_content: string) => Promise<void>
composePull?: () => Promise<void>
composeRestart?: () => Promise<void>
composeUp?: () => Promise<void>
waitForReady?: () => Promise<boolean>
}
cliClient: {
probe?: ReturnType<typeof mock>
createAgent?: ReturnType<typeof mock>
getConfig?: ReturnType<typeof mock>
listAgents?: ReturnType<typeof mock>
}
bootstrapCliClient: {
runOnboard?: ReturnType<typeof mock>
setConfigBatch?: ReturnType<typeof mock>
setDefaultModel?: ReturnType<typeof mock>
validateConfig?: ReturnType<typeof mock>
}
}
describe('OpenClawService', () => {
let tempDir: string | null = null
afterEach(async () => {
mock.restore()
if (tempDir) {
await rm(tempDir, { recursive: true, force: true })
tempDir = null
}
})
it('creates agents through the cli client without role bootstrap files', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
const createAgent = mock(async () => ({
agentId: 'ops',
name: 'ops',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-ops`,
model: 'openclaw/default',
}))
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.runtime = {
isReady: async () => true,
}
service.cliClient = {
createAgent,
}
const agent = await service.createAgent({
name: 'ops',
})
expect(createAgent).toHaveBeenCalledWith({
name: 'ops',
model: undefined,
})
expect(agent).toEqual({
agentId: 'ops',
name: 'ops',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-ops`,
model: 'openclaw/default',
})
expect(
existsSync(
join(tempDir, '.openclaw', 'workspace-ops', '.browseros-role.json'),
),
).toBe(false)
})
it('lists plain agent entries without role metadata', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw', 'workspace-ops'), {
recursive: true,
})
await writeFile(join(tempDir, '.openclaw', 'openclaw.json'), '{}')
await writeFile(
join(tempDir, '.openclaw', 'workspace-ops', '.browseros-role.json'),
'{"roleId":"chief-of-staff"}\n',
'utf-8',
)
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.runtime = {
isReady: async () => true,
}
service.cliClient = {
getConfig: mock(async () => 'cli-token'),
listAgents: mock(async () => [
{
agentId: 'ops',
name: 'ops',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-ops`,
model: 'openai/gpt-5.4-mini',
},
]),
}
await expect(service.listAgents()).resolves.toEqual([
{
agentId: 'ops',
name: 'ops',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-ops`,
model: 'openai/gpt-5.4-mini',
},
])
})
it('maps successful cli client probes into connected status', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw'), { recursive: true })
await writeFile(join(tempDir, '.openclaw', 'openclaw.json'), '{}')
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.runtime = {
isPodmanAvailable: async () => true,
getMachineStatus: async () => ({ initialized: true, running: true }),
isReady: async () => true,
}
service.cliClient = {
getConfig: mock(async () => 'cli-token'),
listAgents: mock(async () => [
{
agentId: 'main',
name: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
},
{
agentId: 'ops',
name: 'ops',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-ops`,
},
]),
}
const status = await service.getStatus()
expect(status).toEqual({
status: 'running',
podmanAvailable: true,
machineReady: true,
port: 18789,
agentCount: 2,
error: null,
controlPlaneStatus: 'connected',
lastGatewayError: null,
lastRecoveryReason: null,
})
})
it('creates the main agent during setup when the gateway starts without one', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
const steps: string[] = []
const runOnboard = mock(async () => {
steps.push('onboard')
})
const setConfigBatch = mock(async () => {
steps.push('batch')
})
const setDefaultModel = mock(async () => {})
const validateConfig = mock(async () => {
steps.push('validate')
return { ok: true }
})
const getConfig = mock(async (path: string) => {
if (path === 'gateway.auth.token') return 'cli-token'
return null
})
const createAgent = mock(async () => ({
agentId: 'main',
name: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
}))
const writeEnvFile = mock(async (_content: string) => {})
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.runtime = {
isPodmanAvailable: async () => true,
ensureReady: async () => {},
isReady: async () => true,
copyComposeFile: async () => {},
writeEnvFile,
composePull: async () => {},
composeRestart: mock(async () => {
steps.push('restart')
}),
composeUp: mock(async () => {
steps.push('up')
}),
waitForReady: mock(async () => {
steps.push('ready')
return true
}),
}
service.cliClient = {
getConfig,
probe: mock(async () => {}),
listAgents: mock(async () => []),
createAgent,
}
service.bootstrapCliClient = {
runOnboard,
setConfigBatch,
setDefaultModel,
validateConfig,
}
await service.setup({})
expect(runOnboard).toHaveBeenCalledWith({
acceptRisk: true,
authChoice: 'skip',
gatewayAuth: 'token',
gatewayBind: 'lan',
gatewayPort: 18789,
installDaemon: false,
mode: 'local',
nonInteractive: true,
skipHealth: true,
})
expect(setConfigBatch).toHaveBeenCalledWith(
expect.arrayContaining([
{
path: 'mcp.servers.browseros.url',
value: 'http://host.containers.internal:9100/mcp',
},
{
path: 'mcp.servers.browseros.transport',
value: 'streamable-http',
},
{
path: 'gateway.http.endpoints.chatCompletions.enabled',
value: true,
},
]),
)
expect(validateConfig).toHaveBeenCalled()
expect(createAgent).toHaveBeenCalledWith({
name: 'main',
model: undefined,
})
expect(steps).toEqual(['onboard', 'batch', 'validate', 'up', 'ready'])
expect(writeEnvFile).toHaveBeenCalledWith(
expect.stringContaining(`OPENCLAW_HOST_HOME=${tempDir}`),
)
expect(service.runtime.composeRestart).not.toHaveBeenCalled()
})
it('applies setup-time config in one batch before the gateway starts', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
const runOnboard = mock(async () => {})
const setConfigBatch = mock(async () => {})
const validateConfig = mock(async () => ({ ok: true }))
const getConfig = mock(async (path: string) => {
if (path === 'gateway.auth.token') return 'cli-token'
return null
})
const createAgent = mock(async () => ({
agentId: 'main',
name: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
}))
const waitForReady = mock(async () => true)
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.runtime = {
isPodmanAvailable: async () => true,
ensureReady: async () => {},
isReady: async () => true,
copyComposeFile: async () => {},
writeEnvFile: async () => {},
composePull: async () => {},
composeRestart: mock(async () => {}),
composeUp: async () => {},
waitForReady,
}
service.cliClient = {
getConfig,
probe: mock(async () => {}),
listAgents: mock(async () => []),
createAgent,
}
service.bootstrapCliClient = {
runOnboard,
setConfigBatch,
setDefaultModel: mock(async () => {}),
validateConfig,
}
await expect(service.setup({})).resolves.toBeUndefined()
expect(setConfigBatch).toHaveBeenCalledTimes(1)
expect(waitForReady).toHaveBeenCalledTimes(1)
expect(createAgent).toHaveBeenCalledWith({
name: 'main',
model: undefined,
})
expect(service.runtime.composeRestart).not.toHaveBeenCalled()
})
it('loads the persisted gateway token from the mounted config before control plane calls', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw'), { recursive: true })
await writeFile(
join(tempDir, '.openclaw', 'openclaw.json'),
JSON.stringify({
gateway: {
auth: {
token: 'cli-token',
},
},
}),
)
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.token = 'random-token'
service.runtime = {
isReady: async () => true,
}
service.cliClient = {
listAgents: mock(async () => {
expect(service.token).toBe('cli-token')
return []
}),
}
await service.listAgents()
})
it('caches the loaded gateway token from config across steady-state control plane calls', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw'), { recursive: true })
await writeFile(
join(tempDir, '.openclaw', 'openclaw.json'),
JSON.stringify({
gateway: {
auth: {
token: 'cli-token',
},
},
}),
)
const listAgents = mock(async () => [])
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.runtime = {
isReady: async () => true,
}
service.cliClient = {
listAgents,
}
await service.listAgents()
await service.listAgents()
expect(listAgents).toHaveBeenCalledTimes(2)
})
it('writes provider credentials into the mounted state env file during setup', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.runtime = {
isPodmanAvailable: async () => true,
ensureReady: async () => {},
isReady: async () => true,
copyComposeFile: async () => {},
writeEnvFile: async () => {},
composePull: async () => {},
composeRestart: async () => {},
composeUp: async () => {},
waitForReady: async () => true,
}
service.cliClient = {
getConfig: mock(async (path: string) =>
path === 'gateway.auth.token' ? 'cli-token' : null,
),
probe: mock(async () => {}),
listAgents: mock(async () => [
{
agentId: 'main',
name: 'main',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace`,
},
]),
createAgent: mock(async () => {
throw new Error('createAgent should not be called when main exists')
}),
}
service.bootstrapCliClient = {
runOnboard: mock(async () => {}),
setConfigBatch: mock(async () => {}),
setDefaultModel: mock(async () => {}),
validateConfig: mock(async () => ({ ok: true })),
}
await service.setup({
providerType: 'openai',
apiKey: 'sk-test',
modelId: 'gpt-5.4-mini',
})
expect(
await readFile(join(tempDir, '.openclaw', '.env'), 'utf-8'),
).toContain('OPENAI_API_KEY=sk-test')
})
it('keeps openrouter model refs verbatim without rewriting dots', () => {
const provider = resolveSupportedOpenClawProvider({
providerType: 'openrouter',
apiKey: 'or-key',
modelId: 'anthropic/claude-haiku-4.5',
})
expect(provider).toEqual({
envValues: {
OPENROUTER_API_KEY: 'or-key',
},
model: 'openrouter/anthropic/claude-haiku-4.5',
providerType: 'openrouter',
})
})
it('only resolves env vars for the supported bootstrap providers', () => {
expect(
resolveSupportedOpenClawProvider({
providerType: 'anthropic',
apiKey: 'ant-key',
modelId: 'claude-sonnet-4.5',
}),
).toEqual({
envValues: {
ANTHROPIC_API_KEY: 'ant-key',
},
model: 'anthropic/claude-sonnet-4.5',
providerType: 'anthropic',
})
expect(
resolveSupportedOpenClawProvider({
providerType: 'moonshot',
apiKey: 'moon-key',
modelId: 'kimi-k2',
}),
).toEqual({
envValues: {
MOONSHOT_API_KEY: 'moon-key',
},
model: 'moonshot/kimi-k2',
providerType: 'moonshot',
})
expect(() =>
resolveSupportedOpenClawProvider({
providerType: 'google',
apiKey: 'google-key',
modelId: 'gemini-2.5-pro',
}),
).toThrow(new UnsupportedOpenClawProviderError('google'))
expect(() =>
resolveSupportedOpenClawProvider({
providerType: 'custom-api-key',
baseUrl: 'https://example.test/v1',
apiKey: 'custom-key',
modelId: 'custom-model',
}),
).toThrow(new UnsupportedOpenClawProviderError('custom-api-key'))
})
it('rejects unsupported providers before mutating env or creating agents', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
const createAgent = mock(async () => ({
agentId: 'ops',
name: 'ops',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-ops`,
}))
const restart = mock(async () => {})
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.restart = restart
service.runtime = {
isReady: async () => true,
}
service.cliClient = {
createAgent,
}
await expect(
service.createAgent({
name: 'ops',
providerType: 'google',
apiKey: 'google-key',
modelId: 'gemini-2.5-pro',
}),
).rejects.toThrow('Unsupported OpenClaw provider')
expect(createAgent).not.toHaveBeenCalled()
expect(restart).not.toHaveBeenCalled()
expect(existsSync(join(tempDir, '.openclaw', '.env'))).toBe(false)
})
it('passes openrouter model refs through verbatim into agent creation', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw'), { recursive: true })
await writeFile(
join(tempDir, '.openclaw', '.env'),
'OPENROUTER_API_KEY=or-key\n',
'utf-8',
)
const createAgent = mock(async () => ({
agentId: 'research',
name: 'research',
workspace: `${OPENCLAW_CONTAINER_HOME}/workspace-research`,
model: 'openrouter/anthropic/claude-haiku-4.5',
}))
const restart = mock(async () => {})
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.restart = restart
service.runtime = {
isReady: async () => true,
}
service.cliClient = {
createAgent,
}
await service.createAgent({
name: 'research',
providerType: 'openrouter',
apiKey: 'or-key',
modelId: 'anthropic/claude-haiku-4.5',
})
expect(createAgent).toHaveBeenCalledWith({
name: 'research',
model: 'openrouter/anthropic/claude-haiku-4.5',
})
expect(restart).not.toHaveBeenCalled()
})
it('updateProviderKeys rejects unsupported providers without restarting', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
const restart = mock(async () => {})
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.restart = restart
await expect(
service.updateProviderKeys({
providerType: 'google',
apiKey: 'google-key',
modelId: 'gemini-2.5-pro',
}),
).rejects.toThrow('Unsupported OpenClaw provider')
expect(restart).not.toHaveBeenCalled()
})
it('does not restart when provider env content is unchanged', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw'), { recursive: true })
await writeFile(
join(tempDir, '.openclaw', '.env'),
'OPENAI_API_KEY=sk-test\n',
'utf-8',
)
const restart = mock(async () => {})
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.restart = restart
await service.updateProviderKeys({
providerType: 'openai',
apiKey: 'sk-test',
})
expect(restart).not.toHaveBeenCalled()
expect(await readFile(join(tempDir, '.openclaw', '.env'), 'utf-8')).toBe(
'OPENAI_API_KEY=sk-test\n',
)
})
it('applies the default model when provider keys are unchanged', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw'), { recursive: true })
await writeFile(
join(tempDir, '.openclaw', '.env'),
'OPENAI_API_KEY=sk-test\n',
'utf-8',
)
const restart = mock(async () => {})
const setDefaultModel = mock(async () => {})
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.restart = restart
service.runtime = {
isReady: async () => true,
waitForReady: async () => true,
}
service.cliClient = {
setDefaultModel,
}
await expect(
service.updateProviderKeys({
providerType: 'openai',
apiKey: 'sk-test',
modelId: 'gpt-5.4-mini',
}),
).resolves.toEqual({
modelUpdated: true,
restarted: false,
})
expect(setDefaultModel).toHaveBeenCalledWith('openai/gpt-5.4-mini')
expect(restart).not.toHaveBeenCalled()
expect(await readFile(join(tempDir, '.openclaw', '.env'), 'utf-8')).toBe(
'OPENAI_API_KEY=sk-test\n',
)
})
it('does not persist env updates when setting the default model fails', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'openclaw-service-'))
await mkdir(join(tempDir, '.openclaw'), { recursive: true })
const setDefaultModel = mock(async () => {
throw new Error('container unavailable')
})
const restart = mock(async () => {})
const service = new OpenClawService() as MutableOpenClawService
service.openclawDir = tempDir
service.restart = restart
service.cliClient = {
setDefaultModel,
}
await expect(
service.updateProviderKeys({
providerType: 'openai',
apiKey: 'sk-test',
modelId: 'gpt-5.4-mini',
}),
).rejects.toThrow('container unavailable')
expect(setDefaultModel).toHaveBeenCalledWith('openai/gpt-5.4-mini')
expect(restart).not.toHaveBeenCalled()
expect(existsSync(join(tempDir, '.openclaw', '.env'))).toBe(false)
})
})

View File

@@ -1,83 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*/
import { afterEach, beforeEach, describe, expect, it } from 'bun:test'
import fs from 'node:fs'
import os from 'node:os'
import path from 'node:path'
import {
configurePodmanRuntime,
getPodmanRuntime,
resolveBundledPodmanPath,
} from '../../../../src/api/services/openclaw/podman-runtime'
describe('podman runtime', () => {
let tempDir: string
beforeEach(() => {
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'browseros-podman-test-'))
})
afterEach(() => {
fs.rmSync(tempDir, { recursive: true, force: true })
configurePodmanRuntime({ podmanPath: 'podman' })
})
it('returns the bundled podman path when the executable exists', () => {
const bundledPath = path.join(
tempDir,
'bin',
'third_party',
'podman',
'podman',
)
fs.mkdirSync(path.dirname(bundledPath), { recursive: true })
fs.writeFileSync(bundledPath, 'podman')
expect(resolveBundledPodmanPath(tempDir, 'darwin')).toBe(bundledPath)
})
it('uses the windows executable name for bundled podman', () => {
const bundledPath = path.join(
tempDir,
'bin',
'third_party',
'podman',
'podman.exe',
)
fs.mkdirSync(path.dirname(bundledPath), { recursive: true })
fs.writeFileSync(bundledPath, 'podman')
expect(resolveBundledPodmanPath(tempDir, 'win32')).toBe(bundledPath)
})
it('returns null when no bundled podman executable exists', () => {
expect(resolveBundledPodmanPath(tempDir, 'darwin')).toBeNull()
})
it('configures the runtime to prefer the bundled podman path', () => {
const bundledPath = path.join(
tempDir,
'bin',
'third_party',
'podman',
'podman',
)
fs.mkdirSync(path.dirname(bundledPath), { recursive: true })
fs.writeFileSync(bundledPath, 'podman')
const runtime = configurePodmanRuntime({ resourcesDir: tempDir })
expect(runtime.getPodmanPath()).toBe(bundledPath)
expect(getPodmanRuntime().getPodmanPath()).toBe(bundledPath)
})
it('falls back to PATH podman when no bundled executable is present', () => {
const runtime = configurePodmanRuntime({ resourcesDir: tempDir })
expect(runtime.getPodmanPath()).toBe('podman')
})
})

View File

@@ -1,75 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*/
import { afterEach, beforeEach, describe, expect, it, mock } from 'bun:test'
import { homedir } from 'node:os'
import { join } from 'node:path'
import { PATHS } from '@browseros/shared/constants/paths'
import {
getBrowserosDir,
logDevelopmentBrowserosDir,
} from '../src/lib/browseros-dir'
import { logger } from '../src/lib/logger'
describe('getBrowserosDir', () => {
const originalNodeEnv = process.env.NODE_ENV
beforeEach(() => {
delete process.env.NODE_ENV
})
afterEach(() => {
if (originalNodeEnv === undefined) {
delete process.env.NODE_ENV
return
}
process.env.NODE_ENV = originalNodeEnv
})
it('uses a separate home directory in development', () => {
process.env.NODE_ENV = 'development'
expect(getBrowserosDir()).toBe(join(homedir(), '.browseros-dev'))
})
it('uses the standard home directory outside development', () => {
process.env.NODE_ENV = 'test'
expect(getBrowserosDir()).toBe(join(homedir(), PATHS.BROWSEROS_DIR_NAME))
})
it('logs the resolved development directory path', () => {
process.env.NODE_ENV = 'development'
const originalInfo = logger.info
const info = mock(() => {})
logger.info = info
try {
logDevelopmentBrowserosDir()
expect(info).toHaveBeenCalledWith(
`Using development BrowserOS directory: ${join(homedir(), '.browseros-dev')}`,
)
} finally {
logger.info = originalInfo
}
})
it('does not log a development directory outside development', () => {
process.env.NODE_ENV = 'test'
const originalInfo = logger.info
const info = mock(() => {})
logger.info = info
try {
logDevelopmentBrowserosDir()
expect(info).not.toHaveBeenCalled()
} finally {
logger.info = originalInfo
}
})
})

View File

@@ -3,7 +3,7 @@
* Copyright 2025 BrowserOS
*/
import { afterEach, describe, expect, it, mock, spyOn } from 'bun:test'
import { afterEach, describe, expect, it, mock } from 'bun:test'
const config = {
cdpPort: 9222,
@@ -19,85 +19,87 @@ const config = {
describe('Application.start', () => {
afterEach(() => {
mock.restore()
mock.clearAllMocks()
})
it('starts with the CDP backend only', async () => {
const apiServer = await import('../src/api/server')
const browserModule = await import('../src/browser/browser')
const cdpModule = await import('../src/browser/backends/cdp')
const browserosDir = await import('../src/lib/browseros-dir')
const dbModule = await import('../src/lib/db')
const identityModule = await import('../src/lib/identity')
const loggerModule = await import('../src/lib/logger')
const metricsModule = await import('../src/lib/metrics')
const sentryModule = await import('../src/lib/sentry')
const soulModule = await import('../src/lib/soul')
const openclawService = await import(
'../src/api/services/openclaw/openclaw-service'
)
const podmanRuntime = await import(
'../src/api/services/openclaw/podman-runtime'
)
const migrateModule = await import('../src/skills/migrate')
const remoteSyncModule = await import('../src/skills/remote-sync')
const createHttpServer = spyOn(apiServer, 'createHttpServer')
createHttpServer.mockImplementation(async () => ({}) as never)
const createHttpServer = mock(async () => ({}))
const cdpConnect = mock(async () => {})
spyOn(cdpModule.CdpBackend.prototype, 'connect').mockImplementation(
cdpConnect,
)
spyOn(browserosDir, 'cleanOldSessions').mockImplementation(async () => {})
spyOn(browserosDir, 'ensureBrowserosDir').mockImplementation(async () => {})
spyOn(browserosDir, 'writeServerConfig').mockImplementation(async () => {})
spyOn(browserosDir, 'removeServerConfigSync').mockImplementation(() => {})
spyOn(dbModule, 'initializeDb').mockImplementation(() => ({}) as never)
spyOn(identityModule.identity, 'initialize').mockImplementation(() => {})
spyOn(identityModule.identity, 'getBrowserOSId').mockImplementation(
() => 'browseros-id',
)
const loggerInfo = spyOn(loggerModule.logger, 'info').mockImplementation(
() => {},
)
const loggerWarn = spyOn(loggerModule.logger, 'warn').mockImplementation(
() => {},
)
spyOn(loggerModule.logger, 'debug').mockImplementation(() => {})
const loggerError = spyOn(loggerModule.logger, 'error').mockImplementation(
() => {},
)
spyOn(loggerModule.logger, 'setLogFile').mockImplementation(() => {})
spyOn(metricsModule.metrics, 'initialize').mockImplementation(() => {})
spyOn(metricsModule.metrics, 'isEnabled').mockImplementation(() => true)
spyOn(metricsModule.metrics, 'log').mockImplementation(() => {})
spyOn(sentryModule.Sentry, 'setContext').mockImplementation(() => {})
spyOn(sentryModule.Sentry, 'setUser').mockImplementation(() => {})
spyOn(sentryModule.Sentry, 'captureException').mockImplementation(() => {})
spyOn(soulModule, 'seedSoulTemplate').mockImplementation(async () => {})
spyOn(migrateModule, 'migrateBuiltinSkills').mockImplementation(
async () => {},
)
spyOn(remoteSyncModule, 'syncBuiltinSkills').mockImplementation(
async () => {},
)
spyOn(remoteSyncModule, 'startSkillSync').mockImplementation(() => {})
spyOn(remoteSyncModule, 'stopSkillSync').mockImplementation(() => {})
spyOn(podmanRuntime, 'configurePodmanRuntime').mockImplementation(() => {})
spyOn(openclawService, 'configureOpenClawService').mockImplementation(
() =>
({
tryAutoStart: async () => {},
}) as never,
)
const browserCtor = mock(() => {})
const loggerInfo = mock(() => {})
const loggerWarn = mock(() => {})
const loggerDebug = mock(() => {})
const loggerError = mock(() => {})
mock.module('../src/api/server', () => ({
createHttpServer,
}))
mock.module('../src/browser/backends/cdp', () => ({
CdpBackend: class {
async connect(): Promise<void> {
await cdpConnect()
}
},
}))
mock.module('../src/browser/browser', () => ({
Browser: class {
constructor(cdp: unknown) {
browserCtor(cdp)
}
},
}))
mock.module('../src/lib/browseros-dir', () => ({
cleanOldSessions: mock(async () => {}),
ensureBrowserosDir: mock(async () => {}),
removeServerConfigSync: mock(() => {}),
writeServerConfig: mock(async () => {}),
}))
mock.module('../src/lib/db', () => ({
initializeDb: mock(() => ({})),
}))
mock.module('../src/lib/identity', () => ({
identity: {
initialize: mock(() => {}),
getBrowserOSId: mock(() => 'browseros-id'),
},
}))
mock.module('../src/lib/logger', () => ({
logger: {
setLogFile: mock(() => {}),
info: loggerInfo,
warn: loggerWarn,
debug: loggerDebug,
error: loggerError,
},
}))
mock.module('../src/lib/metrics', () => ({
metrics: {
initialize: mock(() => {}),
isEnabled: mock(() => true),
log: mock(() => {}),
},
}))
mock.module('../src/lib/sentry', () => ({
Sentry: {
setContext: mock(() => {}),
setUser: mock(() => {}),
captureException: mock(() => {}),
},
}))
mock.module('../src/lib/soul', () => ({
seedSoulTemplate: mock(async () => {}),
}))
mock.module('../src/skills/migrate', () => ({
migrateBuiltinSkills: mock(async () => {}),
}))
mock.module('../src/skills/remote-sync', () => ({
startSkillSync: mock(() => {}),
stopSkillSync: mock(() => {}),
syncBuiltinSkills: mock(async () => {}),
}))
mock.module('../src/tools/registry', () => ({
registry: {
names: () => ['test_tool'],
},
}))
const { Application } = await import('../src/main')
const app = new Application(config)
@@ -105,14 +107,9 @@ describe('Application.start', () => {
await app.start()
expect(cdpConnect).toHaveBeenCalledTimes(1)
expect(browserCtor).toHaveBeenCalledTimes(1)
expect(createHttpServer).toHaveBeenCalledTimes(1)
expect(createHttpServer.mock.calls[0]?.[0]).toEqual(
expect.objectContaining({
browser: expect.any(browserModule.Browser),
}),
)
expect(createHttpServer.mock.calls[0]?.[0]).not.toHaveProperty('controller')
expect(loggerInfo).toHaveBeenCalled()
expect(loggerWarn).not.toHaveBeenCalled()
expect(loggerError).not.toHaveBeenCalled()
})

View File

@@ -257,5 +257,24 @@ describe('Agent SDK Integration', () => {
assert.ok(result.data, 'Should return extracted data')
}, 60000)
// Smoke-checks that a windowId supplied at agent construction flows through
// verify(). A second, windowId-less agent performs the navigation first so a
// page exists; the windowId-bound agent then runs the verification.
// NOTE(review): only the result's shape is asserted (success is a boolean) —
// this does not prove the verification targeted the given window. Presumably
// both agents resolve to the same runtime window; confirm against
// createAgent()'s windowId handling.
it('passes windowId through verify()', async () => {
  const testWindowId = runtimeWindowId
  const agent = createAgent({ windowId: testWindowId })
  const plainAgent = createAgent()
  await plainAgent.nav('data:text/html,<h1>Verify Test</h1>')
  const result = await agent.verify('the page has some content')
  console.log('\n=== verify() with windowId ===')
  console.log('windowId:', testWindowId)
  console.log('result:', JSON.stringify(result, null, 2))
  assert.ok(
    typeof result.success === 'boolean',
    'Should return success boolean',
  )
}, 60000)
})
})

View File

@@ -12,7 +12,6 @@ import { URL } from 'node:url'
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'
import { MOCK_BROWSEROS_RESPONSE_TEXT } from '../src/lib/clients/llm/mock-language-model'
import {
cleanupBrowserOS,
ensureBrowserOS,
@@ -156,7 +155,7 @@ describe('HTTP Server Integration Tests', () => {
describe('Chat endpoint', () => {
it(
'streams a mocked chat response for BrowserOS provider requests in test mode',
'streams a chat response with BrowserOS provider',
async () => {
const conversationId = crypto.randomUUID()
@@ -207,10 +206,6 @@ describe('HTTP Server Integration Tests', () => {
fullResponse.includes('data:'),
'Should contain SSE data events',
)
assert.ok(
fullResponse.includes(MOCK_BROWSEROS_RESPONSE_TEXT),
'Should include the mocked BrowserOS chat response',
)
const deleteResponse = await fetch(
`${getBaseUrl()}/chat/${conversationId}`,

View File

@@ -1,16 +1,13 @@
import { describe, it, setDefaultTimeout } from 'bun:test'
setDefaultTimeout(30_000)
import assert from 'node:assert'
import { spawnSync } from 'node:child_process'
import { resolve } from 'node:path'
import type { AclRule, ElementProperties } from '@browseros/shared/types/acl'
import { editDistanceRatio } from '../../src/tools/acl/acl-edit-distance'
import { scoreFixture } from '../../src/tools/acl/acl-scorer'
const TEST_TIMEOUT_MS = 30_000
setDefaultTimeout(TEST_TIMEOUT_MS)
process.env.ACL_EMBEDDING_DISABLE = 'true'
// --- Edit distance tests ---
describe('editDistanceRatio', () => {
@@ -194,38 +191,19 @@ describe('scoreFixture', () => {
// --- Fixture tests ---
function runSemanticFixture(name: string) {
const runnerPath = resolve(
import.meta.dir,
'../__helpers__/acl-fixture-runner.ts',
)
const result = spawnSync(
'bun',
['--env-file=.env.development', runnerPath, name],
{
cwd: process.cwd(),
encoding: 'utf8',
timeout: TEST_TIMEOUT_MS,
env: {
...process.env,
ACL_EMBEDDING_DISABLE: 'false',
LOG_LEVEL: 'silent',
},
},
)
const failureMessage =
result.error?.message ||
result.stderr ||
result.stdout ||
'semantic fixture subprocess failed'
async function loadFixture(name: string) {
const path = resolve(import.meta.dir, `../__fixtures__/acl/${name}.json`)
return Bun.file(path).json()
}
assert.strictEqual(result.status, 0, failureMessage)
return JSON.parse(result.stdout)
async function runFixture(name: string) {
const f = await loadFixture(name)
return scoreFixture(f.tool_name, f.page_url, f.element, f.rules)
}
describe('fixture: submit-button (exact match)', () => {
it('blocks checkout submit button', async () => {
const decision = runSemanticFixture('submit-button')
const decision = await runFixture('submit-button')
assert.strictEqual(decision.blocked, true)
assert.strictEqual(decision.matchedRuleId, 'checkout-submit')
@@ -237,7 +215,7 @@ describe('fixture: submit-button (exact match)', () => {
})
it('uses the embedding model for scoring', async () => {
const decision = runSemanticFixture('submit-button')
const decision = await runFixture('submit-button')
const top = decision.candidates[0]
assert.ok(
@@ -250,7 +228,7 @@ describe('fixture: submit-button (exact match)', () => {
describe('fixture: semantic-payment (semantic match)', () => {
it('blocks "Proceed to Checkout" against payment prevention rule', async () => {
const decision = runSemanticFixture('semantic-payment')
const decision = await runFixture('semantic-payment')
assert.strictEqual(decision.blocked, true)
assert.strictEqual(decision.matchedRuleId, 'block-payments')
@@ -261,7 +239,7 @@ describe('fixture: semantic-payment (semantic match)', () => {
})
it('has a meaningful semantic score', async () => {
const decision = runSemanticFixture('semantic-payment')
const decision = await runFixture('semantic-payment')
const top = decision.candidates[0]
if (top.semanticBackend === 'transformers.js') {
@@ -275,7 +253,7 @@ describe('fixture: semantic-payment (semantic match)', () => {
describe('fixture: semantic-delete (semantic match)', () => {
it('blocks "Remove my account permanently" against deletion rule', async () => {
const decision = runSemanticFixture('semantic-delete')
const decision = await runFixture('semantic-delete')
assert.strictEqual(decision.blocked, true)
assert.strictEqual(decision.matchedRuleId, 'block-delete')
@@ -288,7 +266,7 @@ describe('fixture: semantic-delete (semantic match)', () => {
describe('fixture: semantic-send-email (semantic match)', () => {
it('blocks send button on mail compose page', async () => {
const decision = runSemanticFixture('semantic-send-email')
const decision = await runFixture('semantic-send-email')
assert.strictEqual(decision.blocked, true)
assert.strictEqual(decision.matchedRuleId, 'block-outbound-email')
@@ -301,7 +279,7 @@ describe('fixture: semantic-send-email (semantic match)', () => {
describe('fixture: semantic-safe (no false positive)', () => {
it('allows "View Report" against payment and deletion rules', async () => {
const decision = runSemanticFixture('semantic-safe')
const decision = await runFixture('semantic-safe')
assert.strictEqual(decision.blocked, false)
assert.ok(

View File

@@ -1,7 +1,6 @@
import { afterAll, describe, it } from 'bun:test'
import { describe, it } from 'bun:test'
import assert from 'node:assert'
import type { Browser } from '../../src/browser/browser'
import { disposeSemanticPipeline } from '../../src/tools/acl/acl-embeddings'
import { executeTool, type ToolContext } from '../../src/tools/framework'
import {
check,
@@ -17,9 +16,7 @@ import {
} from '../../src/tools/input'
import { close_page, navigate_page, new_page } from '../../src/tools/navigation'
import { evaluate_script, take_snapshot } from '../../src/tools/snapshot'
import { cleanupWithBrowser, withBrowser } from '../__helpers__/with-browser'
process.env.ACL_EMBEDDING_DISABLE = 'true'
import { withBrowser } from '../__helpers__/with-browser'
function textOf(result: {
content: { type: string; text?: string }[]
@@ -55,72 +52,6 @@ function findElementId(snapshotText: string, label: string): number {
return Number.parseInt(match[1], 10)
}
async function pointInsideElement(
ctx: ToolContext,
pageId: number,
elementDomId: string,
): Promise<{ x: number; y: number }> {
const pointResult = await executeTool(
evaluate_script,
{
page: pageId,
expression: `(() => {
const el = document.getElementById(${JSON.stringify(elementDomId)});
if (!el) return null;
const rect = el.getBoundingClientRect();
const insetX = Math.max(1, Math.min(10, Math.floor(rect.width / 4)));
const insetY = Math.max(1, Math.min(10, Math.floor(rect.height / 4)));
const candidates = [
{
x: Math.round(rect.left + rect.width / 2),
y: Math.round(rect.top + rect.height / 2),
},
{
x: Math.round(rect.left + insetX),
y: Math.round(rect.top + insetY),
},
{
x: Math.round(rect.right - insetX),
y: Math.round(rect.top + insetY),
},
{
x: Math.round(rect.left + insetX),
y: Math.round(rect.bottom - insetY),
},
{
x: Math.round(rect.right - insetX),
y: Math.round(rect.bottom - insetY),
},
];
for (const candidate of candidates) {
const target = document.elementFromPoint(candidate.x, candidate.y);
if (target && (target === el || el.contains(target))) {
return { ...candidate, matched: true, hitId: target.id || null };
}
}
const fallback = candidates[0];
const fallbackTarget = document.elementFromPoint(fallback.x, fallback.y);
return {
...fallback,
matched: false,
hitId: fallbackTarget instanceof Element ? fallbackTarget.id || null : null,
};
})()`,
},
ctx,
AbortSignal.timeout(30_000),
)
const point = structuredOf<{
value: { x: number; y: number; matched: boolean; hitId: string | null }
} | null>(pointResult)?.value
assert.ok(point, `Expected a point for #${elementDomId}`)
assert.ok(
point.matched,
`Expected coordinates inside #${elementDomId}, got ${point.hitId ?? 'null'}`,
)
return { x: point.x, y: point.y }
}
const FORM_PAGE = `data:text/html,${encodeURIComponent(`<!DOCTYPE html>
<html><body>
<h1>Test Form</h1>
@@ -158,11 +89,6 @@ const FORM_PAGE = `data:text/html,${encodeURIComponent(`<!DOCTYPE html>
</script>
</body></html>`)}`
afterAll(async () => {
await disposeSemanticPipeline()
await cleanupWithBrowser()
})
describe('input tools', () => {
it('fill types text into an input', async () => {
await withBrowser(async ({ execute }) => {
@@ -484,7 +410,7 @@ describe('input tools', () => {
{
id: 'submit-rule',
sitePattern: '*',
textMatch: 'Submit',
description: 'submit',
enabled: true,
},
]
@@ -511,7 +437,7 @@ describe('input tools', () => {
{
id: 'submit-rule',
sitePattern: '*',
textMatch: 'Submit',
description: 'submit',
enabled: true,
},
{
@@ -531,7 +457,24 @@ describe('input tools', () => {
)
const pageId = pageIdOf(newResult)
const buttonPoint = await pointInsideElement(ctx, pageId, 'submit-btn')
const buttonCenter = await executeTool(
evaluate_script,
{
page: pageId,
expression: `(() => {
const rect = document.getElementById('submit-btn').getBoundingClientRect();
return {
x: Math.round(rect.left + rect.width / 2),
y: Math.round(rect.top + rect.height / 2),
};
})()`,
},
ctx,
AbortSignal.timeout(30_000),
)
const buttonPoint = structuredOf<{ value: { x: number; y: number } }>(
buttonCenter,
).value
const blockedClick = await executeTool(
click_at,
@@ -549,7 +492,24 @@ describe('input tools', () => {
},
]
const inputPoint = await pointInsideElement(ctx, pageId, 'name')
const inputCenter = await executeTool(
evaluate_script,
{
page: pageId,
expression: `(() => {
const rect = document.getElementById('name').getBoundingClientRect();
return {
x: Math.round(rect.left + rect.width / 2),
y: Math.round(rect.top + rect.height / 2),
};
})()`,
},
ctx,
AbortSignal.timeout(30_000),
)
const inputPoint = structuredOf<{ value: { x: number; y: number } }>(
inputCenter,
).value
const blockedType = await executeTool(
type_at,

View File

@@ -156,7 +156,7 @@
},
"apps/server": {
"name": "@browseros/server",
"version": "0.0.88",
"version": "0.0.83",
"bin": {
"browseros-server": "./src/index.ts",
},

View File

@@ -27,17 +27,10 @@
"build:agent": "bun run codegen:agent && bun run --filter @browseros/agent build",
"build:agent-sdk": "bun run --filter @browseros-ai/agent-sdk build",
"codegen:agent": "bun run --filter @browseros/agent codegen",
"test": "bun run test:all",
"test:all": "bun run test:server && bun run test:agent && bun run test:eval && bun run test:agent-sdk && bun run test:build",
"test:server": "bun run --filter @browseros/server test",
"test": "FORCE_COLOR=1 bun run --filter @browseros/server --elide-lines=0 test:tools",
"test:tools": "bun run --filter @browseros/server test:tools",
"test:cdp": "bun run --filter @browseros/server test:cdp",
"test:integration": "bun run --filter @browseros/server test:integration",
"test:sdk": "bun run --filter @browseros/server test:sdk",
"test:agent": "bun run ./scripts/run-bun-test.ts ./apps/agent",
"test:eval": "bun run ./scripts/run-bun-test.ts ./apps/eval/tests",
"test:agent-sdk": "bun run ./scripts/run-bun-test.ts ./packages/agent-sdk",
"test:build": "bun run ./scripts/run-bun-test.ts ./scripts/build",
"typecheck": "bun run --filter '*' typecheck",
"lint": "bunx biome check",
"lint:fix": "bunx biome check --write --unsafe",

View File

@@ -45,9 +45,9 @@ export class Agent implements AsyncDisposable, AgentContext {
readonly baseUrl: string
readonly llmConfig?: LLMConfig
readonly signal?: AbortSignal
readonly browserContext?: BrowserContext
readonly stateful: boolean
private _browserContext?: BrowserContext
private progressCallback?: (event: UIMessageStreamEvent) => void
private _sessionId: string | null = null
private _disposed = false
@@ -57,7 +57,7 @@ export class Agent implements AsyncDisposable, AgentContext {
this.llmConfig = options.llm
this.progressCallback = options.onProgress
this.signal = options.signal
this._browserContext = options.browserContext
this.browserContext = options.browserContext
this.stateful = options.stateful ?? true
if (this.stateful) {
@@ -65,10 +65,6 @@ export class Agent implements AsyncDisposable, AgentContext {
}
}
get browserContext(): BrowserContext | undefined {
return this._browserContext
}
get sessionId(): string | null {
return this._sessionId
}
@@ -106,21 +102,6 @@ export class Agent implements AsyncDisposable, AgentContext {
this.progressCallback?.(event)
}
updateNavigationContext(
result: { tabId: number; windowId?: number },
url: string,
): void {
const nextWindowId = result.windowId ?? this._browserContext?.windowId
this._browserContext = {
...this._browserContext,
...(nextWindowId !== undefined ? { windowId: nextWindowId } : {}),
activeTab: {
id: result.tabId,
url,
},
}
}
/**
* Navigate to a URL and wait for the page to load.
*

View File

@@ -15,11 +15,4 @@ export interface AgentContext {
emit(event: UIMessageStreamEvent): void
throwIfAborted(): void
updateNavigationContext(
result: {
tabId: number
windowId?: number
},
url: string,
): void
}

View File

@@ -15,13 +15,14 @@ async function executeAct(
ctx.throwIfAborted()
const url = `${ctx.baseUrl}/sdk/act`
const browserContext =
options?.windowId === undefined
? ctx.browserContext
: {
...(ctx.browserContext ?? {}),
windowId: options.windowId,
}
const browserContextForAct = ctx.browserContext
? {
windowId: ctx.browserContext.windowId,
enabledMcpServers: ctx.browserContext.enabledMcpServers,
customMcpServers: ctx.browserContext.customMcpServers,
}
: undefined
let response: Response
try {
@@ -32,7 +33,7 @@ async function executeAct(
instruction,
context: options?.context,
maxSteps: options?.maxSteps,
browserContext,
browserContext: browserContextForAct,
llm: ctx.llmConfig,
sessionId: ctx.sessionId,
}),

View File

@@ -27,7 +27,6 @@ export async function extract<T>(
schema: jsonSchema,
context: options.context,
windowId: ctx.browserContext?.windowId,
tabId: ctx.browserContext?.activeTab?.id,
},
ExtractionError,
)

View File

@@ -25,13 +25,6 @@ export async function nav(
NavigationError,
)
if (result.success && result.tabId !== undefined) {
ctx.updateNavigationContext(
{ tabId: result.tabId, windowId: result.windowId },
url,
)
}
ctx.emit({
type: 'text-delta',
id: 'nav',

View File

@@ -23,7 +23,6 @@ export async function verify(
expectation,
context: options?.context,
windowId: ctx.browserContext?.windowId,
tabId: ctx.browserContext?.activeTab?.id,
llm: ctx.llmConfig,
},
VerificationError,
@@ -55,7 +54,6 @@ export async function verifyInternal(
{
expectation,
windowId: ctx.browserContext?.windowId,
tabId: ctx.browserContext?.activeTab?.id,
llm: ctx.llmConfig,
},
VerificationError,

View File

@@ -113,10 +113,6 @@ export interface ProgressEvent {
export interface NavResult {
/** Whether navigation succeeded */
success: boolean
/** The tab that was navigated */
tabId?: number
/** The window containing the navigated tab */
windowId?: number
}
/**

View File

@@ -332,94 +332,6 @@ describe('Agent', () => {
expect(agent.sessionId).not.toBe(originalSessionId)
})
it('uses the active tab established by nav() for subsequent act() calls', async () => {
let callCount = 0
globalThis.fetch = mock((url: string, init?: RequestInit) => {
callCount++
if (callCount === 1) {
return Promise.resolve({
ok: true,
status: 200,
json: () =>
Promise.resolve({
success: true,
tabId: 123,
windowId: 456,
}),
} as Response)
}
const sseData = [{ type: 'start-step' }, { type: 'finish-step' }]
.map((event) => `data: ${JSON.stringify(event)}\n\n`)
.join('')
const encoded = new TextEncoder().encode(sseData)
expect(url).toBe('http://localhost:9222/sdk/act')
const body = JSON.parse(init?.body as string)
expect(body.browserContext).toEqual({
windowId: 456,
activeTab: {
id: 123,
url: 'https://example.com',
},
})
return Promise.resolve({
ok: true,
status: 200,
body: {
getReader: () => {
let read = false
return {
read: async () => {
if (read) return { done: true, value: undefined }
read = true
return { done: false, value: encoded }
},
releaseLock: () => {},
}
},
},
} as unknown as Response)
})
const agent = new Agent({ url: TEST_URL })
await agent.nav('https://example.com')
await agent.act('click the button')
})
it('allows act() to override windowId while preserving active tab context', async () => {
const fetchMock = mockSSEFetch([
{ type: 'start-step' },
{ type: 'finish-step' },
])
globalThis.fetch = fetchMock
const agent = new Agent({
url: TEST_URL,
browserContext: {
windowId: 456,
activeTab: {
id: 123,
url: 'https://example.com',
},
},
})
await agent.act('click the button', { windowId: 789 })
const call = fetchMock.mock.calls[0]
const body = JSON.parse(call[1].body)
expect(body.browserContext).toEqual({
windowId: 789,
activeTab: {
id: 123,
url: 'https://example.com',
},
})
})
})
describe('act() with verify option', () => {
@@ -703,28 +615,6 @@ describe('Agent', () => {
expect(body.llm).toEqual(llmConfig)
})
it('includes browser context windowId and active tab id', async () => {
const fetchMock = mockFetch({ success: true, reason: 'Verified' })
globalThis.fetch = fetchMock
const agent = new Agent({
url: TEST_URL,
browserContext: {
windowId: 456,
activeTab: {
id: 123,
url: 'https://example.com',
},
},
})
await agent.verify('the page has some content')
const call = fetchMock.mock.calls[0]
const body = JSON.parse(call[1].body)
expect(body.windowId).toBe(456)
expect(body.tabId).toBe(123)
})
it('returns VerifyResult on success', async () => {
globalThis.fetch = mockFetch({
success: true,

View File

@@ -1,140 +1,110 @@
{
"resources": [
{
"name": "OpenClaw compose file",
"source": {
"type": "local",
"path": "apps/server/resources/openclaw-compose.yml"
},
"destination": "resources/openclaw-compose.yml"
},
{
"name": "Podman CLI - macOS ARM64",
"name": "Bun Runtime - macOS ARM64",
"source": {
"type": "r2",
"key": "third_party/podman/podman-darwin-arm64"
"key": "third_party/bun/bun-darwin-arm64"
},
"destination": "resources/bin/third_party/podman/podman",
"destination": "resources/bin/third_party/bun",
"os": ["macos"],
"arch": ["arm64"],
"executable": true
},
{
"name": "Podman gvproxy - macOS ARM64",
"name": "Bun Runtime - macOS x64",
"source": {
"type": "r2",
"key": "third_party/podman/gvproxy-darwin-arm64"
"key": "third_party/bun/bun-darwin-x64"
},
"destination": "resources/bin/third_party/podman/gvproxy",
"os": ["macos"],
"arch": ["arm64"],
"executable": true
},
{
"name": "Podman vfkit - macOS ARM64",
"source": {
"type": "r2",
"key": "third_party/podman/vfkit-darwin-arm64"
},
"destination": "resources/bin/third_party/podman/vfkit",
"os": ["macos"],
"arch": ["arm64"],
"executable": true
},
{
"name": "Podman krunkit - macOS ARM64",
"source": {
"type": "r2",
"key": "third_party/podman/krunkit-darwin-arm64"
},
"destination": "resources/bin/third_party/podman/krunkit",
"os": ["macos"],
"arch": ["arm64"],
"executable": true
},
{
"name": "Podman mac helper - macOS ARM64",
"source": {
"type": "r2",
"key": "third_party/podman/podman-mac-helper-darwin-arm64"
},
"destination": "resources/bin/third_party/podman/podman-mac-helper",
"os": ["macos"],
"arch": ["arm64"],
"executable": true
},
{
"name": "Podman CLI - macOS x64",
"notes": "krunkit is intentionally omitted on macOS x64 because the official amd64 Podman installer ships an arm64-only krunkit helper",
"source": {
"type": "r2",
"key": "third_party/podman/podman-darwin-x64"
},
"destination": "resources/bin/third_party/podman/podman",
"destination": "resources/bin/third_party/bun",
"os": ["macos"],
"arch": ["x64"],
"executable": true
},
{
"name": "Podman gvproxy - macOS x64",
"name": "Bun Runtime - Linux ARM64",
"source": {
"type": "r2",
"key": "third_party/podman/gvproxy-darwin-x64"
"key": "third_party/bun/bun-linux-arm64"
},
"destination": "resources/bin/third_party/podman/gvproxy",
"os": ["macos"],
"destination": "resources/bin/third_party/bun",
"os": ["linux"],
"arch": ["arm64"],
"executable": true
},
{
"name": "Bun Runtime - Linux x64",
"source": {
"type": "r2",
"key": "third_party/bun/bun-linux-x64"
},
"destination": "resources/bin/third_party/bun",
"os": ["linux"],
"arch": ["x64"],
"executable": true
},
{
"name": "Podman vfkit - macOS x64",
"name": "Bun Runtime - Windows x64",
"source": {
"type": "r2",
"key": "third_party/podman/vfkit-darwin-x64"
"key": "third_party/bun/bun-windows-x64.exe"
},
"destination": "resources/bin/third_party/podman/vfkit",
"os": ["macos"],
"arch": ["x64"],
"executable": true
},
{
"name": "Podman mac helper - macOS x64",
"source": {
"type": "r2",
"key": "third_party/podman/podman-mac-helper-darwin-x64"
},
"destination": "resources/bin/third_party/podman/podman-mac-helper",
"os": ["macos"],
"arch": ["x64"],
"executable": true
},
{
"name": "Podman CLI - Windows x64",
"source": {
"type": "r2",
"key": "third_party/podman/podman-windows-x64.exe"
},
"destination": "resources/bin/third_party/podman/podman.exe",
"destination": "resources/bin/third_party/bun.exe",
"os": ["windows"],
"arch": ["x64"]
},
{
"name": "Podman gvproxy - Windows x64",
"name": "ripgrep - macOS ARM64",
"source": {
"type": "r2",
"key": "third_party/podman/gvproxy-windows-x64.exe"
"key": "third_party/ripgrep/rg-darwin-arm64"
},
"destination": "resources/bin/third_party/podman/gvproxy.exe",
"os": ["windows"],
"arch": ["x64"]
"destination": "resources/bin/third_party/rg",
"os": ["macos"],
"arch": ["arm64"],
"executable": true
},
{
"name": "Podman win-sshproxy - Windows x64",
"name": "ripgrep - macOS x64",
"source": {
"type": "r2",
"key": "third_party/podman/win-sshproxy-windows-x64.exe"
"key": "third_party/ripgrep/rg-darwin-x64"
},
"destination": "resources/bin/third_party/podman/win-sshproxy.exe",
"destination": "resources/bin/third_party/rg",
"os": ["macos"],
"arch": ["x64"],
"executable": true
},
{
"name": "ripgrep - Linux ARM64",
"source": {
"type": "r2",
"key": "third_party/ripgrep/rg-linux-arm64"
},
"destination": "resources/bin/third_party/rg",
"os": ["linux"],
"arch": ["arm64"],
"executable": true
},
{
"name": "ripgrep - Linux x64",
"source": {
"type": "r2",
"key": "third_party/ripgrep/rg-linux-x64"
},
"destination": "resources/bin/third_party/rg",
"os": ["linux"],
"arch": ["x64"],
"executable": true
},
{
"name": "ripgrep - Windows x64",
"source": {
"type": "r2",
"key": "third_party/ripgrep/rg-windows-x64.exe"
},
"destination": "resources/bin/third_party/rg.exe",
"os": ["windows"],
"arch": ["x64"]
}

View File

@@ -12,12 +12,9 @@ function validateRule(rule: ResourceRule): void {
if (!rule.name || rule.name.trim().length === 0) {
throw new Error('Manifest rule is missing name')
}
const hasSourcePath =
(rule.source.type === 'r2' && rule.source.key) ||
(rule.source.type === 'local' && rule.source.path)
if (!hasSourcePath || !rule.destination) {
if (!rule.source.key || !rule.destination) {
throw new Error(
`Manifest rule ${rule.name} is missing source path or destination`,
`Manifest rule ${rule.name} is missing source key or destination`,
)
}
}
@@ -27,21 +24,16 @@ function parseSource(raw: unknown): ResourceRule['source'] {
throw new Error('Manifest source must be an object')
}
const source = raw as Record<string, unknown>
if (source.type === 'r2') {
const key = source.key
if (typeof key !== 'string' || key.length === 0) {
throw new Error('Manifest source key is required')
}
return { type: 'r2', key }
if (source.type !== 'r2') {
throw new Error(
`Unsupported source type in manifest: ${String(source.type)}`,
)
}
if (source.type === 'local') {
const path = source.path
if (typeof path !== 'string' || path.length === 0) {
throw new Error('Manifest source path is required')
}
return { type: 'local', path }
const key = source.key
if (typeof key !== 'string' || key.length === 0) {
throw new Error('Manifest source key is required')
}
throw new Error(`Unsupported source type in manifest: ${String(source.type)}`)
return { type: 'r2', key }
}
function parseRule(raw: unknown): ResourceRule {

View File

@@ -34,28 +34,17 @@ export async function runProdResourceBuild(argv: string[]): Promise<void> {
{ ci: args.ci },
)
const manifestPath = resolve(rootDir, args.manifestPath)
if (!existsSync(manifestPath)) {
throw new Error(`Manifest not found: ${manifestPath}`)
}
const manifest = loadManifest(manifestPath)
if (args.ci) {
const distRoot = getDistProdRoot()
const localArtifacts = []
for (const binary of compiled) {
log.step(`Packaging ${binary.target.name}`)
const rules = getTargetRules(manifest, binary.target).filter(
(rule) => rule.source.type === 'local',
)
const staged = await stageCompiledArtifact(
distRoot,
binary.binaryPath,
binary.target,
buildConfig.version,
rules,
rootDir,
)
localArtifacts.push(staged)
log.success(`Packaged ${binary.target.id}`)
@@ -69,6 +58,12 @@ export async function runProdResourceBuild(argv: string[]): Promise<void> {
return
}
const manifestPath = resolve(rootDir, args.manifestPath)
if (!existsSync(manifestPath)) {
throw new Error(`Manifest not found: ${manifestPath}`)
}
const manifest = loadManifest(manifestPath)
const distRoot = getDistProdRoot()
const r2 = buildConfig.r2
if (!r2) {
@@ -81,14 +76,13 @@ export async function runProdResourceBuild(argv: string[]): Promise<void> {
for (const binary of compiled) {
const rules = getTargetRules(manifest, binary.target)
log.step(
`Staging ${binary.target.name} (${rules.length} resource rule(s))`,
`Staging ${binary.target.name} (${rules.length} download rule(s))`,
)
const staged = await stageTargetArtifact(
distRoot,
binary.binaryPath,
binary.target,
rules,
rootDir,
client,
r2,
buildConfig.version,

View File

@@ -1,99 +0,0 @@
import { afterEach, describe, expect, it } from 'bun:test'
import { mkdtemp, readFile, rm, writeFile } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import { loadManifest } from './manifest'
import { stageCompiledArtifact } from './stage'
import type { BuildTarget } from './types'
const TARGET: BuildTarget = {
id: 'darwin-arm64',
name: 'macOS arm64',
os: 'macos',
arch: 'arm64',
bunTarget: 'bun-darwin-arm64-modern',
serverBinaryName: 'browseros-server-darwin-arm64',
}
describe('server artifact staging', () => {
let tempDir: string | null = null
afterEach(async () => {
if (tempDir) {
await rm(tempDir, { recursive: true, force: true })
tempDir = null
}
})
it('loads local resource rules from the manifest', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'browseros-stage-test-'))
const manifestPath = join(tempDir, 'manifest.json')
await writeFile(
manifestPath,
JSON.stringify({
resources: [
{
name: 'OpenClaw compose file',
source: {
type: 'local',
path: 'apps/server/resources/openclaw-compose.yml',
},
destination: 'resources/openclaw-compose.yml',
},
],
}),
)
expect(loadManifest(manifestPath)).toEqual({
resources: [
{
name: 'OpenClaw compose file',
source: {
type: 'local',
path: 'apps/server/resources/openclaw-compose.yml',
},
destination: 'resources/openclaw-compose.yml',
executable: false,
},
],
})
})
it('copies local resource files into the packaged artifact', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'browseros-stage-test-'))
const distRoot = join(tempDir, 'dist')
const compiledBinaryPath = join(tempDir, 'browseros-server')
const sourceRoot = join(tempDir, 'repo')
const composeSourcePath = join(
sourceRoot,
'apps/server/resources/openclaw-compose.yml',
)
await writeFile(compiledBinaryPath, '#!/bin/sh\n')
await Bun.write(composeSourcePath, 'services:\n')
const staged = await stageCompiledArtifact(
distRoot,
compiledBinaryPath,
TARGET,
'1.2.3',
[
{
name: 'OpenClaw compose file',
source: {
type: 'local',
path: 'apps/server/resources/openclaw-compose.yml',
},
destination: 'resources/openclaw-compose.yml',
},
],
sourceRoot,
)
expect(
await readFile(
join(staged.resourcesDir, 'openclaw-compose.yml'),
'utf-8',
),
).toBe('services:\n')
})
})

View File

@@ -1,5 +1,5 @@
import { chmod, cp, mkdir, rm } from 'node:fs/promises'
import { dirname, isAbsolute, join, relative, resolve } from 'node:path'
import { dirname, isAbsolute, join, relative } from 'node:path'
import type { S3Client } from '@aws-sdk/client-s3'
@@ -75,40 +75,13 @@ function resolveDestination(rootDir: string, destination: string): string {
async function stageRule(
rootDir: string,
sourceRoot: string,
rule: ResourceRule,
target: BuildTarget,
client: S3Client,
r2: R2Config,
): Promise<void> {
const destinationPath = resolveDestination(rootDir, rule.destination)
await mkdir(dirname(destinationPath), { recursive: true })
if (rule.source.type === 'local') {
await stageLocalRule(destinationPath, sourceRoot, rule, target)
} else {
await downloadObjectToFile(client, r2, rule.source.key, destinationPath)
if (rule.executable && target.os !== 'windows') {
await chmod(destinationPath, 0o755)
}
}
}
async function stageLocalRule(
destinationPath: string,
sourceRoot: string,
rule: ResourceRule,
target: BuildTarget,
): Promise<void> {
if (rule.source.type !== 'local') {
throw new Error(`Expected local source rule, got ${rule.source.type}`)
}
await mkdir(dirname(destinationPath), { recursive: true })
const sourcePath = isAbsolute(rule.source.path)
? rule.source.path
: resolve(sourceRoot, rule.source.path)
await cp(sourcePath, destinationPath)
await downloadObjectToFile(client, r2, rule.source.key, destinationPath)
if (rule.executable && target.os !== 'windows') {
await chmod(destinationPath, 0o755)
@@ -120,7 +93,6 @@ export async function stageTargetArtifact(
compiledBinaryPath: string,
target: BuildTarget,
rules: ResourceRule[],
sourceRoot: string,
client: S3Client,
r2: R2Config,
version: string,
@@ -128,7 +100,7 @@ export async function stageTargetArtifact(
const rootDir = await createArtifactRoot(distRoot, compiledBinaryPath, target)
for (const rule of rules) {
await stageRule(rootDir, sourceRoot, rule, target, client, r2)
await stageRule(rootDir, rule, target, client, r2)
}
return finalizeArtifact(rootDir, target, version)
@@ -139,22 +111,7 @@ export async function stageCompiledArtifact(
compiledBinaryPath: string,
target: BuildTarget,
version: string,
rules: ResourceRule[] = [],
sourceRoot = process.cwd(),
): Promise<StagedArtifact> {
const rootDir = await createArtifactRoot(distRoot, compiledBinaryPath, target)
for (const rule of rules) {
if (rule.source.type !== 'local') {
continue
}
await stageLocalRule(
resolveDestination(rootDir, rule.destination),
sourceRoot,
rule,
target,
)
}
return finalizeArtifact(rootDir, target, version)
}

View File

@@ -40,18 +40,11 @@ export interface BuildConfig {
r2?: R2Config
}
export interface R2ResourceSource {
export interface ResourceSource {
type: 'r2'
key: string
}
export interface LocalResourceSource {
type: 'local'
path: string
}
export type ResourceSource = R2ResourceSource | LocalResourceSource
export interface ResourceRule {
name: string
source: ResourceSource

View File

@@ -1,85 +0,0 @@
#!/usr/bin/env bash
#
# Copy a BrowserOS profile into a seed directory suitable for passing
# as --user-data-dir to a child Chromium instance (e.g. the VL collector).
#
# Usage:
# .scripts/copy-browseros-profile.sh <profile-name> <dest-dir>
#
# Example:
# .scripts/copy-browseros-profile.sh Work /tmp/vl-seed-profile
#
# Result: <dest-dir>/Default/<profile files> plus a stub Local State.
#
# Requires: jq, macOS (uses APFS clone via `cp -c`).
set -euo pipefail
PROFILE_NAME="${1:-}"
DEST_DIR="${2:-}"
if [[ -z "$PROFILE_NAME" || -z "$DEST_DIR" ]]; then
echo "usage: $0 <profile-name> <dest-dir>" >&2
exit 1
fi
SRC_ROOT="$HOME/Library/Application Support/BrowserOS"
LOCAL_STATE="$SRC_ROOT/Local State"
if [[ ! -f "$LOCAL_STATE" ]]; then
echo "error: BrowserOS Local State not found at: $LOCAL_STATE" >&2
exit 1
fi
if pgrep -qf "BrowserOS.app/Contents/MacOS/BrowserOS"; then
echo "error: BrowserOS is running. Quit it first so the profile SQLite files aren't mid-write." >&2
exit 1
fi
PROFILE_FOLDER=$(
jq -r --arg name "$PROFILE_NAME" \
'.profile.info_cache | to_entries[] | select(.value.name == $name) | .key' \
"$LOCAL_STATE"
)
if [[ -z "$PROFILE_FOLDER" ]]; then
echo "error: no profile named '$PROFILE_NAME' found. Available profiles:" >&2
jq -r '.profile.info_cache | to_entries | map(" \(.key)\t\(.value.name)") | .[]' \
"$LOCAL_STATE" >&2
exit 1
fi
SRC_PROFILE="$SRC_ROOT/$PROFILE_FOLDER"
if [[ ! -d "$SRC_PROFILE" ]]; then
echo "error: profile directory missing on disk: $SRC_PROFILE" >&2
exit 1
fi
echo "source: $SRC_PROFILE (name: $PROFILE_NAME)"
echo "dest: $DEST_DIR/Default"
if [[ -e "$DEST_DIR" ]]; then
echo "error: $DEST_DIR already exists. Remove it or pick a new path." >&2
exit 1
fi
mkdir -p "$DEST_DIR"
# APFS clone is O(1) and uses no extra disk space until files diverge.
cp -c -R "$SRC_PROFILE" "$DEST_DIR/Default"
# Strip singleton locks from the source instance.
rm -f \
"$DEST_DIR/Default/SingletonLock" \
"$DEST_DIR/Default/SingletonSocket" \
"$DEST_DIR/Default/SingletonCookie"
# Minimal Local State so Chrome doesn't complain on first launch.
echo '{}' > "$DEST_DIR/Local State"
BYTES=$(du -sh "$DEST_DIR" | awk '{print $1}')
echo "done: $BYTES at $DEST_DIR"
echo
echo "next:"
echo " launch Chromium with --user-data-dir=$DEST_DIR"
echo " (and drop --use-mock-keychain so encrypted cookies decrypt)"

View File

@@ -1,29 +0,0 @@
import { spawnSync } from 'node:child_process'
import { mkdirSync } from 'node:fs'
import { dirname, resolve } from 'node:path'
const projectRoot = resolve(import.meta.dir, '..')
const junitPath = process.env.BROWSEROS_JUNIT_PATH?.trim()
const testArgs = process.argv.slice(2)
const cmd = [process.execPath, 'test']
if (junitPath) {
const outputPath = resolve(projectRoot, junitPath)
mkdirSync(dirname(outputPath), { recursive: true })
cmd.push('--reporter=junit', `--reporter-outfile=${outputPath}`)
}
cmd.push(...testArgs)
const result = spawnSync(cmd[0], cmd.slice(1), {
cwd: projectRoot,
env: process.env,
stdio: 'inherit',
})
if (result.error) {
throw result.error
}
process.exit(result.status ?? 1)

View File

@@ -2,7 +2,6 @@ package cmd
import (
"fmt"
"time"
"browseros-dev/proc"
@@ -34,9 +33,7 @@ func runCleanup(cmd *cobra.Command, args []string) error {
if doPorts {
ports := proc.DefaultLocalPorts()
proc.LogMsgf(proc.TagInfo, "Killing processes on ports %d, %d, %d...", ports.CDP, ports.Server, ports.Extension)
if err := proc.KillPortsAndWait(ports, 3*time.Second); err != nil {
return err
}
proc.KillPorts(ports)
proc.LogMsg(proc.TagInfo, "Ports cleared")
}

View File

@@ -8,7 +8,6 @@ import (
"path/filepath"
"sync"
"syscall"
"time"
"browseros-dev/browser"
"browseros-dev/proc"
@@ -63,9 +62,7 @@ func runWatch(cmd *cobra.Command, args []string) error {
return fmt.Errorf("creating user-data dir: %w", err)
}
proc.LogMsg(proc.TagInfo, "Killing processes on preferred ports...")
if err := proc.KillPortsAndWait(defaultPorts, 3*time.Second); err != nil {
return err
}
proc.KillPorts(defaultPorts)
proc.LogMsg(proc.TagInfo, "Ports cleared")
p, reservations, err = proc.ResolveWatchPorts(false)
@@ -162,9 +159,6 @@ func runWatch(cmd *cobra.Command, args []string) error {
Env: env,
Restart: true,
Cmd: []string{"bun", "--watch", "--env-file=.env.development", "src/index.ts"},
BeforeStart: func() error {
return proc.KillPortAndWait(p.Server, 3*time.Second)
},
}))
<-sigCh

View File

@@ -11,12 +11,11 @@ import (
)
type ProcConfig struct {
Tag Tag
Dir string
Env []string
Restart bool
Cmd []string
BeforeStart func() error
Tag Tag
Dir string
Env []string
Restart bool
Cmd []string
}
type ManagedProc struct {
@@ -50,17 +49,6 @@ func (mp *ManagedProc) run(ctx context.Context) {
return
}
if mp.Cfg.BeforeStart != nil {
if err := mp.Cfg.BeforeStart(); err != nil {
LogMsg(mp.Cfg.Tag, ErrorColor.Sprintf("Pre-start failed: %v", err))
if !mp.Cfg.Restart || ctx.Err() != nil {
return
}
time.Sleep(time.Second)
continue
}
}
LogMsgf(mp.Cfg.Tag, "Starting: %s", DimColor.Sprint(strings.Join(mp.Cfg.Cmd, " ")))
cmd := exec.Command(mp.Cfg.Cmd[0], mp.Cfg.Cmd[1:]...)

View File

@@ -1,60 +0,0 @@
package proc
import (
"context"
"os"
"path/filepath"
"sync"
"sync/atomic"
"testing"
"time"
)
// TestStartManagedRunsBeforeStartOnEachRetry verifies that a restarting
// managed process re-runs its BeforeStart hook before every launch attempt,
// not only the first one.
func TestStartManagedRunsBeforeStartOnEachRetry(t *testing.T) {
	// The command exits immediately with code 1, so with Restart=true the
	// 2.2s window should allow at least two launch attempts.
	ctx, cancel := context.WithTimeout(context.Background(), 2200*time.Millisecond)
	defer cancel()
	var count atomic.Int32
	var wg sync.WaitGroup
	StartManaged(ctx, &wg, ProcConfig{
		Tag:     TagInfo,
		Dir:     t.TempDir(),
		Restart: true,
		Cmd:     []string{"sh", "-c", "exit 1"},
		BeforeStart: func() error {
			// Count invocations; atomic because the hook runs on the
			// managed goroutine while the test goroutine reads it.
			count.Add(1)
			return nil
		},
	})
	wg.Wait()
	if count.Load() < 2 {
		t.Fatalf("expected BeforeStart to run on retries, got %d calls", count.Load())
	}
}
// TestStartManagedSkipsLaunchWhenBeforeStartFails verifies that when the
// BeforeStart hook returns an error and Restart is false, the managed
// command is never launched.
func TestStartManagedSkipsLaunchWhenBeforeStartFails(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// The command would create this sentinel file; its absence after
	// wg.Wait() proves the launch was skipped.
	sentinel := filepath.Join(t.TempDir(), "started")
	var wg sync.WaitGroup
	StartManaged(ctx, &wg, ProcConfig{
		Tag:     TagInfo,
		Dir:     t.TempDir(),
		Restart: false,
		Cmd:     []string{"sh", "-c", "touch " + sentinel},
		BeforeStart: func() error {
			// Any non-nil error will do; DeadlineExceeded is a handy stdlib value.
			return context.DeadlineExceeded
		},
	})
	wg.Wait()
	if _, err := os.Stat(sentinel); !os.IsNotExist(err) {
		t.Fatalf("expected process launch to be skipped, stat err=%v", err)
	}
}

View File

@@ -133,29 +133,6 @@ func KillPort(port int) {
exec.Command("sh", "-c", fmt.Sprintf("lsof -ti:%d | xargs kill -9 2>/dev/null || true", port)).Run()
}
// KillPortAndWait force-kills whatever is listening on port and then polls
// until the port is actually free, or the timeout elapses.
func KillPortAndWait(port int, timeout time.Duration) error {
	expiry := time.Now().Add(timeout)
	for {
		// Always make at least one kill attempt, even with a zero timeout.
		KillPort(port)
		if IsPortAvailable(port) {
			return nil
		}
		if time.Now().After(expiry) {
			return fmt.Errorf("port %d is still in use after kill -9 cleanup", port)
		}
		// Give the OS a moment to release the socket before re-probing.
		time.Sleep(100 * time.Millisecond)
	}
}
// KillPortsAndWait clears each of the three well-known ports in turn,
// failing fast on the first one that cannot be freed within timeout.
func KillPortsAndWait(p Ports, timeout time.Duration) error {
	targets := []int{p.CDP, p.Server, p.Extension}
	for _, port := range targets {
		if err := KillPortAndWait(port, timeout); err != nil {
			return err
		}
	}
	return nil
}
func BuildEnv(p Ports, nodeEnv string) []string {
env := os.Environ()
env = append(env,

View File

@@ -1,58 +0,0 @@
#!/usr/bin/env python3
"""Shared sign metadata for BrowserOS Server binaries.
Consumed by both the Chromium-build signing path (build/modules/sign/) and the
OTA release path (build/modules/ota/). Adding a new third-party binary here
means both paths pick it up automatically.
"""
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
@dataclass(frozen=True)
class SignSpec:
    """Per-binary codesign metadata.

    ``entitlements`` is the filename of the plist under
    ``resources/entitlements/``; ``None`` means no extra entitlements.
    """

    identifier_suffix: str
    options: str
    entitlements: Optional[str] = None


# Entitlements plist shared by the binaries that need full executable rights.
_EXEC_ENTITLEMENTS = "browseros-executable-entitlements.plist"

# Codesign metadata keyed by the binary's file stem, so both bare names
# ("rg") and hyphenated ones ("podman-mac-helper") resolve directly.
MACOS_SERVER_BINARIES: Dict[str, SignSpec] = {
    "browseros_server": SignSpec(
        identifier_suffix="browseros_server",
        options="runtime",
        entitlements=_EXEC_ENTITLEMENTS,
    ),
    "bun": SignSpec(
        identifier_suffix="bun",
        options="runtime",
        entitlements=_EXEC_ENTITLEMENTS,
    ),
    "rg": SignSpec(identifier_suffix="rg", options="runtime"),
    "podman": SignSpec(identifier_suffix="podman", options="runtime"),
    "gvproxy": SignSpec(identifier_suffix="gvproxy", options="runtime"),
    "vfkit": SignSpec(
        identifier_suffix="vfkit",
        options="runtime",
        entitlements="podman-vfkit-entitlements.plist",
    ),
    "krunkit": SignSpec(
        identifier_suffix="krunkit",
        options="runtime",
        entitlements="podman-krunkit-entitlements.plist",
    ),
    "podman-mac-helper": SignSpec(
        identifier_suffix="podman_mac_helper", options="runtime"
    ),
}
# Relative paths (POSIX-style, rooted at resources/bin/) of every Windows
# binary that must be signed. List order is preserved by
# expected_windows_binary_paths, so callers may zip these entries with the
# resolved absolute paths.
WINDOWS_SERVER_BINARIES: List[str] = [
    "browseros_server.exe",
    "third_party/bun.exe",
    "third_party/rg.exe",
    "third_party/podman/podman.exe",
    "third_party/podman/gvproxy.exe",
    "third_party/podman/win-sshproxy.exe",
]
def macos_sign_spec_for(binary_path: Path) -> Optional[SignSpec]:
    """Look up sign metadata by file stem (e.g., ``podman-mac-helper``).

    Returns ``None`` for binaries not registered in the table.
    """
    stem = binary_path.stem
    return MACOS_SERVER_BINARIES.get(stem)
def expected_windows_binary_paths(server_bin_dir: Path) -> List[Path]:
    """Resolve the Windows relative-path list against a ``resources/bin`` dir.

    Output order matches ``WINDOWS_SERVER_BINARIES``.
    """
    return [server_bin_dir.joinpath(rel) for rel in WINDOWS_SERVER_BINARIES]

View File

@@ -1,63 +0,0 @@
#!/usr/bin/env python3
"""Tests for the shared server-binary sign table."""
import unittest
from pathlib import Path
from .server_binaries import (
MACOS_SERVER_BINARIES,
WINDOWS_SERVER_BINARIES,
expected_windows_binary_paths,
macos_sign_spec_for,
)
ENTITLEMENTS_DIR = Path(__file__).resolve().parents[2] / "resources" / "entitlements"
class MacosServerBinariesTest(unittest.TestCase):
    """Guards the invariants of the shared macOS sign table."""

    def test_every_entry_has_identifier_and_options(self):
        # Both fields feed codesign's --identifier/--options flags, so an
        # empty string would produce a malformed invocation.
        for name, entry in MACOS_SERVER_BINARIES.items():
            self.assertTrue(entry.identifier_suffix, f"{name} missing identifier_suffix")
            self.assertTrue(entry.options, f"{name} missing options")

    def test_every_entitlements_plist_exists_on_disk(self):
        for name, entry in MACOS_SERVER_BINARIES.items():
            if entry.entitlements is not None:
                plist = ENTITLEMENTS_DIR / entry.entitlements
                self.assertTrue(plist.exists(), f"{name}: entitlements {plist} missing")

    def test_macos_sign_spec_for_resolves_by_stem(self):
        resolved = macos_sign_spec_for(Path("/x/podman-mac-helper"))
        assert resolved is not None
        self.assertEqual(resolved.identifier_suffix, "podman_mac_helper")
        self.assertIsNone(macos_sign_spec_for(Path("/x/not_a_known_binary")))

    def test_matches_podman_bundle_layout(self):
        # Every binary shipped in the podman bundle must be signable.
        required = {"podman", "gvproxy", "vfkit", "krunkit", "podman-mac-helper"}
        self.assertTrue(required <= MACOS_SERVER_BINARIES.keys())
class WindowsServerBinariesTest(unittest.TestCase):
    """Guards the invariants of the Windows binary path list."""

    def test_no_duplicates(self):
        unique = set(WINDOWS_SERVER_BINARIES)
        self.assertEqual(len(WINDOWS_SERVER_BINARIES), len(unique))

    def test_paths_within_expected_layout(self):
        # Only the server binary sits at the bin root; everything else
        # must live under third_party/.
        for rel in WINDOWS_SERVER_BINARIES:
            in_layout = rel == "browseros_server.exe" or rel.startswith("third_party/")
            self.assertTrue(in_layout, f"{rel} outside expected layout")

    def test_expected_windows_binary_paths_joins_root(self):
        root = Path("/tmp/fake/resources/bin")
        resolved = expected_windows_binary_paths(root)
        self.assertEqual(len(resolved), len(WINDOWS_SERVER_BINARIES))
        for rel, joined in zip(WINDOWS_SERVER_BINARIES, resolved):
            self.assertEqual(joined, root / rel)
if __name__ == "__main__":
unittest.main()

View File

@@ -7,16 +7,52 @@
<language>en</language>
<item>
<sparkle:version>0.0.86</sparkle:version>
<pubDate>Thu, 16 Apr 2026 18:58:59 +0000</pubDate>
<sparkle:version>0.0.74</sparkle:version>
<pubDate>Thu, 12 Mar 2026 21:20:48 +0000</pubDate>
<!-- macOS arm64 -->
<enclosure
url="https://cdn.browseros.com/server/browseros_server_0.0.86_darwin_arm64.zip"
url="https://cdn.browseros.com/server/browseros_server_0.0.74_darwin_arm64.zip"
sparkle:os="macos"
sparkle:arch="arm64"
sparkle:edSignature="kkM3dFanJr9TQgRPV7NOs7GwYpVfLHH+Db6oUWLHTWQFODBy8wx46fD6sioQdsB4k+9Ra9QCBm0WRSvKDkljDQ=="
length="101284695"
sparkle:edSignature="aPuQG3dtQj5v857CNSZ+Ahz3bxUOM7+tSEskW0mIbJV6969a3j1kAqOQ20D1FcxlEyYqquFOaeHpoGaDi6LsDg=="
length="22191352"
type="application/zip"/>
<!-- macOS x86_64 -->
<enclosure
url="https://cdn.browseros.com/server/browseros_server_0.0.74_darwin_x64.zip"
sparkle:os="macos"
sparkle:arch="x86_64"
sparkle:edSignature="X+FCQFH2HpBG43UiJjE0FkheyfOAUW2dhtmKn9HKRrJkqMGsaw+bhjdze1lP02oz71b8Q9AkC2NYwSUN0m0FAQ=="
length="24641802"
type="application/zip"/>
<!-- Linux arm64 -->
<enclosure
url="https://cdn.browseros.com/server/browseros_server_0.0.74_linux_arm64.zip"
sparkle:os="linux"
sparkle:arch="arm64"
sparkle:edSignature="1tnET+iFDYEc9kdwV9U3mo4rExX0JBnlJOrcEQOGBwR/478NxbOsPx3AI/H7216HlylayNj7bYLVJY/FJqY2Dg=="
length="37751728"
type="application/zip"/>
<!-- Linux x86_64 -->
<enclosure
url="https://cdn.browseros.com/server/browseros_server_0.0.74_linux_x64.zip"
sparkle:os="linux"
sparkle:arch="x86_64"
sparkle:edSignature="/OUrTZmgYWIWWWu71XAzN0B6hgs2WD9MOiZsXMvsv22TZwlEP1RdQsEO84JgFMb9if37MZX47utA2UWpSfFtAg=="
length="39041390"
type="application/zip"/>
<!-- Windows x86_64 -->
<enclosure
url="https://cdn.browseros.com/server/browseros_server_0.0.74_windows_x64.zip"
sparkle:os="windows"
sparkle:arch="x86_64"
sparkle:edSignature="qd7XYvoa59QA1bSUkaXbtBCti8DQGh3mWWfPG1qtgk5InLXJ07Y0ve/Y6ZAn8fyz6XGLEgMVhUa6eblmVuUODw=="
length="40986233"
type="application/zip"/>
</item>

View File

@@ -1,55 +0,0 @@
# BrowserOS macOS Release Build Configuration (arm64 only)
#
# Single-architecture arm64 release build. Skips the universal_build
# pipeline (no x64, no lipo merge) — follows the standard per-arch flow
# like release.windows.yaml / release.linux.yaml.
#
# Environment Variables:
#   Use !env tag to reference environment variables:
#   Example: chromium_src: !env CHROMIUM_SRC

build:
  type: release
  architecture: arm64
  gn_flags:
    file: build/config/gn/flags.macos.release.gn

# Explicit module execution order (modules run top to bottom)
modules:
  # Phase 1: Setup
  - clean
  - git_setup
  - sparkle_setup

  # Phase 2: Patches & Resources
  - download_resources
  - resources
  - bundled_extensions
  - chromium_replace
  - string_replaces
  - series_patches
  - patches

  # Phase 3: Build
  - configure
  - compile

  # Phase 4: Sign & Package
  - sign_macos
  - package_macos

  # Phase 5: Upload
  - upload

# Required environment variables
# Note: CHROMIUM_SRC can be provided via --chromium-src CLI flag, YAML config, or env var
required_envs:
  # Codesigning identity and Apple-ID notarization credentials.
  - MACOS_CERTIFICATE_NAME
  - PROD_MACOS_NOTARIZATION_APPLE_ID
  - PROD_MACOS_NOTARIZATION_TEAM_ID
  - PROD_MACOS_NOTARIZATION_PWD

# Notification settings
notifications:
  slack: true

View File

@@ -9,16 +9,12 @@ from .common import (
SignedArtifact,
SERVER_PLATFORMS,
APPCAST_TEMPLATE,
find_server_resources_dir,
create_server_bundle_zip,
find_server_binary,
)
from .sign_binary import (
sign_macos_binary,
notarize_macos_binary,
notarize_macos_zip,
sign_windows_binary,
sign_server_bundle_macos,
sign_server_bundle_windows,
)
from .server import ServerOTAModule
@@ -34,14 +30,10 @@ __all__ = [
"parse_existing_appcast",
"ExistingAppcast",
"SignedArtifact",
"find_server_resources_dir",
"create_server_bundle_zip",
"find_server_binary",
"sign_macos_binary",
"notarize_macos_binary",
"notarize_macos_zip",
"sign_windows_binary",
"sign_server_bundle_macos",
"sign_server_bundle_windows",
"SERVER_PLATFORMS",
"APPCAST_TEMPLATE",
]

View File

@@ -1,93 +0,0 @@
#!/usr/bin/env python3
"""Tests for OTA bundle-zip creation."""
import stat
import sys
import tempfile
import unittest
import zipfile
from pathlib import Path
from .common import create_server_bundle_zip, find_server_resources_dir
def _write_exec(path: Path, content: bytes) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_bytes(content)
path.chmod(path.stat().st_mode | 0o755)
class CreateServerBundleZipTest(unittest.TestCase):
    """Tests for ``create_server_bundle_zip`` over a staged resources/ tree."""

    def test_bundles_full_resources_tree(self):
        # Stage a realistic darwin-arm64 layout: server binary plus nested
        # third_party deps, all marked executable.
        with tempfile.TemporaryDirectory() as tmp:
            staging = Path(tmp) / "darwin-arm64"
            resources = staging / "resources"
            _write_exec(resources / "bin" / "browseros_server", b"server")
            _write_exec(resources / "bin" / "third_party" / "bun", b"bun")
            _write_exec(resources / "bin" / "third_party" / "rg", b"rg")
            _write_exec(resources / "bin" / "third_party" / "podman" / "podman", b"pd")
            _write_exec(
                resources / "bin" / "third_party" / "podman" / "gvproxy", b"gv"
            )
            zip_path = Path(tmp) / "bundle.zip"
            self.assertTrue(create_server_bundle_zip(resources, zip_path))
            # Archive entries must be rooted at "resources/", not the
            # staging directory, and contain exactly the staged files.
            with zipfile.ZipFile(zip_path) as zf:
                names = set(zf.namelist())
            self.assertEqual(
                names,
                {
                    "resources/bin/browseros_server",
                    "resources/bin/third_party/bun",
                    "resources/bin/third_party/rg",
                    "resources/bin/third_party/podman/podman",
                    "resources/bin/third_party/podman/gvproxy",
                },
            )

    @unittest.skipIf(sys.platform == "win32", "file mode check is meaningless on Windows")
    def test_preserves_executable_bits(self):
        with tempfile.TemporaryDirectory() as tmp:
            resources = Path(tmp) / "darwin-arm64" / "resources"
            _write_exec(resources / "bin" / "browseros_server", b"server")
            zip_path = Path(tmp) / "bundle.zip"
            self.assertTrue(create_server_bundle_zip(resources, zip_path))
            with zipfile.ZipFile(zip_path) as zf:
                info = zf.getinfo("resources/bin/browseros_server")
            # The Unix file mode lives in the upper 16 bits of external_attr.
            mode = (info.external_attr >> 16) & 0o777
            self.assertTrue(mode & stat.S_IXUSR)

    def test_missing_resources_dir_fails(self):
        # A nonexistent resources dir must fail cleanly (False, no raise).
        with tempfile.TemporaryDirectory() as tmp:
            missing = Path(tmp) / "does-not-exist"
            zip_path = Path(tmp) / "bundle.zip"
            self.assertFalse(create_server_bundle_zip(missing, zip_path))
class FindServerResourcesDirTest(unittest.TestCase):
    """Tests for ``find_server_resources_dir`` platform lookup."""

    def test_returns_resources_dir_when_present(self):
        with tempfile.TemporaryDirectory() as tmp:
            root = Path(tmp)
            (root / "darwin-arm64" / "resources" / "bin").mkdir(parents=True)
            # The lookup is keyed by the platform's "target" directory name.
            found = find_server_resources_dir(
                root, {"name": "darwin_arm64", "target": "darwin-arm64"}
            )
            self.assertEqual(found, root / "darwin-arm64" / "resources")

    def test_returns_none_when_absent(self):
        with tempfile.TemporaryDirectory() as tmp:
            root = Path(tmp)
            self.assertIsNone(
                find_server_resources_dir(
                    root, {"name": "darwin_arm64", "target": "darwin-arm64"}
                )
            )
if __name__ == "__main__":
unittest.main()

View File

@@ -1,7 +1,9 @@
#!/usr/bin/env python3
"""Common utilities for OTA update modules"""
import os
import re
import shutil
import zipfile
import xml.etree.ElementTree as ET
from datetime import datetime, timezone
@@ -11,9 +13,8 @@ from dataclasses import dataclass
from ...common.utils import log_error, log_info, log_success
# Re-exported so callers (and ota/__init__.py) can get sparkle_sign_file
# from ota.common alongside the other OTA helpers.
from ...common.sparkle import sparkle_sign_file as sparkle_sign_file
# Re-export sparkle_sign_file from common module
from ...common.sparkle import sparkle_sign_file
# Sparkle XML namespace
SPARKLE_NS = "http://www.andymatuschak.org/xml-namespaces/sparkle"
@@ -75,15 +76,33 @@ class ExistingAppcast:
artifacts: Dict[str, SignedArtifact]
def find_server_resources_dir(binaries_dir: Path, platform: dict) -> Optional[Path]:
"""Return the extracted ``resources/`` dir for a platform, or ``None``.
def find_server_binary(binaries_dir: Path, platform: dict) -> Optional[Path]:
"""Find server binary in either flat or artifact-extracted directory structure.
``binaries_dir`` is the temp root created by ``_download_artifacts``; each
platform lives at ``<binaries_dir>/<target>/resources/``.
Supports two layouts:
Flat: {binaries_dir}/{binary_name} (e.g., browseros-server-darwin-arm64)
Artifact: {binaries_dir}/{target}/resources/bin/browseros_server[.exe]
Args:
binaries_dir: Root directory containing server binaries
platform: Platform dict from SERVER_PLATFORMS
Returns:
Path to binary if found, None otherwise
"""
# Flat structure (used with --binaries pointing to mono build output)
flat_path = binaries_dir / platform["binary"]
if flat_path.exists():
return flat_path
# Artifact-extracted structure (used after download_resources)
target = platform.get("target", platform["name"].replace("_", "-"))
resources = binaries_dir / target / "resources"
return resources if resources.is_dir() else None
bin_name = "browseros_server.exe" if platform["os"] == "windows" else "browseros_server"
artifact_path = binaries_dir / target / "resources" / "bin" / bin_name
if artifact_path.exists():
return artifact_path
return None
def parse_existing_appcast(appcast_path: Path) -> Optional[ExistingAppcast]:
@@ -235,31 +254,46 @@ def generate_server_appcast(
)
def create_server_bundle_zip(resources_dir: Path, output_zip: Path) -> bool:
"""Zip an extracted ``resources/`` tree into a Sparkle payload.
def create_server_zip(
binary_path: Path,
output_zip: Path,
is_windows: bool = False,
) -> bool:
"""Create zip with proper structure: resources/bin/browseros_server
Produces entries like ``resources/bin/browseros_server``,
``resources/bin/third_party/podman/podman`` — mirroring what the agent
build staged and what the Chromium build bakes into the installed app.
File modes are preserved by ``ZipFile.write`` so executable bits survive.
Args:
binary_path: Path to the binary to package
output_zip: Path for output zip file
is_windows: Whether this is Windows binary (affects target name)
Returns:
True on success, False on failure
"""
if not resources_dir.is_dir():
log_error(f"Resources dir not found: {resources_dir}")
return False
bundle_root = resources_dir.parent
staging_dir = output_zip.parent / f"staging_{output_zip.stem}"
try:
with zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED) as zf:
for path in sorted(resources_dir.rglob("*")):
if not path.is_file():
continue
arcname = path.relative_to(bundle_root).as_posix()
zf.write(path, arcname)
staging_dir.mkdir(parents=True, exist_ok=True)
bin_dir = staging_dir / "resources" / "bin"
bin_dir.mkdir(parents=True, exist_ok=True)
target_name = "browseros_server.exe" if is_windows else "browseros_server"
shutil.copy2(binary_path, bin_dir / target_name)
with zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED) as zf:
for root, _, files in os.walk(staging_dir):
for file in files:
file_path = Path(root) / file
arcname = file_path.relative_to(staging_dir)
zf.write(file_path, arcname)
log_success(f"Created {output_zip.name}")
return True
except Exception as e:
log_error(f"Failed to create bundle zip: {e}")
log_error(f"Failed to create zip: {e}")
return False
finally:
if staging_dir.exists():
shutil.rmtree(staging_dir)
def get_appcast_path(channel: str = "alpha") -> Path:

View File

@@ -10,6 +10,7 @@ from ...common.module import CommandModule, ValidationError
from ...common.context import Context
from ...common.utils import (
log_info,
log_error,
log_success,
log_warning,
IS_MACOS,
@@ -22,14 +23,15 @@ from .common import (
sparkle_sign_file,
generate_server_appcast,
parse_existing_appcast,
create_server_bundle_zip,
create_server_zip,
get_appcast_path,
find_server_resources_dir,
find_server_binary,
)
from .sign_binary import (
notarize_macos_zip,
sign_server_bundle_macos,
sign_server_bundle_windows,
sign_macos_binary,
notarize_macos_binary,
sign_windows_binary,
get_entitlements_path,
)
from ..storage import get_r2_client, upload_file_to_r2, download_file_from_r2
from ..storage.download import extract_artifact_zip
@@ -87,8 +89,11 @@ class ServerOTAModule(CommandModule):
return [p for p in SERVER_PLATFORMS if p["name"] in requested]
return SERVER_PLATFORMS
def _download_artifacts(self, ctx: Context, download_dir: Path) -> None:
"""Download and extract server artifact zips from R2 into ``download_dir``."""
def _download_artifacts(self, ctx: Context) -> Path:
"""Download server artifact zips from R2 latest/ and extract them."""
download_dir = Path(tempfile.mkdtemp(prefix="ota_artifacts_"))
self._download_dir = download_dir
r2_client = get_r2_client(ctx.env)
if not r2_client:
raise RuntimeError("Failed to create R2 client")
@@ -112,85 +117,69 @@ class ServerOTAModule(CommandModule):
zip_path.unlink()
log_success(f"Downloaded {len(platforms)} artifact(s)")
return download_dir
def execute(self, context: Context) -> None:
ctx = context
log_info(f"\n🚀 BrowserOS Server OTA v{self.version} ({self.channel})")
log_info("=" * 70)
with tempfile.TemporaryDirectory(prefix="ota_artifacts_") as dl, \
tempfile.TemporaryDirectory(prefix="ota_staging_") as st:
binaries_dir = Path(dl)
temp_dir = Path(st)
log_info(f"Temp directory: {temp_dir}")
# Download artifacts from R2
binaries_dir = self._download_artifacts(ctx)
self._download_artifacts(ctx, binaries_dir)
signed_artifacts = self._build_platform_artifacts(
ctx, binaries_dir, temp_dir
)
self._finalize_release(ctx, signed_artifacts)
platforms = self._get_platforms()
temp_dir = Path(tempfile.mkdtemp())
log_info(f"Temp directory: {temp_dir}")
def _build_platform_artifacts(
self, ctx: Context, binaries_dir: Path, temp_dir: Path
) -> List[SignedArtifact]:
"""Sign + zip + Sparkle-sign each platform; fail fast on any error.
Any per-platform failure raises ``RuntimeError`` so a broken
credential or unregistered binary cannot silently omit a platform
from a published release.
"""
signed_artifacts: List[SignedArtifact] = []
for platform in self._get_platforms():
for platform in platforms:
log_info(f"\n📦 Processing {platform['name']}...")
source_resources = find_server_resources_dir(binaries_dir, platform)
if not source_resources:
raise RuntimeError(
f"Resources dir not found for {platform['name']}"
)
source_binary = find_server_binary(binaries_dir, platform)
if not source_binary:
log_warning(f"Binary not found for {platform['name']}, skipping")
continue
staging_resources = temp_dir / platform["name"] / "resources"
shutil.copytree(source_resources, staging_resources)
# Copy binary to temp to preserve original
temp_binary = temp_dir / platform["binary"]
shutil.copy2(source_binary, temp_binary)
if not self._sign_bundle(staging_resources, platform, ctx):
raise RuntimeError(f"Signing failed for {platform['name']}")
if not self._sign_binary(temp_binary, platform, ctx):
log_warning(f"Skipping {platform['name']} due to signing failure")
continue
zip_name = f"browseros_server_{self.version}_{platform['name']}.zip"
zip_path = temp_dir / zip_name
is_windows = platform["os"] == "windows"
if not create_server_bundle_zip(staging_resources, zip_path):
raise RuntimeError(f"Failed to create bundle for {platform['name']}")
if platform["os"] == "macos" and IS_MACOS():
if not notarize_macos_zip(zip_path, ctx.env):
raise RuntimeError(
f"Notarization failed for {platform['name']}"
)
if not create_server_zip(temp_binary, zip_path, is_windows):
log_error(f"Failed to create zip for {platform['name']}")
continue
log_info(f"Signing {zip_name} with Sparkle...")
signature, length = sparkle_sign_file(zip_path, ctx.env)
if not signature:
raise RuntimeError(f"Sparkle signing failed for {platform['name']}")
log_error(f"Failed to sign zip for {platform['name']}")
continue
log_success(f" {platform['name']}: {length} bytes")
signed_artifacts.append(SignedArtifact(
artifact = SignedArtifact(
platform=platform["name"],
zip_path=zip_path,
signature=signature,
length=length,
os=platform["os"],
arch=platform["arch"],
))
)
signed_artifacts.append(artifact)
if not signed_artifacts:
raise RuntimeError("OTA failed - no artifacts processed")
return signed_artifacts
log_error("No artifacts were processed successfully")
raise RuntimeError("OTA failed - no artifacts")
def _finalize_release(
self, ctx: Context, signed_artifacts: List[SignedArtifact]
) -> None:
"""Write the appcast, upload every signed zip to R2, and surface URLs."""
log_info("\n📝 Generating appcast...")
appcast_path = get_appcast_path(self.channel)
existing_appcast = parse_existing_appcast(appcast_path)
@@ -230,27 +219,27 @@ class ServerOTAModule(CommandModule):
log_info(f"\nAppcast saved to: {appcast_path}")
log_info("\n📋 Next step: Run 'browseros ota server release-appcast' to make the release live")
def _sign_bundle(
self, staging_resources: Path, platform: dict, ctx: Context
) -> bool:
"""Codesign every binary in the staged resources tree for a platform.
macOS notarization happens separately, on the outer Sparkle zip.
"""
def _sign_binary(self, binary_path: Path, platform: dict, ctx: Context) -> bool:
"""Sign binary based on platform"""
os_type = platform["os"]
if os_type == "macos":
if not IS_MACOS():
log_warning(
f"macOS signing requires macOS - leaving {platform['name']} unsigned"
)
log_warning(f"macOS signing requires macOS - skipping {platform['name']}")
return True
return sign_server_bundle_macos(
staging_resources, ctx.env, ctx.get_entitlements_dir()
)
if os_type == "windows":
return sign_server_bundle_windows(staging_resources, ctx.env)
entitlements = get_entitlements_path(ctx.root_dir)
if not sign_macos_binary(binary_path, ctx.env, entitlements):
return False
log_info("Notarizing...")
return notarize_macos_binary(binary_path, ctx.env)
elif os_type == "windows":
return sign_windows_binary(binary_path, ctx.env)
elif os_type == "linux":
log_info(f"No code signing for Linux binaries")
return True
log_info("No code signing for Linux binaries")
return True

View File

@@ -1,18 +1,12 @@
#!/usr/bin/env python3
"""Platform-specific binary signing for OTA binaries"""
import os
import shutil
import subprocess
import tempfile
from pathlib import Path
from typing import List, Optional
from typing import Optional
from ...common.env import EnvConfig
from ...common.server_binaries import (
expected_windows_binary_paths,
macos_sign_spec_for,
)
from ...common.utils import (
log_info,
log_error,
@@ -27,17 +21,16 @@ def sign_macos_binary(
binary_path: Path,
env: Optional[EnvConfig] = None,
entitlements_path: Optional[Path] = None,
*,
identifier: Optional[str] = None,
options: str = "runtime",
) -> bool:
"""Sign a macOS binary with codesign.
"""Sign a macOS binary with codesign
``identifier`` defaults to ``com.browseros.<stem>`` to preserve the
previous single-binary signature shape. Callers that have a shared sign
table (see ``common/server_binaries.py``) should pass identifier and
options derived from that table so OTA-signed and Chromium-build-signed
binaries share the same code identifier.
Args:
binary_path: Path to binary to sign
env: Environment config with certificate name
entitlements_path: Optional path to entitlements plist
Returns:
True on success, False on failure
"""
if not IS_MACOS():
log_error("macOS signing requires macOS")
@@ -53,14 +46,13 @@ def sign_macos_binary(
log_info(f"Signing {binary_path.name}...")
resolved_identifier = identifier or f"com.browseros.{binary_path.stem}"
cmd = [
"codesign",
"--sign", certificate_name,
"--force",
"--timestamp",
"--identifier", resolved_identifier,
"--options", options,
"--identifier", f"com.browseros.{binary_path.stem}",
"--options", "runtime",
]
if entitlements_path and entitlements_path.exists():
@@ -99,91 +91,48 @@ def verify_macos_signature(binary_path: Path) -> bool:
return False
def _resolve_notarization_credentials(
    env: Optional[EnvConfig],
) -> Optional[EnvConfig]:
    """Return an EnvConfig with all notarization credentials set, else None.

    A ``None`` input defaults to a fresh ``EnvConfig``. Every missing
    credential is logged before returning ``None``.
    """
    cfg = env if env is not None else EnvConfig()
    checks = (
        (cfg.macos_notarization_apple_id, "PROD_MACOS_NOTARIZATION_APPLE_ID"),
        (cfg.macos_notarization_team_id, "PROD_MACOS_NOTARIZATION_TEAM_ID"),
        (cfg.macos_notarization_password, "PROD_MACOS_NOTARIZATION_PWD"),
    )
    missing = [var_name for value, var_name in checks if not value]
    if missing:
        log_error("Missing notarization credentials:")
        for name in missing:
            log_error(f" {name} not set")
        return None
    return cfg
def _submit_notarization(submission_path: Path, env: EnvConfig) -> bool:
    """Submit *submission_path* to Apple's notarytool and wait for the verdict.

    Caller is expected to have validated the credentials already; the
    asserts below only narrow the Optional types.

    Returns True only when notarytool exits 0 AND its output reports
    "status: Accepted"; any other outcome is logged and returns False.
    """
    assert env.macos_notarization_apple_id is not None
    assert env.macos_notarization_team_id is not None
    assert env.macos_notarization_password is not None
    # Refresh the keychain profile each run. Failures are deliberately
    # ignored (check=False, output captured) — presumably an existing valid
    # profile still lets the submit step succeed; TODO confirm.
    subprocess.run(
        [
            "xcrun", "notarytool", "store-credentials", "notarytool-profile",
            "--apple-id", env.macos_notarization_apple_id,
            "--team-id", env.macos_notarization_team_id,
            "--password", env.macos_notarization_password,
        ],
        capture_output=True,
        text=True,
        check=False,
    )
    log_info("Submitting for notarization (this may take a while)...")
    # --wait blocks until Apple finishes processing the submission.
    result = subprocess.run(
        [
            "xcrun", "notarytool", "submit", str(submission_path),
            "--keychain-profile", "notarytool-profile",
            "--wait",
        ],
        capture_output=True,
        text=True,
        check=False,
    )
    if result.returncode != 0:
        log_error(f"Notarization failed: {result.stderr}")
        log_error(result.stdout)
        return False
    # The exit code alone is not trusted: the textual status in stdout is
    # checked as well before declaring success.
    if "status: Accepted" not in result.stdout:
        log_error("Notarization was not accepted")
        log_error(result.stdout)
        return False
    return True
def notarize_macos_binary(
binary_path: Path,
env: Optional[EnvConfig] = None,
) -> bool:
"""Notarize a single macOS binary with Apple.
"""Notarize a macOS binary with Apple
The binary is first wrapped in a zip via ``ditto --keepParent`` because
``notarytool`` does not accept bare executables. For an already-zipped
Sparkle bundle, call :func:`notarize_macos_zip` instead — double-wrapping
nests zips and notarytool does not descend into nested archives.
The binary must be zipped for notarization submission.
Args:
binary_path: Path to binary to notarize (will be zipped internally)
env: Environment config with notarization credentials
Returns:
True on success, False on failure
"""
if not IS_MACOS():
log_error("macOS notarization requires macOS")
return False
env = _resolve_notarization_credentials(env)
if env is None:
env = EnvConfig()
apple_id = env.macos_notarization_apple_id
team_id = env.macos_notarization_team_id
password = env.macos_notarization_password
if not all([apple_id, team_id, password]):
log_error("Missing notarization credentials:")
if not apple_id:
log_error(" PROD_MACOS_NOTARIZATION_APPLE_ID not set")
if not team_id:
log_error(" PROD_MACOS_NOTARIZATION_TEAM_ID not set")
if not password:
log_error(" PROD_MACOS_NOTARIZATION_PWD not set")
return False
log_info(f"Notarizing {binary_path.name}...")
notarize_zip: Optional[Path] = None
notarize_zip = None
try:
fd, tmp_path = tempfile.mkstemp(suffix=".zip")
import os
os.close(fd)
notarize_zip = Path(tmp_path)
@@ -197,7 +146,41 @@ def notarize_macos_binary(
log_error(f"Failed to create zip: {result.stderr}")
return False
if not _submit_notarization(notarize_zip, env):
assert apple_id is not None
assert team_id is not None
assert password is not None
subprocess.run(
[
"xcrun", "notarytool", "store-credentials", "notarytool-profile",
"--apple-id", apple_id,
"--team-id", team_id,
"--password", password,
],
capture_output=True,
text=True,
check=False,
)
log_info("Submitting for notarization (this may take a while)...")
result = subprocess.run(
[
"xcrun", "notarytool", "submit", str(notarize_zip),
"--keychain-profile", "notarytool-profile",
"--wait",
],
capture_output=True,
text=True,
check=False,
)
if result.returncode != 0:
log_error(f"Notarization failed: {result.stderr}")
log_error(result.stdout)
return False
if "status: Accepted" not in result.stdout:
log_error("Notarization was not accepted")
log_error(result.stdout)
return False
log_success(f"Notarized {binary_path.name}")
@@ -211,33 +194,6 @@ def notarize_macos_binary(
notarize_zip.unlink()
def notarize_macos_zip(zip_path: Path, env: Optional[EnvConfig] = None) -> bool:
    """Notarize a pre-built Sparkle bundle zip by submitting it directly.

    ``notarytool`` accepts ``.zip`` submissions and recursively scans the
    Mach-O binaries inside. No extra wrapping — passing this zip through
    ``ditto --keepParent`` would nest zips and Apple's service would not
    descend into the inner archive.

    Args:
        zip_path: The Sparkle bundle zip to submit.
        env: Environment config with notarization credentials; ``None``
            lets the resolver build a default one.

    Returns:
        True on success, False on failure.
    """
    if not IS_MACOS():
        log_error("macOS notarization requires macOS")
        return False
    env = _resolve_notarization_credentials(env)
    if env is None:
        # Missing credentials were already logged by the resolver.
        return False
    log_info(f"Notarizing {zip_path.name}...")
    try:
        if not _submit_notarization(zip_path, env):
            return False
        log_success(f"Notarized {zip_path.name}")
        return True
    except Exception as e:
        # Broad catch is intentional: any subprocess/OS error should surface
        # as a clean False from this release step, not a crash.
        log_error(f"Notarization failed: {e}")
        return False
def sign_windows_binary(
binary_path: Path,
env: Optional[EnvConfig] = None,
@@ -308,6 +264,7 @@ def sign_windows_binary(
signed_file = temp_output_dir / binary_path.name
if signed_file.exists():
import shutil
shutil.move(str(signed_file), str(binary_path))
try:
@@ -337,81 +294,15 @@ def sign_windows_binary(
return False
def sign_server_bundle_macos(
resources_dir: Path,
env: EnvConfig,
entitlements_root: Path,
) -> bool:
"""Codesign every known binary under ``resources_dir/bin/**``.
Unknown executables are a hard error: every regular file under
``resources/bin/`` must have an entry in ``MACOS_SERVER_BINARIES``.
This prevents silently shipping an unsigned binary when a new
third-party dep is added to the agent build without being registered
in the shared sign table. The unknown-file check runs before any
codesign call so a bad release fails in seconds rather than after
several minutes of signing.
"""
bin_dir = resources_dir / "bin"
if not bin_dir.is_dir():
log_error(f"bin dir not found: {bin_dir}")
return False
# Only Mach-O-style executables need signing; any future data/config file
# shipped under resources/bin/ (plists, shell completion, etc.) is not a
# codesign target and must not trigger the unknown-binary guard.
executables = [
p
for p in sorted(bin_dir.rglob("*"))
if p.is_file() and not p.is_symlink() and os.access(p, os.X_OK)
def get_entitlements_path(root_dir: Path) -> Optional[Path]:
"""Get path to server binary entitlements file"""
candidates = [
root_dir / "resources" / "entitlements" / "browseros-executable-entitlements.plist",
root_dir / "packages" / "browseros" / "resources" / "entitlements" / "browseros-executable-entitlements.plist",
]
unknowns = [p for p in executables if macos_sign_spec_for(p) is None]
if unknowns:
log_error(
"Unknown executables found under resources/bin/ not registered in "
"MACOS_SERVER_BINARIES (see build/common/server_binaries.py):"
)
for path in unknowns:
log_error(f" - {path.relative_to(resources_dir)}")
return False
for path in executables:
spec = macos_sign_spec_for(path)
assert spec is not None # unknowns filtered above
for candidate in candidates:
if candidate.exists():
return candidate
entitlements_path: Optional[Path] = None
if spec.entitlements:
entitlements_path = entitlements_root / spec.entitlements
if not entitlements_path.exists():
log_error(
f"Missing entitlements for {path.name}: {entitlements_path}"
)
return False
if not sign_macos_binary(
path,
env,
entitlements_path,
identifier=f"com.browseros.{spec.identifier_suffix}",
options=spec.options,
):
return False
return True
def sign_server_bundle_windows(resources_dir: Path, env: EnvConfig) -> bool:
    """Sign every expected Windows server binary under ``resources_dir/bin``.

    Every path yielded by ``expected_windows_binary_paths`` must exist on
    disk: failing hard on a missing binary keeps an incomplete Windows
    bundle from shipping as a broken OTA update with no pipeline signal.
    This mirrors the unknown-file guard applied to the macOS bundle.
    """
    for binary in expected_windows_binary_paths(resources_dir / "bin"):
        if binary.exists():
            if sign_windows_binary(binary, env):
                continue
            return False
        log_error(f"Windows binary missing (cannot sign): {binary}")
        return False
    return True
return None

View File

@@ -10,7 +10,6 @@ from typing import Optional, List, Dict, Tuple
from ...common.module import CommandModule, ValidationError
from ...common.context import Context
from ...common.env import EnvConfig
from ...common.server_binaries import macos_sign_spec_for
from ...common.utils import (
run_command as utils_run_command,
log_info,
@@ -21,19 +20,32 @@ from ...common.utils import (
join_paths,
)
# Single registry of BrowserOS Server binaries that get explicit codesigning.
# Every entry shares the same hardened-runtime options and entitlements file,
# and its identifier suffix equals the binary name, so the table is generated
# from the name list — registering a new binary is a one-token change here.
BROWSEROS_SERVER_BINARIES: Dict[str, Dict[str, str]] = {
    binary_name: {
        "identifier_suffix": binary_name,
        "options": "runtime",
        "entitlements": "browseros-executable-entitlements.plist",
    }
    for binary_name in ("browseros_server", "codex", "bun")
}
def get_browseros_server_binary_info(component_path: Path) -> Optional[Dict[str, str]]:
    """Return signing metadata for a known BrowserOS Server binary.

    The binary is looked up in ``BROWSEROS_SERVER_BINARIES`` by the
    lowercased stem of its path (``Path.stem`` drops any extension, so
    ``codex`` and ``codex.exe`` resolve to the same key).

    Args:
        component_path: Path of the binary being considered for signing.

    Returns:
        The registered spec dict (identifier suffix, signing options,
        entitlements file) or ``None`` when the binary is not one that
        requires explicit signing.
    """
    # NOTE(review): the original block fused two implementations from a diff
    # render — a spec-helper version and this dict lookup — leaving unreachable
    # statements after the first return. The hunk headers show the helper
    # import was removed, so the dict-based lookup is the surviving code.
    name = component_path.stem.lower()
    return BROWSEROS_SERVER_BINARIES.get(name)
def run_command(

View File

@@ -7,7 +7,6 @@ from typing import List, Optional
from ...common.module import CommandModule, ValidationError
from ...common.context import Context
from ...common.env import EnvConfig
from ...common.server_binaries import expected_windows_binary_paths
from ...common.utils import (
log_info,
log_error,
@@ -17,6 +16,12 @@ from ...common.utils import (
IS_WINDOWS,
)
# File names of the Windows server binaries that must be code-signed.
BROWSEROS_SERVER_BINARIES: List[str] = [
    f"{stem}.exe" for stem in ("browseros_server", "codex", "bun")
]
class WindowsSignModule(CommandModule):
produces = ["signed_installer"]
@@ -97,7 +102,7 @@ class WindowsSignModule(CommandModule):
def get_browseros_server_binary_paths(build_output_dir: Path) -> List[Path]:
    """Return absolute paths to BrowserOS Server binaries for signing.

    Args:
        build_output_dir: Root of the build output tree.

    Returns:
        One path per entry in ``BROWSEROS_SERVER_BINARIES``, rooted at the
        server resources ``bin`` directory. Existence is not checked here;
        callers decide how to treat missing binaries.
    """
    # NOTE(review): the original block carried an unreachable second return
    # (diff-render residue); the hunk header shows the
    # expected_windows_binary_paths import was removed, so the
    # list-comprehension form is the surviving implementation.
    server_dir = build_output_dir / "BrowserOSServer" / "default" / "resources" / "bin"
    return [server_dir / binary for binary in BROWSEROS_SERVER_BINARIES]
def build_mini_installer(ctx: Context) -> bool:

Some files were not shown because too many files have changed in this diff Show More