Compare commits

..

5 Commits

Author SHA1 Message Date
shivammittal274
3cd7fa2c06 ci(cli): change release workflow to manual dispatch from main
- Trigger via Actions UI with a version input (e.g. "0.1.0")
- Only runs on main branch
- Creates git tag cli/v<version> automatically
- Then GoReleaser builds all 6 binaries and creates the GitHub Release
2026-03-27 00:29:23 +05:30
shivammittal274
ac900b6b07 fix(cli): platform-native detection, launch, and install for all OSes
Detection (isBrowserOSInstalled):
- macOS: uses `open -Ra` to query Launch Services (no hardcoded paths)
- Linux: checks /usr/bin/browseros (.deb), browseros.desktop, AppImage search
- Windows: checks %LOCALAPPDATA%\BrowserOS\Application\BrowserOS.exe
  and HKCU/HKLM uninstall registry keys

Launch (startBrowserOS):
- macOS: `open -b com.browseros.BrowserOS` (bundle ID, not path)
- Linux: `browseros` binary, AppImage, or `gtk-launch browseros`
  (fixed: was using xdg-open which opens by MIME type, not desktop files)
- Windows: runs BrowserOS.exe from known Chromium per-user install path
  (fixed: was using `cmd /c start BrowserOS` which doesn't resolve)

Install (runPostInstall):
- macOS: hdiutil attach → cp -R to /Applications → hdiutil detach
- Linux: chmod +x for AppImage, dpkg -i instruction for .deb
- Windows: launches installer exe
- --deb flag now errors on non-Linux platforms

Removed auto-launch from newClient() — CLI never does surprising things.

Sources verified from:
- packages/browseros/build/common/context.py (binary names per platform)
- packages/browseros/build/modules/package/linux.py (.deb structure, .desktop file)
- packages/browseros/chromium_patches/chrome/install_static/chromium_install_modes.h
  (Windows base_app_name="BrowserOS", registry GUID, install paths)
- /Applications/BrowserOS.app/Contents/Info.plist (bundle ID)
2026-03-26 02:00:01 +05:30
shivammittal274
cb54d3aa7a refactor(cli): make launch an explicit command, remove auto-launch from newClient
- launch: new explicit command to find and open BrowserOS app
- launch: probes server.json, config, and common ports before launching
- launch: if already running, reports URL instead of launching again
- init --auto: uses port probing to find running servers
- install --deb: errors on non-Linux instead of silently downloading DMG
- error messages: guide users to launch/install/init explicitly
- removed: auto-launch from newClient() — CLI never does something surprising
2026-03-26 01:36:27 +05:30
shivammittal274
46ce8755c3 fix(cli): check health status code and add progress dots during launch
- Health check in newClient() now verifies HTTP 200, not just no error
- waitForServer prints dots during the 30s poll so users know it's working
2026-03-26 01:19:13 +05:30
shivammittal274
d80167d806 feat(cli): production-ready CLI with auto-launch, install, and cross-platform builds
- init: accept URL argument and --auto flag for non-interactive setup
- install: new command to download BrowserOS app for current platform
- launch: auto-detect and launch BrowserOS when server is not running
- discovery: prefer server.json (live) over config.yaml (may be stale)
- errors: actionable messages guiding users to init/install
- goreleaser: cross-platform builds for 6 targets (darwin/linux/windows × amd64/arm64)
- ci: GitHub Actions workflow to release CLI binaries on cli/v* tag push
2026-03-26 00:55:41 +05:30
152 changed files with 4713 additions and 12087 deletions

View File

@@ -2,7 +2,7 @@ name: PR Conventional Commit Validation
on:
pull_request:
types: [opened, edited]
types: [opened, synchronize, reopened, edited]
permissions:
pull-requests: write

View File

@@ -1,148 +0,0 @@
name: Release Agent Extension
on:
workflow_dispatch:
concurrency:
group: release-agent-extension
cancel-in-progress: false
jobs:
release:
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
defaults:
run:
working-directory: packages/browseros-agent/apps/agent
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0
- uses: oven-sh/setup-bun@v2
- name: Install dependencies
run: bun ci
working-directory: packages/browseros-agent
- name: Build and zip extension
run: bun run codegen && bun run zip
env:
VITE_PUBLIC_BROWSEROS_API: https://api.browseros.com
- name: Get version and zip path
id: version
run: |
echo "version=$(node -p "require('./package.json').version")" >> "$GITHUB_OUTPUT"
echo "release_sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"
ZIP_FILE=$(ls "$(pwd)/dist/"*-chrome.zip | head -n 1)
echo "zip_path=$ZIP_FILE" >> "$GITHUB_OUTPUT"
echo "zip_name=$(basename "$ZIP_FILE")" >> "$GITHUB_OUTPUT"
- name: Generate release notes
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
AGENT_PATH="packages/browseros-agent/apps/agent"
CURRENT_TAG="agent-extension-v${{ steps.version.outputs.version }}"
PREV_TAG=$(git tag -l "agent-extension-v*" --sort=-v:refname | grep -v "^${CURRENT_TAG}$" | head -n 1)
if [ -z "$PREV_TAG" ]; then
echo "Initial release" > /tmp/release-notes.md
else
COMMITS=$(git log "$PREV_TAG"..HEAD --pretty=format:"%H" -- "$AGENT_PATH")
if [ -z "$COMMITS" ]; then
echo "No notable changes." > /tmp/release-notes.md
else
echo "## What's Changed" > /tmp/release-notes.md
echo "" >> /tmp/release-notes.md
while IFS= read -r SHA; do
SUBJECT=$(git log -1 --pretty=format:"%s" "$SHA")
PR_NUM=$(gh api "/repos/${{ github.repository }}/commits/${SHA}/pulls" --jq '.[0].number // empty' 2>/dev/null)
# Skip PR number if already in the commit subject (squash merges include it)
if [ -n "$PR_NUM" ] && ! echo "$SUBJECT" | grep -qF "(#${PR_NUM})"; then
echo "- ${SUBJECT} (#${PR_NUM})" >> /tmp/release-notes.md
else
echo "- ${SUBJECT}" >> /tmp/release-notes.md
fi
done <<< "$COMMITS"
fi
fi
working-directory: ${{ github.workspace }}
- name: Create GitHub release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
TAG="agent-extension-v${{ steps.version.outputs.version }}"
RELEASE_SHA="${{ steps.version.outputs.release_sha }}"
TITLE="BrowserOS Agent Extension v${{ steps.version.outputs.version }}"
if git rev-parse "$TAG" >/dev/null 2>&1; then
echo "Tag $TAG already exists, skipping tag creation"
else
git tag "$TAG" "$RELEASE_SHA"
fi
if git ls-remote --tags origin "$TAG" | grep -q "$TAG"; then
echo "Tag $TAG already on remote, skipping push"
else
git push origin "$TAG"
fi
if gh release view "$TAG" >/dev/null 2>&1; then
echo "Release $TAG already exists, updating"
gh release edit "$TAG" --title "$TITLE" --notes-file /tmp/release-notes.md
gh release upload "$TAG" "${{ steps.version.outputs.zip_path }}" --clobber
else
gh release create "$TAG" \
--title "$TITLE" \
--notes-file /tmp/release-notes.md \
"${{ steps.version.outputs.zip_path }}"
fi
working-directory: ${{ github.workspace }}
- name: Update CHANGELOG.md via PR
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
VERSION="${{ steps.version.outputs.version }}"
DATE=$(date -u +"%Y-%m-%d")
BRANCH="docs/agent-extension-changelog-v${VERSION}"
CHANGELOG="packages/browseros-agent/apps/agent/CHANGELOG.md"
git checkout main
{
head -n 1 "$CHANGELOG"
echo ""
echo "## v${VERSION} (${DATE})"
echo ""
cat /tmp/release-notes.md
echo ""
tail -n +2 "$CHANGELOG"
} > /tmp/new-changelog.md
mv /tmp/new-changelog.md "$CHANGELOG"
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git checkout -b "$BRANCH"
git add "$CHANGELOG"
git commit -m "docs: update agent extension changelog for v${VERSION}"
git push origin "$BRANCH"
gh pr create \
--title "docs: update agent extension changelog for v${VERSION}" \
--body "Auto-generated changelog update for BrowserOS Agent Extension v${VERSION}." \
--base main \
--head "$BRANCH"
gh pr merge "$BRANCH" --squash --auto || true
working-directory: ${{ github.workspace }}

View File

@@ -3,25 +3,16 @@ name: Release Agent SDK
on:
workflow_dispatch:
concurrency:
group: release-agent-sdk
cancel-in-progress: false
jobs:
publish:
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
defaults:
run:
working-directory: packages/browseros-agent/packages/agent-sdk
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0
- uses: oven-sh/setup-bun@v2
@@ -40,129 +31,7 @@ jobs:
- name: Test
run: bun test
- name: Get version
id: version
run: |
echo "version=$(node -p "require('./package.json').version")" >> "$GITHUB_OUTPUT"
echo "release_sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"
- name: Generate release notes
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
SDK_PATH="packages/browseros-agent/packages/agent-sdk"
CURRENT_TAG="agent-sdk-v${{ steps.version.outputs.version }}"
# Find the previous tag, excluding the current version's tag
# (which may already exist from a prior failed run)
PREV_TAG=$(git tag -l "agent-sdk-v*" --sort=-v:refname | grep -v "^${CURRENT_TAG}$" | head -n 1)
if [ -z "$PREV_TAG" ]; then
echo "Initial release" > /tmp/release-notes.md
else
# Get commits scoped to the SDK directory
COMMITS=$(git log "$PREV_TAG"..HEAD --pretty=format:"%H" -- "$SDK_PATH")
if [ -z "$COMMITS" ]; then
echo "No notable changes." > /tmp/release-notes.md
else
echo "## What's Changed" > /tmp/release-notes.md
echo "" >> /tmp/release-notes.md
# For each commit, find the associated PR and format with author
CONTRIBUTORS=""
while IFS= read -r SHA; do
# Get commit subject and author
SUBJECT=$(git log -1 --pretty=format:"%s" "$SHA")
AUTHOR=$(git log -1 --pretty=format:"%an" "$SHA")
GITHUB_USER=$(gh api "/repos/${{ github.repository }}/commits/${SHA}" --jq '.author.login // empty' 2>/dev/null)
# Find associated PR number
PR_NUM=$(gh api "/repos/${{ github.repository }}/commits/${SHA}/pulls" --jq '.[0].number // empty' 2>/dev/null)
# Format line: skip PR number if already in the commit subject
# (squash merges include "(#123)" in the subject automatically)
if [ -n "$PR_NUM" ] && ! echo "$SUBJECT" | grep -qF "(#${PR_NUM})"; then
echo "- ${SUBJECT} (#${PR_NUM})" >> /tmp/release-notes.md
else
echo "- ${SUBJECT}" >> /tmp/release-notes.md
fi
done <<< "$COMMITS"
fi
fi
working-directory: ${{ github.workspace }}
- name: Publish
run: npm publish --access public
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Create GitHub release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
TAG="agent-sdk-v${{ steps.version.outputs.version }}"
RELEASE_SHA="${{ steps.version.outputs.release_sha }}"
TITLE="@browseros-ai/agent-sdk v${{ steps.version.outputs.version }}"
# Create or reuse tag (idempotent for re-runs)
if git rev-parse "$TAG" >/dev/null 2>&1; then
echo "Tag $TAG already exists, skipping tag creation"
else
git tag "$TAG" "$RELEASE_SHA"
fi
# Push tag (skip if already on remote)
if git ls-remote --tags origin "$TAG" | grep -q "$TAG"; then
echo "Tag $TAG already on remote, skipping push"
else
git push origin "$TAG"
fi
# Create or update release
if gh release view "$TAG" >/dev/null 2>&1; then
echo "Release $TAG already exists, updating"
gh release edit "$TAG" --title "$TITLE" --notes-file /tmp/release-notes.md
else
gh release create "$TAG" --title "$TITLE" --notes-file /tmp/release-notes.md
fi
working-directory: ${{ github.workspace }}
- name: Update CHANGELOG.md via PR
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
VERSION="${{ steps.version.outputs.version }}"
DATE=$(date -u +"%Y-%m-%d")
BRANCH="docs/agent-sdk-changelog-v${VERSION}"
CHANGELOG="packages/browseros-agent/packages/agent-sdk/CHANGELOG.md"
# Return to main before branching
git checkout main
# Use head/tail to safely insert without sed quoting issues
{
head -n 1 "$CHANGELOG"
echo ""
echo "## v${VERSION} (${DATE})"
echo ""
cat /tmp/release-notes.md
echo ""
tail -n +2 "$CHANGELOG"
} > /tmp/new-changelog.md
mv /tmp/new-changelog.md "$CHANGELOG"
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git checkout -b "$BRANCH"
git add "$CHANGELOG"
git commit -m "docs: update agent-sdk changelog for v${VERSION}"
git push origin "$BRANCH"
gh pr create \
--title "docs: update agent-sdk changelog for v${VERSION}" \
--body "Auto-generated changelog update for @browseros-ai/agent-sdk v${VERSION}." \
--base main \
--head "$BRANCH"
gh pr merge "$BRANCH" --squash --auto || true
working-directory: ${{ github.workspace }}

View File

@@ -8,18 +8,13 @@ on:
required: true
type: string
concurrency:
group: release-cli
cancel-in-progress: false
permissions:
contents: write
jobs:
release:
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
environment: release-core
permissions:
contents: write
pull-requests: write
defaults:
run:
working-directory: packages/browseros-agent/apps/cli
@@ -33,135 +28,23 @@ jobs:
with:
go-version-file: packages/browseros-agent/apps/cli/go.mod
- uses: oven-sh/setup-bun@v2
with:
bun-version: "1.3.6"
- name: Run tests
run: go test ./... -v
- name: Run vet
run: go vet ./...
- name: Build all platforms
- name: Create tag
run: |
VERSION="${{ inputs.version }}"
LDFLAGS="-s -w -X main.version=${VERSION}"
DIST="dist"
mkdir -p "$DIST"
for pair in darwin/amd64 darwin/arm64 linux/amd64 linux/arm64 windows/amd64 windows/arm64; do
OS="${pair%/*}"
ARCH="${pair#*/}"
BIN="browseros-cli"
EXT=""
if [ "$OS" = "windows" ]; then EXT=".exe"; fi
echo "Building ${OS}/${ARCH}..."
GOOS=$OS GOARCH=$ARCH CGO_ENABLED=0 go build -trimpath -ldflags "$LDFLAGS" -o "${DIST}/${BIN}${EXT}" .
ARCHIVE="browseros-cli_${VERSION}_${OS}_${ARCH}"
if [ "$OS" = "windows" ]; then
(cd "$DIST" && zip "${ARCHIVE}.zip" "${BIN}${EXT}")
else
(cd "$DIST" && tar czf "${ARCHIVE}.tar.gz" "${BIN}")
fi
rm "${DIST}/${BIN}${EXT}"
done
(cd "$DIST" && sha256sum *.tar.gz *.zip > checksums.txt)
echo "=== Built artifacts ==="
ls -lh "$DIST"
- name: Install dependencies
run: bun install
working-directory: packages/browseros-agent
- name: Upload to CDN
env:
R2_ACCOUNT_ID: ${{ secrets.R2_ACCOUNT_ID }}
R2_ACCESS_KEY_ID: ${{ secrets.R2_ACCESS_KEY_ID }}
R2_SECRET_ACCESS_KEY: ${{ secrets.R2_SECRET_ACCESS_KEY }}
R2_BUCKET: ${{ secrets.R2_BUCKET }}
R2_UPLOAD_PREFIX: cli
CLI_VERSION: ${{ inputs.version }}
run: |
bun scripts/build/cli.ts \
--release \
--version "$CLI_VERSION" \
--binaries-dir apps/cli/dist
working-directory: packages/browseros-agent
- name: Generate release notes
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
CLI_PATH="packages/browseros-agent/apps/cli"
TAG="browseros-cli-v${{ inputs.version }}"
CHANGELOG_FILE="/tmp/release-changelog.md"
PREV_TAG=$(git tag -l "browseros-cli-v*" --sort=-v:refname | grep -v "^${TAG}$" | head -n 1)
if [ -z "$PREV_TAG" ]; then
echo "Initial release of browseros-cli." > "$CHANGELOG_FILE"
else
COMMITS=$(git log "$PREV_TAG"..HEAD --pretty=format:"%H" -- "$CLI_PATH")
if [ -z "$COMMITS" ]; then
echo "No notable changes." > "$CHANGELOG_FILE"
else
echo "## What's Changed" > "$CHANGELOG_FILE"
echo "" >> "$CHANGELOG_FILE"
while IFS= read -r SHA; do
SUBJECT=$(git log -1 --pretty=format:"%s" "$SHA")
PR_NUM=$(gh api "/repos/${{ github.repository }}/commits/${SHA}/pulls" --jq '.[0].number // empty' 2>/dev/null)
if [ -n "$PR_NUM" ] && ! echo "$SUBJECT" | grep -qF "(#${PR_NUM})"; then
echo "- ${SUBJECT} (#${PR_NUM})" >> "$CHANGELOG_FILE"
else
echo "- ${SUBJECT}" >> "$CHANGELOG_FILE"
fi
done <<< "$COMMITS"
fi
fi
cat "$CHANGELOG_FILE" > /tmp/release-notes.md
cat >> /tmp/release-notes.md <<'EOF'
## Install `browseros-cli`
### macOS / Linux
```bash
curl -fsSL https://cdn.browseros.com/cli/install.sh | bash
```
### Windows
```powershell
irm https://cdn.browseros.com/cli/install.ps1 | iex
```
After install, run `browseros-cli init` to point the CLI at your BrowserOS MCP server.
EOF
working-directory: ${{ github.workspace }}
- name: Create tag and release
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
TAG="browseros-cli-v${{ inputs.version }}"
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git tag -a "cli/v${{ inputs.version }}" -m "browseros-cli v${{ inputs.version }}"
git push origin "cli/v${{ inputs.version }}"
if ! git rev-parse "$TAG" >/dev/null 2>&1; then
git tag -a "$TAG" -m "browseros-cli v${{ inputs.version }}"
git push origin "$TAG"
fi
CLI_DIST="packages/browseros-agent/apps/cli/dist"
gh release create "$TAG" \
--title "browseros-cli v${{ inputs.version }}" \
--notes-file /tmp/release-notes.md \
${CLI_DIST}/*
working-directory: ${{ github.workspace }}
- uses: goreleaser/goreleaser-action@v6
with:
version: "~> v2"
args: release --clean
workdir: packages/browseros-agent/apps/cli
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

214
README.md
View File

@@ -6,7 +6,6 @@
[![Slack](https://img.shields.io/badge/Slack-Join%20us-4A154B?logo=slack&logoColor=white)](https://dub.sh/browserOS-slack)
[![Twitter](https://img.shields.io/twitter/follow/browserOS_ai?style=social)](https://twitter.com/browseros_ai)
[![License: AGPL v3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](LICENSE)
[![Docs](https://img.shields.io/badge/Docs-docs.browseros.com-blue)](https://docs.browseros.com)
<br></br>
<a href="https://files.browseros.com/download/BrowserOS.dmg">
<img src="https://img.shields.io/badge/Download-macOS-black?style=flat&logo=apple&logoColor=white" alt="Download for macOS (beta)" />
@@ -23,183 +22,146 @@
<br />
</div>
BrowserOS is an open-source Chromium fork that runs AI agents natively. **The privacy-first alternative to ChatGPT Atlas, Perplexity Comet, and Dia.**
##
🌐 BrowserOS is an open-source Chromium fork that runs AI agents natively. **The privacy-first alternative to ChatGPT Atlas, Perplexity Comet, and Dia.**
Use your own API keys or run local models with Ollama. Your data never leaves your machine.
🔒 Use your own API keys or run local models with Ollama. Your data never leaves your machine.
> **[Documentation](https://docs.browseros.com)** · **[Discord](https://discord.gg/YKwjt5vuKr)** · **[Slack](https://dub.sh/browserOS-slack)** · **[Twitter](https://x.com/browserOS_ai)** · **[Feature Requests](https://github.com/browseros-ai/BrowserOS/issues/99)**
💡 Join our [Discord](https://discord.gg/YKwjt5vuKr) or [Slack](https://dub.sh/browserOS-slack) and help us build! Have feature requests? [Suggest here](https://github.com/browseros-ai/BrowserOS/issues/99).
## Quick Start
## Quick start
1. **Download and install** BrowserOS — [macOS](https://files.browseros.com/download/BrowserOS.dmg) · [Windows](https://files.browseros.com/download/BrowserOS_installer.exe) · [Linux (AppImage)](https://files.browseros.com/download/BrowserOS.AppImage) · [Linux (Debian)](https://cdn.browseros.com/download/BrowserOS.deb)
2. **Import your Chrome data** (optional) — bookmarks, passwords, extensions all carry over
3. **Connect your AI provider** — Claude, OpenAI, Gemini, ChatGPT Pro via OAuth, or local models via Ollama/LM Studio
1. Download and install BrowserOS:
- [macOS](https://files.browseros.com/download/BrowserOS.dmg)
- [Windows](https://files.browseros.com/download/BrowserOS_installer.exe)
- [Linux (AppImage)](https://files.browseros.com/download/BrowserOS.AppImage)
- [Linux (Debian)](https://cdn.browseros.com/download/BrowserOS.deb)
## Features
2. Import your Chrome data (optional)
| Feature | Description | Docs |
|---------|-------------|------|
| **AI Agent** | 53+ browser automation tools — navigate, click, type, extract data, all with natural language | [Guide](https://docs.browseros.com/getting-started) |
| **MCP Server** | Control the browser from Claude Code, Gemini CLI, or any MCP client | [Setup](https://docs.browseros.com/features/use-with-claude-code) |
| **Workflows** | Build repeatable browser automations with a visual graph builder | [Docs](https://docs.browseros.com/features/workflows) |
| **Cowork** | Combine browser automation with local file operations — research the web, save reports to your folder | [Docs](https://docs.browseros.com/features/cowork) |
| **Scheduled Tasks** | Run agents on autopilot — daily, hourly, or every few minutes | [Docs](https://docs.browseros.com/features/scheduled-tasks) |
| **Memory** | Persistent memory across conversations — your assistant remembers context over time | [Docs](https://docs.browseros.com/features/memory) |
| **SOUL.md** | Define your AI's personality and instructions in a single markdown file | [Docs](https://docs.browseros.com/features/soul-md) |
| **LLM Hub** | Compare Claude, ChatGPT, and Gemini responses side-by-side on any page | [Docs](https://docs.browseros.com/features/llm-chat-hub) |
| **40+ App Integrations** | Gmail, Slack, GitHub, Linear, Notion, Figma, Salesforce, and more via MCP | [Docs](https://docs.browseros.com/features/connect-apps) |
| **Vertical Tabs** | Side-panel tab management — stay organized even with 100+ tabs open | [Docs](https://docs.browseros.com/features/vertical-tabs) |
| **Ad Blocking** | uBlock Origin + Manifest V2 support — [10x more protection](https://docs.browseros.com/features/ad-blocking) than Chrome | [Docs](https://docs.browseros.com/features/ad-blocking) |
| **Cloud Sync** | Sync browser config and agent history across devices | [Docs](https://docs.browseros.com/features/sync) |
| **Skills** | Custom instruction sets that shape how your AI assistant behaves | [Docs](https://docs.browseros.com/features/skills) |
| **Smart Nudges** | Contextual suggestions to connect apps and use features at the right moment | [Docs](https://docs.browseros.com/features/smart-nudges) |
3. Connect your AI provider — use Claude, OpenAI, Gemini, or local models via Ollama and LMStudio.
4. Start automating!
## What makes BrowserOS special
- 🏠 Feels like home — same Chrome interface, all your extensions just work
- 🤖 AI agents that run on YOUR browser, not in the cloud
- 🔒 Privacy first — bring your own keys or run local models with Ollama. Your browsing history stays on your machine
- 🤝 [BrowserOS as MCP server](https://docs.browseros.com/features/use-with-claude-code) — control the browser from `claude-code`, `gemini-cli`, or any MCP client (31 tools)
- 🔄 [Workflows](https://docs.browseros.com/features/workflows) — build repeatable browser automations with a visual graph builder
- 📂 [Cowork](https://docs.browseros.com/features/cowork) — combine browser automation with local file operations. Research the web, save reports to your folder
- ⏰ [Scheduled Tasks](https://docs.browseros.com/features/scheduled-tasks) — run the agent on autopilot, daily or every few minutes
- 💬 [LLM Hub](https://docs.browseros.com/features/llm-chat-hub) — compare Claude, ChatGPT, and Gemini side-by-side on any page
- 🛡️ Built-in ad blocker — [10x more protection than Chrome](https://docs.browseros.com/features/ad-blocking) with uBlock Origin + Manifest V2 support
- 🚀 100% open source under AGPL-3.0
## Demos
### BrowserOS agent in action
### 🤖 BrowserOS agent in action
[![BrowserOS agent in action](docs/videos/browserOS-agent-in-action.gif)](https://www.youtube.com/watch?v=SoSFev5R5dI)
<br/><br/>
### Install [BrowserOS as MCP](https://docs.browseros.com/features/use-with-claude-code) and control it from `claude-code`
### 🎇 Install [BrowserOS as MCP](https://docs.browseros.com/features/use-with-claude-code) and control it from `claude-code`
https://github.com/user-attachments/assets/c725d6df-1a0d-40eb-a125-ea009bf664dc
<br/><br/>
### Use BrowserOS to chat
### 💬 Use BrowserOS to chat
https://github.com/user-attachments/assets/726803c5-8e36-420e-8694-c63a2607beca
<br/><br/>
### Use BrowserOS to scrape data
### Use BrowserOS to scrape data
https://github.com/user-attachments/assets/9f038216-bc24-4555-abf1-af2adcb7ebc0
<br/><br/>
## Install `browseros-cli`
## Why We're Building BrowserOS
Use `browseros-cli` to launch and control BrowserOS from the terminal or from AI coding agents like Claude Code.
For the first time since Netscape pioneered the web in 1994, AI gives us the chance to completely reimagine the browser. We've seen tools like Cursor deliver 10x productivity gains for developers—yet everyday browsing remains frustratingly archaic.
**macOS / Linux:**
You're likely juggling 70+ tabs, battling your browser instead of having it assist you. Routine tasks, like ordering something from Amazon or filling a form, should be handled seamlessly by AI agents.
```bash
curl -fsSL https://cdn.browseros.com/cli/install.sh | bash
```
At BrowserOS, we're convinced that AI should empower you by automating tasks locally and securely—keeping your data private. We are building the best browser for this future!
**Windows:**
## How we compare
```powershell
irm https://cdn.browseros.com/cli/install.ps1 | iex
```
<details>
<summary><b>vs Chrome</b></summary>
<br>
We're grateful to Google for open-sourcing Chromium, but Chrome hasn't evolved much in 10 years. No AI features, no automation, no MCP support.
</details>
After install, run `browseros-cli init` to connect the CLI to your running BrowserOS instance.
<details>
<summary><b>vs Brave</b></summary>
<br>
We love what Brave started, but they've spread themselves too thin with crypto, search, and VPNs. We're laser-focused on AI-powered browsing.
</details>
## LLM Providers
<details>
<summary><b>vs Arc/Dia</b></summary>
<br>
Many loved Arc, but it was closed source. When they abandoned users, there was no recourse. We're 100% open source - fork it anytime!
</details>
BrowserOS works with any LLM. Bring your own keys, use OAuth, or run models locally.
<details>
<summary><b>vs Perplexity Comet</b></summary>
<br>
They're a search/ad company. Your browser history becomes their product. We keep everything local.
</details>
| Provider | Type | Auth |
|----------|------|------|
| Kimi K2.5 | Cloud (default) | Built-in |
| ChatGPT Pro/Plus | Cloud | [OAuth](https://docs.browseros.com/features/chatgpt) |
| GitHub Copilot | Cloud | [OAuth](https://docs.browseros.com/features/github-copilot) |
| Qwen Code | Cloud | [OAuth](https://docs.browseros.com/features/qwen-code) |
| Claude (Anthropic) | Cloud | API key |
| GPT-4o / o3 (OpenAI) | Cloud | API key |
| Gemini (Google) | Cloud | API key |
| Azure OpenAI | Cloud | API key |
| AWS Bedrock | Cloud | IAM credentials |
| OpenRouter | Cloud | API key |
| Ollama | Local | [Setup](https://docs.browseros.com/features/ollama) |
| LM Studio | Local | [Setup](https://docs.browseros.com/features/lm-studio) |
## How We Compare
| | BrowserOS | Chrome | Brave | Dia | Comet | Atlas |
|---|:---:|:---:|:---:|:---:|:---:|:---:|
| Open Source | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| AI Agent | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ |
| MCP Server | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Visual Workflows | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Cowork (files + browser) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Scheduled Tasks | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Bring Your Own Keys | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Local Models (Ollama) | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Local-first Privacy | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Ad Blocking (MV2) | ✅ | ❌ | ✅ | ❌ | ✅ | ❌ |
**Detailed comparisons:**
- [BrowserOS vs Chrome DevTools MCP](https://docs.browseros.com/comparisons/chrome-devtools-mcp) — developer-focused comparison for browser automation
- [BrowserOS vs Claude Cowork](https://docs.browseros.com/comparisons/claude-cowork) — getting real work done with AI
- [BrowserOS vs OpenClaw](https://docs.browseros.com/comparisons/openclaw) — everyday AI assistance
## Architecture
BrowserOS is a monorepo with two main subsystems: the **browser** (Chromium fork) and the **agent platform** (TypeScript/Go).
```
BrowserOS/
├── packages/browseros/ # Chromium fork + build system (Python)
│ ├── chromium_patches/ # Patches applied to Chromium source
│ ├── build/ # Build CLI and modules
│ └── resources/ # Icons, entitlements, signing
├── packages/browseros-agent/ # Agent platform (TypeScript/Go)
│ ├── apps/
│ │ ├── server/ # MCP server + AI agent loop (Bun)
│ │ ├── agent/ # Browser extension UI (WXT + React)
│ │ ├── cli/ # CLI tool (Go)
│ │ ├── eval/ # Benchmark framework
│ │ └── controller-ext/ # Chrome API bridge extension
│ │
│ └── packages/
│ ├── agent-sdk/ # Node.js SDK (npm: @browseros-ai/agent-sdk)
│ ├── cdp-protocol/ # CDP type bindings
│ └── shared/ # Shared constants
```
| Package | What it does |
|---------|-------------|
| [`packages/browseros`](packages/browseros/) | Chromium fork — patches, build system, signing |
| [`apps/server`](packages/browseros-agent/apps/server/) | Bun server exposing 53+ MCP tools and running the AI agent loop |
| [`apps/agent`](packages/browseros-agent/apps/agent/) | Browser extension — new tab, side panel chat, onboarding, settings |
| [`apps/cli`](packages/browseros-agent/apps/cli/) | Go CLI — control BrowserOS from the terminal or AI coding agents |
| [`apps/eval`](packages/browseros-agent/apps/eval/) | Benchmark framework — WebVoyager, Mind2Web evaluation |
| [`agent-sdk`](packages/browseros-agent/packages/agent-sdk/) | Node.js SDK for browser automation with natural language |
| [`cdp-protocol`](packages/browseros-agent/packages/cdp-protocol/) | Type-safe Chrome DevTools Protocol bindings |
<details>
<summary><b>vs ChatGPT Atlas</b></summary>
<br>
Your browsing data could be used for ads or to train their models. We keep your history and agent interactions strictly local.
</details>
## Contributing
We'd love your help making BrowserOS better! See our [Contributing Guide](CONTRIBUTING.md) for details.
We'd love your help making BrowserOS better!
- [Report bugs](https://github.com/browseros-ai/BrowserOS/issues)
- [Suggest features](https://github.com/browseros-ai/BrowserOS/issues/99)
- [Join Discord](https://discord.gg/YKwjt5vuKr) · [Join Slack](https://dub.sh/browserOS-slack)
- [Follow on Twitter](https://x.com/browserOS_ai)
**Agent development** (TypeScript/Go) — see the [agent monorepo README](packages/browseros-agent/README.md) for setup instructions.
**Browser development** (C++/Python) — requires ~100GB disk space. See [`packages/browseros`](packages/browseros/) for build instructions.
## Credits
- [ungoogled-chromium](https://github.com/ungoogled-software/ungoogled-chromium) — BrowserOS uses some patches for enhanced privacy. Thanks to everyone behind this project!
- [The Chromium Project](https://www.chromium.org/) — at the core of BrowserOS, making it possible to exist in the first place.
- 🐛 [Report bugs](https://github.com/browseros-ai/BrowserOS/issues)
- 💡 [Suggest features](https://github.com/browseros-ai/BrowserOS/issues/99)
- 💬 [Join Discord](https://discord.gg/YKwjt5vuKr)
- 🐦 [Follow on Twitter](https://x.com/browserOS_ai)
## License
BrowserOS is open source under the [AGPL-3.0 license](LICENSE).
## Credits
- [ungoogled-chromium](https://github.com/ungoogled-software/ungoogled-chromium) - BrowserOS uses some patches for enhanced privacy. Thanks to everyone behind this project!
- [The Chromium Project](https://www.chromium.org/) - At the core of BrowserOS, making it possible to exist in the first place.
## Citation
If you use BrowserOS in your research or project, please cite:
```bibtex
@software{browseros2025,
author = {Sonti, Nithin and Sonti, Nikhil and {BrowserOS-team}},
title = {BrowserOS: The open-source Agentic browser},
url = {https://github.com/browseros-ai/BrowserOS},
year = {2025},
publisher = {GitHub},
license = {AGPL-3.0},
}
```
Copyright &copy; 2025 Felafax, Inc.
## Stargazers
Thank you to all our supporters!
[![Star History Chart](https://api.star-history.com/svg?repos=browseros-ai/BrowserOS&type=Date)](https://www.star-history.com/#browseros-ai/BrowserOS&Date)
##
<p align="center">
Built with ❤️ from San Francisco
</p>

View File

@@ -195,4 +195,3 @@ test-results/
.agent/
.llm/
.grove/
docs/plans/2026-03-24-models-dev-integration.md

View File

@@ -81,9 +81,6 @@ bun run dev:server # Build server for development
bun run dev:ext # Build extension for development
bun run dist:server # Build server for production (all targets)
bun run dist:ext # Build extension for production
# Refresh models.dev data
bun run generate:models # Fetches latest from models.dev/api.json
```
## Architecture

View File

@@ -1,6 +1,8 @@
# BrowserOS Agent
The agent platform powering [BrowserOS](https://github.com/browseros-ai/BrowserOS) — contains the MCP server, agent UI, CLI, evaluation framework, and SDK.
Monorepo for the BrowserOS-agent -- contains 3 packages: agent-UI, server (which contains the agent loop) and controller-extension (which is used by the tools within the agent loop).
> **⚠️ NOTE:** This is only a submodule, the main project is at -- https://github.com/browseros-ai/BrowserOS
## Monorepo Structure
@@ -8,25 +10,17 @@ The agent platform powering [BrowserOS](https://github.com/browseros-ai/BrowserO
apps/
server/ # Bun server - MCP endpoints + agent loop
agent/ # Agent UI (Chrome extension)
cli/ # Go CLI for controlling BrowserOS from the terminal
eval/ # Evaluation framework for benchmarking agents
controller-ext/ # BrowserOS Controller (Chrome extension for chrome.* APIs)
packages/
agent-sdk/ # Node.js SDK (@browseros-ai/agent-sdk)
cdp-protocol/ # Type-safe Chrome DevTools Protocol bindings
shared/ # Shared constants (ports, timeouts, limits)
```
| Package | Description |
|---------|-------------|
| `apps/server` | Bun server exposing MCP tools and running the agent loop |
| `apps/agent` | Agent UI Chrome extension for the chat interface |
| `apps/cli` | Go CLI — control BrowserOS from the terminal or AI coding agents |
| `apps/eval` | Benchmark framework — WebVoyager, Mind2Web evaluation |
| `apps/controller-ext` | BrowserOS Controller — bridges `chrome.*` APIs to the server via WebSocket |
| `packages/agent-sdk` | Node.js SDK for browser automation with natural language |
| `packages/cdp-protocol` | Auto-generated CDP type bindings used by the server |
| `apps/agent` | Agent UI - Chrome extension for the chat interface |
| `apps/controller-ext` | BrowserOS Controller - Chrome extension that bridges `chrome.*` APIs (tabs, bookmarks, history) to the server via WebSocket |
| `packages/shared` | Shared constants used across packages |
## Architecture

View File

@@ -1,6 +0,0 @@
# BrowserOS Agent Extension
## v0.0.52 (2026-03-26)
Initial release

View File

@@ -1,24 +1,16 @@
# BrowserOS Agent Extension
# BrowserOS Agent Chrome Extension
[![License: AGPL v3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](../../../../LICENSE)
The built-in browser extension that powers BrowserOS's AI interface — new tab with unified search, side panel chat, onboarding, and settings. Built with [WXT](https://wxt.dev) and React.
> For user-facing feature documentation, see [docs.browseros.com](https://docs.browseros.com).
The official Chrome extension for BrowserOS Agent, providing the UI layer for interacting with BrowserOS Core and Controllers. This extension enables intelligent browser automation, AI-powered search, and seamless integration with multiple LLM providers.
## Features
- **AI-Powered New Tab**: Custom new tab page with unified search across Google and AI assistants
- **Side Panel Chat**: Full-featured chat interface for interacting with BrowserOS
- **Side Panel Chat**: Full-featured chat interface for interacting with BrowserOS Core
- **Multi-Provider Support**: Connect to various LLM providers (OpenAI, Anthropic, Azure, Bedrock, and more)
- **MCP Integration**: Model Context Protocol support for extending AI capabilities
- **Visual Feedback**: Animated glow effect on tabs during AI agent operations
- **Privacy-First**: Local data handling with configurable provider settings
## How It Connects
The extension communicates with the [BrowserOS Server](../../apps/server/) running locally. The server handles the AI agent loop, MCP tools, and CDP connections — the extension provides the UI layer.
## Project Structure
```
@@ -88,20 +80,47 @@ Settings dashboard with multiple sections:
Content script that creates a visual indicator (pulsing orange glow) around the browser viewport when an AI agent is actively working on a tab.
## How Tools Are Used
### Bun
Bun is the exclusive runtime and package manager:
- All scripts use `bun run <script>` instead of npm
- Package installation via `bun install`
- Environment files automatically loaded (no dotenv needed)
- Enforced via `engines` field in `package.json`
```bash
bun install # Install dependencies
bun run dev # Development mode
bun run build # Production build
bun run lint # Run Biome linting
```
### Biome
Unified linter and formatter configured in `biome.json`:
- **Formatting**: 2-space indentation, single quotes, no semicolons
- **Linting**: Recommended rules plus custom rules for unused imports/variables
- **CSS Support**: Tailwind directives parsing enabled
- **Import Organization**: Automatic import sorting via assist actions
```bash
bun run lint # Check for issues
bun run lint:fix # Auto-fix issues
```
## Development
### Prerequisites
- [Bun](https://bun.sh) installed
- Chrome or Chromium-based browser
- BrowserOS Server running locally (for full functionality)
- BrowserOS Core running locally (for full functionality)
### Setup
```bash
# Copy environment file
cp .env.example .env.development
# Install dependencies
bun install
@@ -134,30 +153,12 @@ SENTRY_AUTH_TOKEN=your-token
### GraphQL Schema
Codegen requires a GraphQL schema. By default it uses the bundled `schema/schema.graphql`, so no extra setup is needed. If you have access to the original API source, you can set the following environment variable:
Codegen requires a GraphQL schema. By default it uses the bundled `schema/schema.graphql`, so no extra setup is needed. If you have access to the original API source, you can set the following environment variable
```env
GRAPHQL_SCHEMA_PATH=/path/to/api-repo/.../schema.graphql
```
## Development Tooling
### Bun
Bun is the exclusive runtime and package manager:
- All scripts use `bun run <script>` instead of npm
- Package installation via `bun install`
- Environment files automatically loaded (no dotenv needed)
- Enforced via `engines` field in `package.json`
### Biome
Unified linter and formatter configured in `biome.json`:
- **Formatting**: 2-space indentation, single quotes, no semicolons
- **Linting**: Recommended rules plus custom rules for unused imports/variables
- **CSS Support**: Tailwind directives parsing enabled
- **Import Organization**: Automatic import sorting via assist actions
## Scripts
| Script | Description |
@@ -168,5 +169,4 @@ Unified linter and formatter configured in `biome.json`:
| `bun run lint` | Run Biome linter |
| `bun run lint:fix` | Auto-fix linting issues |
| `bun run typecheck` | Run TypeScript type checking |
| `bun run codegen` | Generate GraphQL types |
| `bun run clean:cache` | Clear build caches |

View File

@@ -66,7 +66,7 @@ export const RunResultDialog: FC<RunResultDialogProps> = ({
return (
<Dialog open={!!run} onOpenChange={onOpenChange}>
<DialogContent className="sm:w-[70vw] sm:max-w-4xl">
<DialogContent className="sm:max-w-2xl">
<DialogHeader>
<DialogTitle className="flex items-center gap-2">
{run.status === 'completed' ? (
@@ -94,7 +94,7 @@ export const RunResultDialog: FC<RunResultDialogProps> = ({
<p className="text-destructive text-sm">{run.result}</p>
</div>
) : run.result ? (
<div className="prose prose-sm dark:prose-invert [&_[data-streamdown='code-block']]:!w-full [&_[data-streamdown='table-wrapper']]:!w-full max-w-none break-words rounded-lg border border-border bg-muted/50 p-4 [&_[data-streamdown='table-wrapper']]:overflow-x-auto">
<div className="prose prose-sm dark:prose-invert [&_[data-streamdown='code-block']]:!w-full [&_[data-streamdown='table-wrapper']]:!w-full max-w-none break-words rounded-lg border border-border bg-muted/50 p-4">
<MessageResponse>{run.result}</MessageResponse>
</div>
) : (

View File

@@ -14,7 +14,7 @@ export const CreditBadge: FC<CreditBadgeProps> = ({ credits, onClick }) => {
type="button"
onClick={onClick}
className={cn(
'inline-flex cursor-pointer items-center gap-1 rounded-md px-1.5 py-0.5 font-medium text-xs transition-colors hover:bg-muted/50',
'inline-flex items-center gap-1 rounded-md px-1.5 py-0.5 font-medium text-xs transition-colors hover:bg-muted/50',
getCreditTextColor(credits),
)}
title={`${credits} credits remaining`}

View File

@@ -17,7 +17,7 @@ export const McpPromoBanner: FC = () => {
}
return (
<div className="flex items-center gap-4 rounded-xl border border-border bg-card p-4 shadow-sm transition-all hover:shadow-md">
<div className="relative flex items-center gap-4 rounded-xl border border-border bg-card p-4 shadow-sm transition-all hover:shadow-md">
<div className="flex h-10 w-10 shrink-0 items-center justify-center rounded-lg bg-[var(--accent-orange)]/10">
<Server className="h-5 w-5 text-[var(--accent-orange)]" />
</div>
@@ -48,7 +48,7 @@ export const McpPromoBanner: FC = () => {
<button
type="button"
onClick={() => setDismissed(true)}
className="shrink-0 rounded-sm p-1 text-muted-foreground opacity-50 transition-opacity hover:opacity-100"
className="absolute top-2 right-2 rounded-sm p-1 text-muted-foreground opacity-50 transition-opacity hover:opacity-100"
>
<X className="h-3.5 w-3.5" />
</button>

View File

@@ -1,13 +1,6 @@
import { zodResolver } from '@hookform/resolvers/zod'
import {
CheckCircle2,
ChevronDown,
ExternalLink,
Loader2,
SearchIcon,
XCircle,
} from 'lucide-react'
import { type FC, useEffect, useRef, useState } from 'react'
import { CheckCircle2, ExternalLink, Loader2, XCircle } from 'lucide-react'
import { type FC, useEffect, useState } from 'react'
import { useForm } from 'react-hook-form'
import { z } from 'zod/v3'
import { Button } from '@/components/ui/button'
@@ -54,12 +47,7 @@ import {
import { type TestResult, testProvider } from '@/lib/llm-providers/testProvider'
import type { LlmProviderConfig, ProviderType } from '@/lib/llm-providers/types'
import { track } from '@/lib/metrics/track'
import { cn } from '@/lib/utils'
import {
getModelContextLength,
getModelsForProvider,
type ModelInfo,
} from './models'
import { getModelContextLength, getModelOptions } from './models'
const providerTypeEnum = z.enum([
'moonshot',
@@ -175,107 +163,6 @@ export const providerFormSchema = z
*/
export type ProviderFormValues = z.infer<typeof providerFormSchema>
/**
 * Formats a context-window size (in tokens) as a compact label:
 * 1,000,000+ -> "1M" / "1.5M", 1,000+ -> "128K", otherwise the raw number.
 */
function formatContextWindow(tokens: number): string {
  const MILLION = 1000000
  const THOUSAND = 1000
  if (tokens >= MILLION) {
    // Whole millions get no decimal ("1M"); anything else keeps one ("1.5M").
    const decimals = tokens % MILLION === 0 ? 0 : 1
    return `${(tokens / MILLION).toFixed(decimals)}M`
  }
  if (tokens >= THOUSAND) {
    return `${Math.round(tokens / THOUSAND)}K`
  }
  return `${tokens}`
}
// Inline searchable model picker rendered in place of the model dropdown.
// Shows the known models for the current provider, filters them as the user
// types, and lets a non-matching string be submitted as a custom model ID.
function ModelPickerList({
  models,
  selectedModelId,
  onSelect,
  onCustomSubmit,
  onClose,
}: {
  // Known models for the currently selected provider type.
  models: ModelInfo[]
  // Model ID currently held by the form; used to highlight the matching row.
  selectedModelId: string
  // Invoked when the user clicks one of the listed models.
  onSelect: (modelId: string) => void
  // Invoked when the user presses Enter with non-empty search text,
  // treating that text as a custom model ID.
  onCustomSubmit: (modelId: string) => void
  // Invoked to dismiss the picker (Escape key or click outside).
  onClose: () => void
}) {
  const [search, setSearch] = useState('')
  const inputRef = useRef<HTMLInputElement>(null)
  const containerRef = useRef<HTMLDivElement>(null)
  // Focus the search input as soon as the picker mounts.
  useEffect(() => {
    inputRef.current?.focus()
  }, [])
  // Close the picker on any mousedown outside the container; the listener is
  // re-registered if onClose changes and removed on unmount.
  useEffect(() => {
    const handleClickOutside = (e: MouseEvent) => {
      if (
        containerRef.current &&
        !containerRef.current.contains(e.target as Node)
      ) {
        onClose()
      }
    }
    document.addEventListener('mousedown', handleClickOutside)
    return () => document.removeEventListener('mousedown', handleClickOutside)
  }, [onClose])
  // Case-insensitive substring filter over model IDs; empty query shows all.
  const query = search.toLowerCase()
  const filtered = query
    ? models.filter((m) => m.modelId.toLowerCase().includes(query))
    : models
  // Enter submits the typed text as a custom model ID; Escape closes.
  const handleKeyDown = (e: React.KeyboardEvent) => {
    if (e.key === 'Enter' && search) {
      e.preventDefault()
      onCustomSubmit(search)
    }
    if (e.key === 'Escape') {
      onClose()
    }
  }
  return (
    <div ref={containerRef} className="rounded-md border">
      <div className="flex items-center gap-2 border-b px-3">
        <SearchIcon className="h-4 w-4 shrink-0 text-muted-foreground opacity-50" />
        <input
          ref={inputRef}
          type="text"
          value={search}
          onChange={(e) => setSearch(e.target.value)}
          onKeyDown={handleKeyDown}
          placeholder="Search or type a custom model ID..."
          className="flex h-9 w-full bg-transparent py-2 text-sm outline-none placeholder:text-muted-foreground"
        />
      </div>
      <div className="max-h-[200px] overflow-y-auto">
        {filtered.length > 0 ? (
          filtered.map((model) => {
            const isSelected = selectedModelId === model.modelId
            return (
              <button
                key={model.modelId}
                type="button"
                onClick={() => onSelect(model.modelId)}
                className={cn(
                  'flex w-full items-center justify-between px-3 py-2 text-left text-sm transition-colors hover:bg-accent',
                  isSelected && 'bg-accent font-medium',
                )}
              >
                <span className="truncate">{model.modelId}</span>
                <span className="ml-2 shrink-0 rounded-md bg-muted px-1.5 py-0.5 font-mono text-[10px] text-muted-foreground">
                  {formatContextWindow(model.contextLength)}
                </span>
              </button>
            )
          })
        ) : (
          <div className="px-3 py-6 text-center text-muted-foreground text-sm">
            No models match. Press Enter to use &quot;{search}&quot;
          </div>
        )}
      </div>
    </div>
  )
}
/**
* Props for NewProviderDialog
* @public
@@ -301,9 +188,9 @@ export const NewProviderDialog: FC<NewProviderDialogProps> = ({
initialValues,
onSave,
}) => {
const [isCustomModel, setIsCustomModel] = useState(false)
const [isTesting, setIsTesting] = useState(false)
const [testResult, setTestResult] = useState<TestResult | null>(null)
const [modelListOpen, setModelListOpen] = useState(false)
const { supports } = useCapabilities()
const { baseUrl: agentServerUrl } = useAgentServerUrl()
const kimiLaunch = useKimiLaunch()
@@ -374,7 +261,8 @@ export const NewProviderDialog: FC<NewProviderDialogProps> = ({
watchedSessionToken,
])
const modelInfoList = getModelsForProvider(watchedType as ProviderType)
// Get model options for current provider type
const modelOptions = getModelOptions(watchedType as ProviderType)
// Handle provider type change (user-initiated via Select)
const handleTypeChange = (newType: ProviderType) => {
@@ -384,13 +272,14 @@ export const NewProviderDialog: FC<NewProviderDialogProps> = ({
form.setValue('baseUrl', defaultUrl)
}
form.setValue('modelId', '')
setIsCustomModel(false)
}
// Auto-fill context window when model changes (only for new providers)
useEffect(() => {
if (initialValues?.id) return
if (watchedModelId) {
if (watchedModelId && watchedModelId !== 'custom') {
const contextLength = getModelContextLength(
watchedType as ProviderType,
watchedModelId,
@@ -401,6 +290,17 @@ export const NewProviderDialog: FC<NewProviderDialogProps> = ({
}
}, [watchedModelId, watchedType, form, initialValues?.id])
// Handle model selection (including custom option)
const handleModelChange = (value: string) => {
if (value === 'custom') {
setIsCustomModel(true)
form.setValue('modelId', '')
} else {
setIsCustomModel(false)
form.setValue('modelId', value)
}
}
// Reset form when initialValues change
useEffect(() => {
if (initialValues) {
@@ -425,6 +325,7 @@ export const NewProviderDialog: FC<NewProviderDialogProps> = ({
reasoningEffort: initialValues.reasoningEffort || 'high',
reasoningSummary: initialValues.reasoningSummary || 'auto',
})
setIsCustomModel(false)
}
}, [initialValues, form])
@@ -451,6 +352,7 @@ export const NewProviderDialog: FC<NewProviderDialogProps> = ({
reasoningEffort: 'high',
reasoningSummary: 'auto',
})
setIsCustomModel(false)
}
// Clear test result when dialog opens/closes
setTestResult(null)
@@ -909,51 +811,52 @@ export const NewProviderDialog: FC<NewProviderDialogProps> = ({
control={form.control}
name="modelId"
render={({ field }) => (
<FormItem className="flex flex-col">
<FormItem>
<FormLabel>Model *</FormLabel>
{modelInfoList.length === 0 ? (
<FormControl>
<Input
placeholder={
watchedType === 'azure'
? 'Enter your deployment name'
: watchedType === 'bedrock'
? 'e.g., anthropic.claude-3-5-sonnet-20241022-v2:0'
: 'Enter model ID'
}
{...field}
/>
</FormControl>
) : modelListOpen ? (
<ModelPickerList
models={modelInfoList}
selectedModelId={field.value}
onSelect={(modelId) => {
form.setValue('modelId', modelId)
setModelListOpen(false)
}}
onCustomSubmit={(modelId) => {
form.setValue('modelId', modelId)
setModelListOpen(false)
}}
onClose={() => setModelListOpen(false)}
/>
) : (
<button
type="button"
onClick={() => setModelListOpen(true)}
className={cn(
'flex h-9 w-full items-center justify-between rounded-md border border-input bg-transparent px-3 py-1 text-sm shadow-xs',
field.value
? 'text-foreground'
: 'text-muted-foreground',
{isCustomModel || modelOptions.length === 1 ? (
<>
<FormControl>
<Input
placeholder={
watchedType === 'azure'
? 'Enter your deployment name'
: watchedType === 'bedrock'
? 'e.g., anthropic.claude-3-5-sonnet-20241022-v2:0'
: 'Enter custom model ID'
}
{...field}
/>
</FormControl>
{modelOptions.length > 1 && (
<Button
type="button"
variant="link"
size="sm"
className="h-auto p-0 text-xs"
onClick={() => setIsCustomModel(false)}
>
Back to model list
</Button>
)}
</>
) : (
<Select
onValueChange={handleModelChange}
value={field.value}
>
<span className="truncate">
{field.value || 'Select a model...'}
</span>
<ChevronDown className="ml-2 h-4 w-4 shrink-0 opacity-50" />
</button>
<FormControl>
<SelectTrigger className="w-full">
<SelectValue placeholder="Select a model" />
</SelectTrigger>
</FormControl>
<SelectContent>
{modelOptions.map((modelId) => (
<SelectItem key={modelId} value={modelId}>
{modelId === 'custom' ? '+ Custom model' : modelId}
</SelectItem>
))}
</SelectContent>
</Select>
)}
<FormMessage />
</FormItem>

View File

@@ -1,21 +1,98 @@
import {
getModelsDevModels,
type ModelsDevModel,
} from '@/lib/llm-providers/models-dev'
import type { ProviderType } from '@/lib/llm-providers/types'
/**
* Model information with context length
*/
export interface ModelInfo {
modelId: string
contextLength: number
supportsImages?: boolean
supportsReasoning?: boolean
supportsToolCall?: boolean
}
const CUSTOM_PROVIDER_MODELS: Partial<Record<ProviderType, ModelInfo[]>> = {
browseros: [{ modelId: 'browseros-auto', contextLength: 200000 }],
/**
* Models data organized by provider type (matches backend AIProvider enum)
*/
export interface ModelsData {
anthropic: ModelInfo[]
openai: ModelInfo[]
'openai-compatible': ModelInfo[]
google: ModelInfo[]
openrouter: ModelInfo[]
azure: ModelInfo[]
ollama: ModelInfo[]
lmstudio: ModelInfo[]
bedrock: ModelInfo[]
browseros: ModelInfo[]
moonshot: ModelInfo[]
'chatgpt-pro': ModelInfo[]
'github-copilot': ModelInfo[]
'qwen-code': ModelInfo[]
}
/**
* Available models per provider with context lengths
* Based on: https://github.com/browseros-ai/BrowserOS-agent/blob/main/src/options/data/models.ts
*/
export const MODELS_DATA: ModelsData = {
moonshot: [{ modelId: 'kimi-k2.5', contextLength: 200000 }],
anthropic: [
{ modelId: 'claude-opus-4-5-20251101', contextLength: 200000 },
{ modelId: 'claude-haiku-4-5-20251001', contextLength: 200000 },
{ modelId: 'claude-sonnet-4-5-20250929', contextLength: 200000 },
{ modelId: 'claude-sonnet-4-20250514', contextLength: 200000 },
{ modelId: 'claude-opus-4-20250514', contextLength: 200000 },
{ modelId: 'claude-3-7-sonnet-20250219', contextLength: 200000 },
{ modelId: 'claude-3-5-haiku-20241022', contextLength: 200000 },
],
openai: [
{ modelId: 'gpt-5.2', contextLength: 200000 },
{ modelId: 'gpt-5.2-pro', contextLength: 200000 },
{ modelId: 'gpt-5', contextLength: 200000 },
{ modelId: 'gpt-5-mini', contextLength: 200000 },
{ modelId: 'gpt-5-nano', contextLength: 200000 },
{ modelId: 'gpt-4.1', contextLength: 200000 },
{ modelId: 'gpt-4.1-mini', contextLength: 200000 },
{ modelId: 'o4-mini', contextLength: 200000 },
{ modelId: 'o3-mini', contextLength: 200000 },
{ modelId: 'gpt-4o', contextLength: 128000 },
{ modelId: 'gpt-4o-mini', contextLength: 128000 },
],
'openai-compatible': [],
ollama: [],
google: [
{ modelId: 'gemini-3-pro-preview', contextLength: 1048576 },
{ modelId: 'gemini-3-flash-preview', contextLength: 1048576 },
{ modelId: 'gemini-2.5-flash', contextLength: 1048576 },
{ modelId: 'gemini-2.5-pro', contextLength: 1048576 },
],
openrouter: [
{ modelId: 'google/gemini-3-pro-preview', contextLength: 1048576 },
{ modelId: 'google/gemini-3-flash-preview', contextLength: 1048576 },
{ modelId: 'google/gemini-2.5-flash', contextLength: 1048576 },
{ modelId: 'anthropic/claude-opus-4.5', contextLength: 200000 },
{ modelId: 'anthropic/claude-haiku-4.5', contextLength: 200000 },
{ modelId: 'anthropic/claude-sonnet-4.5', contextLength: 200000 },
{ modelId: 'anthropic/claude-sonnet-4', contextLength: 200000 },
{ modelId: 'anthropic/claude-3.7-sonnet', contextLength: 200000 },
{ modelId: 'openai/gpt-4o', contextLength: 128000 },
{ modelId: 'openai/gpt-oss-120b', contextLength: 128000 },
{ modelId: 'openai/gpt-oss-20b', contextLength: 128000 },
{ modelId: 'qwen/qwen3-14b', contextLength: 131072 },
{ modelId: 'qwen/qwen3-8b', contextLength: 131072 },
],
azure: [],
ollama: [
{ modelId: 'qwen3:4b', contextLength: 262144 },
{ modelId: 'qwen3:8b', contextLength: 40960 },
{ modelId: 'qwen3:14b', contextLength: 40960 },
{ modelId: 'gpt-oss:20b', contextLength: 128000 },
{ modelId: 'gpt-oss:120b', contextLength: 128000 },
],
lmstudio: [
{ modelId: 'openai/gpt-oss-20b', contextLength: 128000 },
{ modelId: 'openai/gpt-oss-120b', contextLength: 128000 },
{ modelId: 'qwen/qwen3-vl-8b', contextLength: 131072 },
],
bedrock: [],
browseros: [{ modelId: 'browseros-auto', contextLength: 200000 }],
'chatgpt-pro': [
{ modelId: 'gpt-5.4', contextLength: 400000 },
{ modelId: 'gpt-5.3-codex', contextLength: 400000 },
@@ -26,6 +103,32 @@ const CUSTOM_PROVIDER_MODELS: Partial<Record<ProviderType, ModelInfo[]>> = {
{ modelId: 'gpt-5.1-codex-mini', contextLength: 400000 },
{ modelId: 'gpt-5.1', contextLength: 200000 },
],
'github-copilot': [
// Free tier (unlimited with Pro)
{ modelId: 'gpt-5-mini', contextLength: 128000 },
{ modelId: 'claude-haiku-4.5', contextLength: 128000 },
{ modelId: 'gpt-4o', contextLength: 64000 },
{ modelId: 'gpt-4.1', contextLength: 64000 },
// Premium models (Pro: 300/mo, Pro+: 1500/mo)
{ modelId: 'claude-sonnet-4.6', contextLength: 128000 },
{ modelId: 'claude-sonnet-4.5', contextLength: 128000 },
{ modelId: 'claude-sonnet-4', contextLength: 128000 },
{ modelId: 'claude-opus-4.6', contextLength: 128000 },
{ modelId: 'claude-opus-4.5', contextLength: 128000 },
{ modelId: 'gemini-2.5-pro', contextLength: 128000 },
{ modelId: 'gemini-3-pro-preview', contextLength: 128000 },
{ modelId: 'gemini-3-flash-preview', contextLength: 128000 },
{ modelId: 'gemini-3.1-pro-preview', contextLength: 128000 },
{ modelId: 'gpt-5.4', contextLength: 272000 },
{ modelId: 'gpt-5.4-mini', contextLength: 128000 },
{ modelId: 'gpt-5.3-codex', contextLength: 272000 },
{ modelId: 'gpt-5.2-codex', contextLength: 272000 },
{ modelId: 'gpt-5.2', contextLength: 128000 },
{ modelId: 'gpt-5.1-codex', contextLength: 128000 },
{ modelId: 'gpt-5.1-codex-max', contextLength: 128000 },
{ modelId: 'gpt-5.1', contextLength: 128000 },
{ modelId: 'grok-code-fast-1', contextLength: 128000 },
],
'qwen-code': [
{ modelId: 'coder-model', contextLength: 1000000 },
{ modelId: 'qwen3-coder-plus', contextLength: 1000000 },
@@ -34,23 +137,25 @@ const CUSTOM_PROVIDER_MODELS: Partial<Record<ProviderType, ModelInfo[]>> = {
],
}
/**
 * Adapts a models.dev record to the local ModelInfo shape
 * (renaming id -> modelId and contextWindow -> contextLength).
 */
function fromModelsDevModel(m: ModelsDevModel): ModelInfo {
  const info: ModelInfo = {
    modelId: m.id,
    contextLength: m.contextWindow,
    supportsImages: m.supportsImages,
    supportsReasoning: m.supportsReasoning,
    supportsToolCall: m.supportsToolCall,
  }
  return info
}
/**
* Get models for a specific provider type
*/
export function getModelsForProvider(providerType: ProviderType): ModelInfo[] {
const custom = CUSTOM_PROVIDER_MODELS[providerType]
if (custom !== undefined) return custom
return getModelsDevModels(providerType).map(fromModelsDevModel)
return MODELS_DATA[providerType] || []
}
/**
* Get model options for select dropdown (model IDs + custom option)
*/
/**
 * Builds the dropdown option list for a provider: every known model ID
 * followed by a trailing 'custom' sentinel. Providers with no known
 * models offer only 'custom'.
 */
export function getModelOptions(providerType: ProviderType): string[] {
  const ids = getModelsForProvider(providerType).map((model) => model.modelId)
  if (ids.length === 0) {
    return ['custom']
  }
  return [...ids, 'custom']
}
/**
* Get context length for a specific model
*/
export function getModelContextLength(
providerType: ProviderType,
modelId: string,
@@ -59,3 +164,14 @@ export function getModelContextLength(
const model = models.find((m) => m.modelId === modelId)
return model?.contextLength
}
/**
* Check if model ID is a custom (user-entered) value
*/
/**
 * Returns true when modelId is not among the known models for the given
 * provider, i.e. it is a user-entered (custom) value.
 */
export function isCustomModel(
  providerType: ProviderType,
  modelId: string,
): boolean {
  for (const model of getModelsForProvider(providerType)) {
    if (model.modelId === modelId) {
      return false
    }
  }
  return true
}

View File

@@ -1,5 +1,5 @@
import { useQueryClient } from '@tanstack/react-query'
import { clear } from 'idb-keyval'
import localforage from 'localforage'
import { Loader2 } from 'lucide-react'
import type { FC } from 'react'
import { useEffect } from 'react'
@@ -25,7 +25,7 @@ export const LogoutPage: FC = () => {
await providersStorage.removeValue()
await scheduledJobStorage.removeValue()
queryClient.clear()
await clear()
await localforage.clear()
resetIdentity()
await signOut()

View File

@@ -169,15 +169,8 @@ export const NewTabChat: FC = () => {
onDismissJtbdPopup={() => {}}
/>
)}
{agentUrlError && (
<ChatError
error={agentUrlError}
providerType={selectedProvider?.type}
/>
)}
{chatError && (
<ChatError error={chatError} providerType={selectedProvider?.type} />
)}
{agentUrlError && <ChatError error={agentUrlError} />}
{chatError && <ChatError error={chatError} />}
</main>
<div className="mx-auto w-full max-w-3xl flex-shrink-0 px-4 pb-2">

View File

@@ -32,7 +32,6 @@ const RemoteChatHistory: FC<{ userId: string }> = ({ userId }) => {
const {
data: graphqlData,
isLoading: isLoadingConversations,
isFetching,
hasNextPage,
isFetchingNextPage,
fetchNextPage,
@@ -113,7 +112,6 @@ const RemoteChatHistory: FC<{ userId: string }> = ({ userId }) => {
hasNextPage={hasNextPage}
isFetchingNextPage={isFetchingNextPage}
onLoadMore={fetchNextPage}
isRefreshing={isFetching && !isLoadingConversations}
/>
)
}

View File

@@ -12,7 +12,6 @@ interface ConversationListProps {
hasNextPage?: boolean
isFetchingNextPage?: boolean
onLoadMore?: () => void
isRefreshing?: boolean
}
export const ConversationList: FC<ConversationListProps> = ({
@@ -22,7 +21,6 @@ export const ConversationList: FC<ConversationListProps> = ({
hasNextPage,
isFetchingNextPage,
onLoadMore,
isRefreshing,
}) => {
const loadMoreRef = useRef<HTMLDivElement>(null)
@@ -59,12 +57,6 @@ export const ConversationList: FC<ConversationListProps> = ({
return (
<main className="mt-4 flex h-full flex-1 flex-col space-y-4 overflow-y-auto">
<div className="w-full p-3">
{isRefreshing && (
<div className="flex items-center justify-center gap-2 pb-3 text-muted-foreground text-xs">
<Loader2 className="h-3 w-3 animate-spin" />
<span>Fetching latest conversations</span>
</div>
)}
{!hasConversations ? (
<div className="flex flex-col items-center justify-center py-12 text-center">
<MessageSquare className="mb-3 h-10 w-10 text-muted-foreground/50" />

View File

@@ -11,7 +11,7 @@ export const GetConversationsForHistoryDocument = graphql(`
nodes {
rowId
lastMessagedAt
conversationMessages(first: 2, orderBy: ORDER_INDEX_DESC) {
conversationMessages(last: 5, orderBy: ORDER_INDEX_ASC) {
nodes {
message
}

View File

@@ -224,12 +224,7 @@ export const Chat = () => {
onDismissJtbdPopup={onDismissJtbdPopup}
/>
)}
{agentUrlError && (
<ChatError
error={agentUrlError}
providerType={selectedProvider?.type}
/>
)}
{agentUrlError && <ChatError error={agentUrlError} />}
{chatError && (
<ChatError error={chatError} providerType={selectedProvider?.type} />
)}

View File

@@ -34,9 +34,11 @@ function parseErrorMessage(
} {
const isBrowserosProvider = providerType === 'browseros'
// All chat requests go through the local BrowserOS agent server, so any
// fetch failure is always a local connection issue.
if (message.includes('Failed to fetch') || message.includes('fetch failed')) {
// Detect MCP server connection failures (universal — affects all providers)
if (
(message.includes('Failed to fetch') || message.includes('fetch failed')) &&
message.includes('127.0.0.1')
) {
return {
text: 'Unable to connect to BrowserOS agent. Follow below instructions.',
url: 'https://docs.browseros.com/troubleshooting/connection-issues',

View File

@@ -76,6 +76,8 @@ export interface ChatSessionOptions {
isIntegrationsSynced?: boolean
}
const NEWTAB_SYSTEM_PROMPT = `IMPORTANT: The user is chatting from the New Tab page. When performing browser actions, ALWAYS open content in a NEW TAB rather than navigating the current tab. The user's new tab page should remain accessible.`
export const useChatSession = (options?: ChatSessionOptions) => {
const {
selectedLlmProviderRef,
@@ -342,8 +344,12 @@ export const useChatSession = (options?: ChatSessionOptions) => {
reasoningEffort: provider?.reasoningEffort,
reasoningSummary: provider?.reasoningSummary,
browserContext,
origin: options?.origin ?? 'sidepanel',
userSystemPrompt: personalizationRef.current,
userSystemPrompt:
options?.origin === 'newtab'
? [personalizationRef.current, NEWTAB_SYSTEM_PROMPT]
.filter(Boolean)
.join('\n\n')
: personalizationRef.current,
userWorkingDir: workingDirRef.current,
supportsImages: provider?.supportsImages,
previousConversation,

View File

@@ -1,10 +1,7 @@
import { createAsyncStoragePersister } from '@tanstack/query-async-storage-persister'
import { QueryClient } from '@tanstack/react-query'
import {
type AsyncStorage,
PersistQueryClientProvider,
} from '@tanstack/react-query-persist-client'
import { del, get, set } from 'idb-keyval'
import { PersistQueryClientProvider } from '@tanstack/react-query-persist-client'
import localforage from 'localforage'
import type { FC, ReactNode } from 'react'
const queryClient = new QueryClient({
@@ -15,14 +12,8 @@ const queryClient = new QueryClient({
},
})
const idbStorage: AsyncStorage<string> = {
getItem: (key: string) => get<string>(key).then((v) => v ?? null),
setItem: (key: string, value: string) => set(key, value),
removeItem: (key: string) => del(key),
}
const asyncStoragePersister = createAsyncStoragePersister({
storage: idbStorage,
storage: localforage,
})
export const QueryProvider: FC<{ children: ReactNode }> = ({ children }) => {

View File

@@ -1,35 +0,0 @@
// Typed accessors over the vendored models.dev dataset
// (./models-dev-data.json), keyed by provider ID.
import data from './models-dev-data.json'

// One model entry as recorded by models.dev.
export interface ModelsDevModel {
  id: string
  name: string
  contextWindow: number
  maxOutput: number
  supportsImages: boolean
  supportsReasoning: boolean
  supportsToolCall: boolean
  inputCost?: number
  outputCost?: number
}

// One provider entry: display name, optional API base URL, docs link,
// and the provider's model catalog.
export interface ModelsDevProvider {
  name: string
  api?: string
  doc: string
  models: ModelsDevModel[]
}

// The raw JSON cast to a provider-ID -> provider map
// (presumably IDs like 'openai', 'anthropic' — matches callers' usage).
const modelsDevData: Record<string, ModelsDevProvider> = data as Record<
  string,
  ModelsDevProvider
>

/**
 * Looks up the models.dev entry for a provider ID.
 * Returns undefined when the provider is not in the dataset.
 */
export function getModelsDevProvider(
  providerId: string,
): ModelsDevProvider | undefined {
  return modelsDevData[providerId]
}

/**
 * Returns the model list for a provider ID, or an empty array when the
 * provider is unknown.
 */
export function getModelsDevModels(providerId: string): ModelsDevModel[] {
  return modelsDevData[providerId]?.models ?? []
}

View File

@@ -1,4 +1,3 @@
import { getModelsDevProvider } from './models-dev'
import type { ProviderType } from './types'
/**
@@ -16,30 +15,6 @@ export interface ProviderTemplate {
apiKeyUrl?: string
}
/**
 * Builds a ProviderTemplate by merging models.dev metadata for providerId
 * with per-provider overrides. The overrides' defaultModelId selects which
 * models.dev model supplies image support and context window; missing data
 * falls back to sensible defaults (images: true, context: 128000).
 */
function enrichTemplate(
  providerId: ProviderType,
  overrides: {
    defaultModelId: string
    defaultBaseUrl?: string
    apiKeyUrl?: string
    setupGuideUrl?: string
  },
): ProviderTemplate {
  const provider = getModelsDevProvider(providerId)
  const defaultModel = provider?.models.find(
    (entry) => entry.id === overrides.defaultModelId,
  )
  const template: ProviderTemplate = {
    id: providerId,
    name: provider?.name ?? providerId,
    defaultBaseUrl: overrides.defaultBaseUrl ?? provider?.api ?? '',
    defaultModelId: overrides.defaultModelId,
    supportsImages: defaultModel?.supportsImages ?? true,
    contextWindow: defaultModel?.contextWindow ?? 128000,
  }
  // Optional URLs are only attached when provided (and non-empty), matching
  // the conditional-spread behavior of the original object literal.
  if (overrides.apiKeyUrl) {
    template.apiKeyUrl = overrides.apiKeyUrl
  }
  if (overrides.setupGuideUrl) {
    template.setupGuideUrl = overrides.setupGuideUrl
  }
  return template
}
/**
* Available provider templates for quick setup
* @public
@@ -82,12 +57,17 @@ export const providerTemplates: ProviderTemplate[] = [
apiKeyUrl: 'https://platform.moonshot.ai/console/api-keys',
setupGuideUrl: 'https://platform.moonshot.ai/console/api-keys',
},
enrichTemplate('openai', {
defaultModelId: 'gpt-5',
{
id: 'openai',
name: 'OpenAI',
defaultBaseUrl: 'https://api.openai.com/v1',
defaultModelId: 'gpt-4',
supportsImages: true,
contextWindow: 128000,
apiKeyUrl: 'https://platform.openai.com/api-keys',
setupGuideUrl:
'https://docs.browseros.com/features/bring-your-own-llm#openai',
}),
},
{
id: 'openai-compatible',
name: 'OpenAI Compatible',
@@ -96,18 +76,28 @@ export const providerTemplates: ProviderTemplate[] = [
supportsImages: true,
contextWindow: 128000,
},
enrichTemplate('anthropic', {
defaultModelId: 'claude-sonnet-4-6',
{
id: 'anthropic',
name: 'Anthropic',
defaultBaseUrl: 'https://api.anthropic.com/v1',
defaultModelId: 'claude-3-5-sonnet-20241022',
supportsImages: true,
contextWindow: 200000,
apiKeyUrl: 'https://console.anthropic.com/settings/keys',
setupGuideUrl:
'https://docs.browseros.com/features/bring-your-own-llm#claude',
}),
enrichTemplate('google', {
defaultModelId: 'gemini-2.5-flash',
},
{
id: 'google',
name: 'Gemini',
defaultBaseUrl: 'https://generativelanguage.googleapis.com/v1beta',
defaultModelId: 'gemini-1.5-pro',
supportsImages: true,
contextWindow: 1000000,
apiKeyUrl: 'https://aistudio.google.com/app/apikey',
setupGuideUrl:
'https://docs.browseros.com/features/bring-your-own-llm#gemini',
}),
},
{
id: 'ollama',
name: 'Ollama',
@@ -118,28 +108,47 @@ export const providerTemplates: ProviderTemplate[] = [
setupGuideUrl:
'https://docs.browseros.com/features/bring-your-own-llm#ollama',
},
enrichTemplate('openrouter', {
defaultModelId: 'anthropic/claude-sonnet-4.5',
{
id: 'openrouter',
name: 'OpenRouter',
defaultBaseUrl: 'https://openrouter.ai/api/v1',
defaultModelId: 'openai/gpt-4-turbo',
supportsImages: true,
contextWindow: 128000,
apiKeyUrl: 'https://openrouter.ai/keys',
setupGuideUrl:
'https://docs.browseros.com/features/bring-your-own-llm#openrouter',
}),
enrichTemplate('lmstudio', {
defaultModelId: 'openai/gpt-oss-20b',
},
{
id: 'lmstudio',
name: 'LM Studio',
defaultBaseUrl: 'http://localhost:1234/v1',
defaultModelId: 'local-model',
supportsImages: false,
contextWindow: 32000,
setupGuideUrl:
'https://docs.browseros.com/features/bring-your-own-llm#lmstudio',
}),
enrichTemplate('azure', {
},
{
id: 'azure',
name: 'Azure',
defaultBaseUrl: '',
defaultModelId: '',
supportsImages: true,
contextWindow: 128000,
apiKeyUrl:
'https://portal.azure.com/#view/Microsoft_Azure_ProjectOxford/CognitiveServicesHub/~/OpenAI',
}),
enrichTemplate('bedrock', {
defaultModelId: 'anthropic.claude-sonnet-4-6',
},
{
id: 'bedrock',
name: 'AWS Bedrock',
defaultBaseUrl: '',
defaultModelId: '',
supportsImages: true,
contextWindow: 200000,
setupGuideUrl:
'https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html',
}),
},
]
/**

View File

@@ -44,9 +44,9 @@
"@radix-ui/react-use-controllable-state": "^1.2.2",
"@sentry/react": "^10.31.0",
"@sentry/vite-plugin": "^4.6.1",
"@tanstack/query-async-storage-persister": "^5.95.2",
"@tanstack/react-query": "^5.95.2",
"@tanstack/react-query-persist-client": "^5.95.2",
"@tanstack/query-async-storage-persister": "^5.90.21",
"@tanstack/react-query": "^5.90.19",
"@tanstack/react-query-persist-client": "^5.90.21",
"@types/cytoscape": "^3.31.0",
"@types/dompurify": "^3.2.0",
"@webext-core/messaging": "^2.3.0",
@@ -69,8 +69,8 @@
"eventsource-parser": "^3.0.6",
"graphql": "^16.12.0",
"hono": "^4.12.3",
"idb-keyval": "^6.2.2",
"klavis": "^2.15.0",
"localforage": "^1.10.0",
"lucide-react": "^0.562.0",
"motion": "^12.23.24",
"nanoid": "^5.1.6",

View File

@@ -1,7 +0,0 @@
# Production upload env for CLI installer scripts
R2_ACCOUNT_ID=
R2_ACCESS_KEY_ID=
R2_SECRET_ACCESS_KEY=
R2_BUCKET=browseros
R2_UPLOAD_PREFIX=cli

View File

@@ -2,9 +2,6 @@ version: 2
project_name: browseros-cli
monorepo:
tag_prefix: browseros-cli-
builds:
- main: .
binary: browseros-cli

View File

@@ -1 +0,0 @@
# BrowserOS CLI

View File

@@ -1,58 +1,25 @@
# browseros-cli
[![License: AGPL v3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](../../../../LICENSE)
Command-line interface for controlling BrowserOS via MCP. Talks to the BrowserOS MCP server over JSON-RPC 2.0 / StreamableHTTP.
Command-line interface for controlling BrowserOS — launch and automate the browser from the terminal or from AI coding agents like Claude Code and Gemini CLI.
Communicates with the BrowserOS MCP server over JSON-RPC 2.0 / StreamableHTTP. All 53+ MCP tools are mapped to CLI commands.
## Install
### macOS / Linux
```bash
curl -fsSL https://cdn.browseros.com/cli/install.sh | bash
```
### Windows
```powershell
irm https://cdn.browseros.com/cli/install.ps1 | iex
```
### Build from Source
## Setup
Requires Go 1.25+.
```bash
make # Build binary
make install # Install to $GOPATH/bin
# Build
make
# First run — configure server connection
./browseros-cli init
```
## Quick Start
The `init` command prompts for your MCP server URL. Find it in:
**BrowserOS → Settings → BrowserOS MCP → Server URL**
```bash
# If BrowserOS is not installed yet
browseros-cli install # downloads BrowserOS for your platform
The port varies per installation (e.g., `http://127.0.0.1:9004/mcp`).
# If BrowserOS is installed but not running
browseros-cli launch # opens BrowserOS, waits for server
# Configure the CLI (auto-discovers running BrowserOS)
browseros-cli init --auto # detects server URL and saves config
# Verify connection
browseros-cli health
```
### Other init modes
```bash
browseros-cli init <url> # non-interactive — pass URL directly
browseros-cli init # interactive — prompts for URL
```
Config is saved to `~/.config/browseros-cli/config.yaml`. The CLI also auto-discovers the server from `~/.browseros/server.json` (written by BrowserOS on startup).
Config is saved to `~/.config/browseros-cli/config.yaml`.
## Usage
@@ -100,12 +67,6 @@ browseros-cli history recent
browseros-cli group list
```
## Use as MCP Server
BrowserOS exposes an MCP server that AI coding agents can connect to directly. The CLI is the easiest way to verify the connection and interact with tools from the terminal.
To connect Claude Code, Gemini CLI, or any MCP client, see the [MCP setup guide](https://docs.browseros.com/features/use-with-claude-code).
## Global Flags
| Flag | Env Var | Description |
@@ -116,9 +77,9 @@ To connect Claude Code, Gemini CLI, or any MCP client, see the [MCP setup guide]
| `--debug` | `BOS_DEBUG=1` | Debug output |
| `--timeout, -t` | | Request timeout (default: 2m) |
Priority for server URL: `--server` flag > `BROWSEROS_URL` env > `~/.browseros/server.json` > config file
Priority for server URL: `--server` flag > `BROWSEROS_URL` env > config file
If no server URL is configured, the CLI exits with setup instructions pointing to `install`, `launch`, and `init`.
If no server URL is configured, the CLI exits with setup instructions instead of assuming a localhost port.
## Testing
@@ -169,9 +130,7 @@ apps/cli/
│ └── config.go # Config file (~/.config/browseros-cli/config.yaml)
├── cmd/
│ ├── root.go # Root command, global flags
│ ├── init.go # Server URL configuration (URL arg, --auto, interactive)
│ ├── install.go # install (download BrowserOS for current platform)
│ ├── launch.go # launch (find and start BrowserOS, wait for server)
│ ├── init.go # Server URL configuration
│ ├── open.go # open (new_page / new_hidden_page)
│ ├── nav.go # nav, back, forward, reload
│ ├── pages.go # pages, active, close
@@ -204,8 +163,4 @@ The CLI communicates with BrowserOS via two HTTP POST requests per command:
1. `initialize` — MCP handshake
2. `tools/call` — execute the actual tool
## Links
- [Documentation](https://docs.browseros.com)
- [MCP Setup Guide](https://docs.browseros.com/features/use-with-claude-code)
- [Changelog](./CHANGELOG.md)
All 53+ MCP tools are mapped to CLI commands.

View File

@@ -148,7 +148,7 @@ func runPostInstall(path string, deb bool, dim *color.Color) {
// installMacOS mounts the DMG and copies BrowserOS.app to /Applications.
func installMacOS(dmgPath string, dim *color.Color) {
fmt.Println("Mounting disk image...")
mountOut, err := exec.Command("hdiutil", "attach", dmgPath, "-nobrowse").Output()
mountOut, err := exec.Command("hdiutil", "attach", dmgPath, "-nobrowse", "-quiet").Output()
if err != nil {
dim.Println("Could not mount DMG automatically.")
dim.Printf(" Open it manually: open %s\n", dmgPath)

View File

@@ -1,115 +0,0 @@
#
# Install browseros-cli for Windows — downloads the latest release binary.
#
# Usage (PowerShell — save and run):
# Invoke-WebRequest -Uri "https://cdn.browseros.com/cli/install.ps1" -OutFile install.ps1
# .\install.ps1
# .\install.ps1 -Version "0.1.0" -Dir "C:\tools\browseros"
#
# Usage (one-liner, uses env vars for options):
# & { $env:BROWSEROS_VERSION="0.1.0"; irm https://cdn.browseros.com/cli/install.ps1 | iex }
#
param(
[string]$Version = "",
[string]$Dir = ""
)
# Abort on the first error instead of continuing with a partial install.
$ErrorActionPreference = "Stop"
# TLS 1.2 — older PS 5.1 defaults to TLS 1.0
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
$CdnBase = "https://cdn.browseros.com/cli"
$Binary = "browseros-cli"
# When piped via irm | iex, param() is ignored — fall back to env vars
if (-not $Version) { $Version = $env:BROWSEROS_VERSION }
if (-not $Dir) { $Dir = if ($env:BROWSEROS_DIR) { $env:BROWSEROS_DIR } else { "$env:LOCALAPPDATA\browseros-cli\bin" } }
# ── Resolve latest version ───────────────────────────────────────────────────
if (-not $Version) {
Write-Host "Fetching latest version..."
$Version = (Invoke-WebRequest -Uri "$CdnBase/latest/version.txt" -UseBasicParsing).Content.Trim()
if (-not $Version) {
Write-Error "Could not determine latest version. Try: -Version 0.1.0"
exit 1
}
}
# Validate the version string before interpolating it into the download URL.
if ($Version -notmatch '^\d+\.\d+\.\d+(-[a-zA-Z0-9.]+)?$') {
Write-Error "Unexpected version format: '$Version'"
exit 1
}
Write-Host "Installing browseros-cli v$Version..."
# ── Detect architecture ──────────────────────────────────────────────────────
# $env:PROCESSOR_ARCHITECTURE lies under x64 emulation on ARM64 Windows.
# Use .NET RuntimeInformation when available, fall back to PROCESSOR_ARCHITEW6432.
$Arch = "amd64"
try {
$osArch = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture
if ($osArch -eq [System.Runtime.InteropServices.Architecture]::Arm64) { $Arch = "arm64" }
} catch {
if ($env:PROCESSOR_ARCHITEW6432 -eq "ARM64" -or $env:PROCESSOR_ARCHITECTURE -eq "ARM64") {
$Arch = "arm64"
}
}
if (-not [Environment]::Is64BitOperatingSystem) {
Write-Error "32-bit Windows is not supported."
exit 1
}
# ── Download and extract ─────────────────────────────────────────────────────
$Filename = "${Binary}_${Version}_windows_${Arch}.zip"
$Url = "$CdnBase/v$Version/$Filename"
# Random per-run temp dir so concurrent installs cannot collide.
$TmpDir = Join-Path ([System.IO.Path]::GetTempPath()) ("browseros-cli-install-" + [System.IO.Path]::GetRandomFileName())
try {
New-Item -ItemType Directory -Path $TmpDir | Out-Null
$ZipPath = Join-Path $TmpDir $Filename
Write-Host "Downloading $Url..."
Invoke-WebRequest -Uri $Url -OutFile $ZipPath -UseBasicParsing
Expand-Archive -Path $ZipPath -DestinationPath $TmpDir -Force
# Search recursively — the archive may nest the binary in a subdirectory.
$Exe = Get-ChildItem -Path $TmpDir -Filter "$Binary.exe" -File -Recurse | Select-Object -First 1
if (-not $Exe) {
Write-Error "Binary not found in archive."
exit 1
}
# ── Install ──────────────────────────────────────────────────────────────
if (-not (Test-Path $Dir)) {
New-Item -ItemType Directory -Path $Dir -Force | Out-Null
}
Move-Item -Force $Exe.FullName (Join-Path $Dir "$Binary.exe")
Write-Host "Installed $Binary.exe to $Dir"
} finally {
# Remove the temp dir even when download or extraction failed.
if (Test-Path $TmpDir) { Remove-Item -Recurse -Force $TmpDir -ErrorAction SilentlyContinue }
}
# ── PATH ─────────────────────────────────────────────────────────────────────
$UserPath = [Environment]::GetEnvironmentVariable("Path", "User")
$PathEntries = $UserPath -split ";" | Where-Object { $_ -ne "" }
if ($Dir -notin $PathEntries) {
Write-Host ""
Write-Host "Adding $Dir to your user PATH..."
# Persist for future sessions, then patch the current session's PATH too.
[Environment]::SetEnvironmentVariable("Path", "$Dir;$UserPath", "User")
$env:Path = "$Dir;$env:Path"
Write-Host "Done. Restart your terminal for PATH changes to take effect."
}
Write-Host ""
Write-Host "Run 'browseros-cli --help' to get started."

View File

@@ -1,151 +0,0 @@
#!/usr/bin/env bash
#
# Install browseros-cli — downloads the latest release binary for your platform.
#
# Usage:
# curl -fsSL https://cdn.browseros.com/cli/install.sh | bash
#
# # Or with options:
# curl -fsSL https://cdn.browseros.com/cli/install.sh | bash -s -- --version 0.1.0 --dir /usr/local/bin
# Fail fast: abort on errors, unset variables, and failed pipeline stages.
set -euo pipefail
CDN_BASE="https://cdn.browseros.com/cli"
BINARY="browseros-cli"
INSTALL_DIR="${HOME}/.browseros/bin"
# ── Parse arguments ──────────────────────────────────────────────────────────
VERSION=""
CUSTOM_DIR=""
while [[ $# -gt 0 ]]; do
case "$1" in
--version)
[[ $# -lt 2 ]] && { echo "Error: --version requires a value" >&2; exit 1; }
VERSION="$2"; shift 2 ;;
--dir)
[[ $# -lt 2 ]] && { echo "Error: --dir requires a value" >&2; exit 1; }
CUSTOM_DIR="$2"; shift 2 ;;
--help)
echo "Usage: install.sh [--version VERSION] [--dir INSTALL_DIR]"
echo ""
echo " --version Install a specific version (default: latest)"
echo " --dir Install directory (default: ~/.browseros/bin)"
exit 0
;;
*) echo "Unknown option: $1" >&2; exit 1 ;;
esac
done
[[ -n "$CUSTOM_DIR" ]] && INSTALL_DIR="$CUSTOM_DIR"
# ── Resolve latest version ───────────────────────────────────────────────────
if [[ -z "$VERSION" ]]; then
VERSION=$(curl -fsSL "${CDN_BASE}/latest/version.txt" | tr -d '[:space:]')
if [[ -z "$VERSION" ]]; then
echo "Error: could not determine latest version." >&2
echo " Try: install.sh --version 0.1.0" >&2
exit 1
fi
fi
# Validate the version string before interpolating it into the download URL.
if [[ ! "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then
echo "Error: unexpected version format: '$VERSION'" >&2
exit 1
fi
echo "Installing browseros-cli v${VERSION}..."
# ── Detect platform ──────────────────────────────────────────────────────────
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m)
case "$OS" in
darwin) OS="darwin" ;;
linux) OS="linux" ;;
*) echo "Error: unsupported OS: $OS" >&2; exit 1 ;;
esac
case "$ARCH" in
x86_64|amd64) ARCH="amd64" ;;
arm64|aarch64) ARCH="arm64" ;;
*) echo "Error: unsupported architecture: $ARCH" >&2; exit 1 ;;
esac
# ── Download and extract ─────────────────────────────────────────────────────
FILENAME="${BINARY}_${VERSION}_${OS}_${ARCH}.tar.gz"
URL="${CDN_BASE}/v${VERSION}/${FILENAME}"
CHECKSUM_URL="${CDN_BASE}/v${VERSION}/checksums.txt"
TMPDIR_DL=$(mktemp -d)
# Clean up the download dir on any exit (success, error, or interrupt).
trap 'rm -rf "$TMPDIR_DL"' EXIT
echo "Downloading ${URL}..."
curl -fSL --progress-bar -o "${TMPDIR_DL}/${FILENAME}" "$URL"
# Verify checksum if sha256sum/shasum is available
if curl -fsSL -o "${TMPDIR_DL}/checksums.txt" "$CHECKSUM_URL" 2>/dev/null; then
# checksums.txt lines look like "<sha256> <filename>"; grab the hash for our file.
expected=$(awk -v filename="$FILENAME" '$2 == filename { print $1; exit }' "${TMPDIR_DL}/checksums.txt")
if [[ -n "$expected" ]]; then
if command -v sha256sum >/dev/null 2>&1; then
actual=$(sha256sum "${TMPDIR_DL}/${FILENAME}" | awk '{print $1}')
elif command -v shasum >/dev/null 2>&1; then
actual=$(shasum -a 256 "${TMPDIR_DL}/${FILENAME}" | awk '{print $1}')
else
actual=""
echo "Warning: no sha256sum/shasum found; skipping checksum verification." >&2
fi
if [[ -n "$actual" && "$actual" != "$expected" ]]; then
echo "Error: checksum mismatch (expected ${expected}, got ${actual})" >&2
exit 1
fi
[[ -n "$actual" ]] && echo "Checksum verified."
else
echo "Warning: checksum not found in checksums.txt; skipping verification." >&2
fi
else
echo "Warning: could not fetch checksums.txt; skipping checksum verification." >&2
fi
tar -xzf "${TMPDIR_DL}/${FILENAME}" -C "$TMPDIR_DL"
BINARY_PATH="${TMPDIR_DL}/${BINARY}"
# Fall back to a recursive search if the archive nests the binary in a subdir.
if [[ ! -f "$BINARY_PATH" ]]; then
BINARY_PATH=$(find "$TMPDIR_DL" -type f -name "$BINARY" -print -quit)
fi
if [[ -z "$BINARY_PATH" || ! -f "$BINARY_PATH" ]]; then
echo "Error: binary not found in archive." >&2
exit 1
fi
# ── Install ──────────────────────────────────────────────────────────────────
mkdir -p "$INSTALL_DIR"
mv "$BINARY_PATH" "${INSTALL_DIR}/${BINARY}"
chmod +x "${INSTALL_DIR}/${BINARY}"
echo "Installed ${BINARY} to ${INSTALL_DIR}/${BINARY}"
# ── PATH hint ────────────────────────────────────────────────────────────────
# Only print setup instructions when the install dir is not already on PATH.
if ! echo "$PATH" | tr ':' '\n' | grep -qx "$INSTALL_DIR"; then
echo ""
echo "Add browseros-cli to your PATH:"
echo ""
SHELL_NAME=$(basename "${SHELL:-/bin/bash}")
case "$SHELL_NAME" in
zsh) echo " echo 'export PATH=\"${INSTALL_DIR}:\$PATH\"' >> ~/.zshrc && source ~/.zshrc" ;;
fish) echo " fish_add_path ${INSTALL_DIR}" ;;
*) echo " echo 'export PATH=\"${INSTALL_DIR}:\$PATH\"' >> ~/.bashrc && source ~/.bashrc" ;;
esac
fi
echo ""
echo "Run 'browseros-cli --help' to get started."

View File

@@ -1,8 +1,6 @@
# BrowserOS Eval
[![License: AGPL v3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](../../../../LICENSE)
Evaluation framework for benchmarking BrowserOS browser automation agents. Runs tasks from standard datasets ([WebVoyager](https://arxiv.org/abs/2401.13919), [Mind2Web](https://arxiv.org/abs/2306.06070)), captures trajectories with screenshots, and grades results automatically.
Evaluation framework for benchmarking BrowserOS browser automation agents. Runs tasks from standard datasets (WebVoyager, Mind2Web), captures trajectories with screenshots, and grades results automatically.
## Prerequisites

View File

@@ -1225,7 +1225,7 @@ (eval report renderer: per-task grade badge)
const score = graders[firstKey].score;
if (typeof score === 'number') {
const pct = Math.round(score * 100);
return { label: `${pct}%`, cls: pct >= 75 ? 'pass' : 'fail' };
return { label: pct + '%', cls: pct >= 75 ? 'pass' : 'fail' };
}
const anyPass = keys.some((k) => graders[k].pass);
return { label: anyPass ? 'PASS' : 'FAIL', cls: anyPass ? 'pass' : 'fail' };

View File

@@ -1,181 +0,0 @@
# BrowserOS Server
MCP server and AI agent loop powering BrowserOS browser automation. This is the core backend — it connects to Chromium via CDP, exposes 53+ MCP tools, and runs the AI agent that interprets natural language into browser actions.
> **Runtime:** [Bun](https://bun.sh) · **Framework:** [Hono](https://hono.dev) · **AI:** [Vercel AI SDK](https://sdk.vercel.ai) · **License:** [AGPL-3.0](../../../../LICENSE)
## Architecture
```
┌──────────────────────────────────────────────────────────────────────┐
│ MCP Clients │
│ (Agent UI, Claude Code, Gemini CLI, browseros-cli) │
└──────────────────────────────────────────────────────────────────────┘
│ HTTP / SSE / StreamableHTTP
┌──────────────────────────────────────────────────────────────────────┐
│ BrowserOS Server (Bun) │
│ │
│ /mcp ─────── MCP tool endpoints (53+ tools) │
│ /chat ────── Agent streaming (AI SDK) │
│ /health ─── Health check │
│ │
│ ┌─────────────────────────────────────────────────────────────┐ │
│ │ Agent Loop │ │
│ │ ├── Multi-provider AI SDK (OpenAI, Anthropic, Google, ...) │ │
│ │ ├── Session & conversation management │ │
│ │ ├── Context overflow handling + compaction │ │
│ │ └── MCP client for external tool servers │ │
│ └─────────────────────────────────────────────────────────────┘ │
│ │
│ ┌────────────────────┐ ┌────────────────────────────────────┐ │
│ │ CDP Tools │ │ Controller Tools │ │
│ │ (screenshots, │ │ (tabs, bookmarks, history, │ │
│ │ DOM, network, │ │ navigation, tab groups) │ │
│ │ console, input) │ │ │ │
│ └────────────────────┘ └────────────────────────────────────┘ │
└──────────────────────────────────────────────────────────────────────┘
│ │
│ Chrome DevTools Protocol │ WebSocket
▼ ▼
┌─────────────────────┐ ┌─────────────────────────────────┐
│ Chromium CDP │ │ Controller Extension │
│ (port 9000) │ │ (port 9300) │
│ │ │ │
│ DOM, network, │ │ chrome.tabs, chrome.history, │
│ input, screenshots │ │ chrome.bookmarks │
└─────────────────────┘ └─────────────────────────────────┘
```
## MCP Tools
53+ tools organized by category:
| Category | Tools |
|----------|-------|
| **Navigation** | `new_page`, `navigate`, `go_back`, `go_forward`, `reload` |
| **Input** | `click`, `type`, `press_key`, `hover`, `scroll`, `drag`, `fill`, `clear`, `focus`, `check`, `uncheck`, `select_option`, `upload_file` |
| **Observation** | `take_snapshot`, `take_enhanced_snapshot`, `extract_text`, `extract_links` |
| **Screenshots** | `take_screenshot`, `save_screenshot` |
| **Evaluation** | `evaluate_script` |
| **Pages** | `list_pages`, `active_page`, `close_page`, `new_hidden_page` |
| **Windows** | `window_list`, `window_create`, `window_close`, `window_activate` |
| **Bookmarks** | `bookmark_list`, `bookmark_create`, `bookmark_remove`, `bookmark_update`, `bookmark_move`, `bookmark_search` |
| **History** | `history_search`, `history_recent`, `history_delete`, `history_delete_range` |
| **Tab Groups** | `group_list`, `group_create`, `group_update`, `group_ungroup`, `group_close` |
| **Filesystem** | `ls`, `read`, `write`, `edit`, `find`, `grep`, `bash` |
| **Memory** | `read_core`, `update_core`, `read_soul`, `update_soul`, `search_memory`, `write_memory` |
| **DOM** | `dom`, `dom_search` |
| **Console** | `get_console_messages` |
| **Other** | `browseros_info`, `handle_dialog`, `wait_for`, `download`, `export_pdf`, `output_file`, `nudges` |
## Agent Loop
The agent loop uses the [Vercel AI SDK](https://sdk.vercel.ai) to orchestrate multi-step browser automation:
- **Multi-provider support** — OpenAI, Anthropic, Google, Azure, Bedrock, OpenRouter, Ollama, LM Studio, and any OpenAI-compatible endpoint
- **Session management** — conversations persist in a local SQLite database
- **Context overflow handling** — automatic message compaction when context windows fill up
- **MCP client** — connects to external MCP servers for additional tool access (40+ app integrations)
- **Tool adapter** — bridges MCP tool definitions to AI SDK tool format
### Provider Factory
The provider factory (`src/agent/provider-factory.ts`) creates AI SDK providers from runtime configuration, supporting hot-swapping between providers without restart.
## Skills System
Skills are custom instruction sets that shape agent behavior:
- **Catalog** (`src/skills/catalog.ts`) — registry of available skills
- **Defaults** (`src/skills/defaults/`) — built-in skill definitions
- **Loader** (`src/skills/loader.ts`) — loads skills from local and remote sources
- **Remote sync** (`src/skills/remote-sync.ts`) — syncs skills from the BrowserOS cloud
## Graph Executor (Workflows)
The graph executor (`src/graph/executor.ts`) runs visual workflow graphs built in the BrowserOS workflow editor. Each node in the graph maps to agent actions, conditionals, or data transformations.
## Directory Structure
```
apps/server/
├── src/
│ ├── index.ts # Server entry point
│ ├── main.ts # Server initialization
│ ├── api/ # HTTP route handlers
│ ├── agent/ # Agent loop
│ │ ├── ai-sdk-agent.ts # Main agent implementation
│ │ ├── provider-factory.ts# LLM provider factory
│ │ ├── session-store.ts # Conversation persistence
│ │ ├── compaction.ts # Context window management
│ │ ├── mcp-builder.ts # External MCP client setup
│ │ └── tool-adapter.ts # MCP → AI SDK tool bridge
│ ├── browser/ # Browser connection layer
│ ├── tools/ # MCP tool implementations
│ │ ├── navigation.ts
│ │ ├── input.ts
│ │ ├── snapshot.ts
│ │ ├── memory/
│ │ ├── filesystem/
│ │ └── ...
│ ├── skills/ # Skills system
│ ├── graph/ # Workflow graph executor
│ ├── lib/ # Shared utilities
│ └── rpc.ts # JSON-RPC type definitions
├── tests/
│ ├── tools/ # Tool-level tests
│ ├── sdk/ # SDK integration tests
│ └── server.integration.test.ts
├── graph/ # Workflow graph definitions
└── package.json
```
## Development
### Prerequisites
- [Bun](https://bun.sh) runtime
- A running BrowserOS instance (for CDP and controller connections)
### Setup
```bash
# Copy environment files
cp .env.example .env.development
# Start the server (with hot reload)
bun run start
```
See the [agent monorepo README](../../README.md) for full environment variable reference and `process-compose` setup.
### Testing
```bash
bun run test:tools # Tool-level tests
bun run test:integration # Full integration tests (requires running BrowserOS)
bun run test:sdk # SDK integration tests
```
### Building
```bash
# Build cross-platform server binaries
bun run build
# Build for specific targets
bun scripts/build/server.ts --target=darwin-arm64,linux-x64
# Build without uploading to R2
bun scripts/build/server.ts --target=all --no-upload
```
## Ports
| Port | Env Variable | Purpose |
|------|-------------|---------|
| 9100 | `BROWSEROS_SERVER_PORT` | HTTP server (MCP, chat, health) |
| 9000 | `BROWSEROS_CDP_PORT` | Chromium CDP (server connects as client) |
| 9300 | `BROWSEROS_EXTENSION_PORT` | WebSocket for controller extension |

View File

@@ -1,6 +1,6 @@
{
"name": "@browseros/server",
"version": "0.0.80",
"version": "0.0.79",
"description": "BrowserOS server",
"type": "module",
"main": "./src/index.ts",

View File

@@ -54,14 +54,8 @@ export class AiSdkAgent {
private _messages: UIMessage[],
private _mcpClients: Array<{ close(): Promise<void> }>,
private conversationId: string,
private _toolNames: Set<string>,
) {}
/** Tool names registered on this agent — used to sanitize messages during session rebuilds. */
get toolNames(): Set<string> {
return this._toolNames
}
static async create(config: AiSdkAgentConfig): Promise<AiSdkAgent> {
const contextWindow =
config.resolvedConfig.contextWindowSize ??
@@ -98,15 +92,10 @@ export class AiSdkAgent {
}
// Build browser tools from the unified tool registry
const originPageId = config.browserContext?.activeTab?.pageId
const allBrowserTools = buildBrowserToolSet(
config.registry,
config.browser,
config.resolvedConfig.workingDir,
{
origin: config.resolvedConfig.origin,
originPageId,
},
)
const browserTools = config.resolvedConfig.chatMode
? Object.fromEntries(
@@ -166,11 +155,10 @@ export class AiSdkAgent {
}
}
// Add filesystem tools — skip in chat mode (read-only) and when no workspace is selected
const filesystemTools =
!config.resolvedConfig.chatMode && config.resolvedConfig.workingDir
? buildFilesystemToolSet(config.resolvedConfig.workingDir)
: {}
// Add filesystem tools (Pi coding agent) — skip in chat mode (read-only)
const filesystemTools = config.resolvedConfig.chatMode
? {}
: buildFilesystemToolSet(config.resolvedConfig.workingDir)
const memoryTools = config.resolvedConfig.chatMode
? {}
: buildMemoryToolSet()
@@ -217,7 +205,6 @@ export class AiSdkAgent {
connectedApps: config.browserContext?.enabledMcpServers,
declinedApps: config.resolvedConfig.declinedApps,
skillsCatalog,
origin: config.resolvedConfig.origin,
})
// Configure compaction for context window management
@@ -276,7 +263,6 @@ export class AiSdkAgent {
[],
clients,
config.resolvedConfig.conversationId,
new Set(Object.keys(tools)),
)
}

View File

@@ -44,37 +44,3 @@ export function hasMessageContent(message: UIMessage): boolean {
/** Keep only messages that still carry renderable content. */
export function filterValidMessages(messages: UIMessage[]): UIMessage[] {
  const kept: UIMessage[] = []
  for (const message of messages) {
    if (hasMessageContent(message)) {
      kept.push(message)
    }
  }
  return kept
}
/**
 * Drop tool parts that reference tools absent from the given toolset.
 *
 * When a session is rebuilt with a different set of tools (e.g., workspace
 * removed mid-conversation or MCP server disconnected), carried-over history
 * can still contain parts for tools that no longer exist, and the AI SDK
 * rejects parts with no matching schema.
 *
 * Static tool parts are typed `tool-${toolName}`; dynamic tools use
 * `dynamic-tool` and are left untouched. Messages left without content
 * after filtering are removed entirely.
 */
export function sanitizeMessagesForToolset(
  messages: UIMessage[],
  toolNames: Set<string>,
): UIMessage[] {
  const sanitized: UIMessage[] = []
  for (const msg of messages) {
    const keptParts = msg.parts.filter((part) => {
      // Only static `tool-${toolName}` parts are candidates for removal.
      if (typeof part.type !== 'string' || !part.type.startsWith('tool-')) {
        return true
      }
      return toolNames.has(part.type.slice('tool-'.length))
    })
    // Preserve object identity when nothing was filtered out.
    const next =
      keptParts.length === msg.parts.length ? msg : { ...msg, parts: keptParts }
    if (hasMessageContent(next)) {
      sanitized.push(next)
    }
  }
  return sanitized
}

View File

@@ -193,41 +193,19 @@ You have persistent memory across sessions and an evolving personality. See the
function getExecution(
_exclude: Set<string>,
options?: BuildSystemPromptOptions,
_options?: BuildSystemPromptOptions,
): string {
const isNewTab = options?.origin === 'newtab'
let executionContent = `<execution>
return `<execution>
## Execution
### Philosophy
- Execute tasks end-to-end. Don't delegate ("I found the button, you can click it").
- Don't ask permission for routine steps. Act, then report.
- Do not refuse by default, attempt tasks even when outcomes are uncertain.
- For ambiguous/unclear requests, ask one targeted clarifying question.`
if (isNewTab) {
executionContent += `
### New-Tab Origin Rules
You are operating from the user's **New Tab page**. The active tab (Page ID from Browser Context) is the chat UI itself.
**CRITICAL RULES:**
1. **NEVER call \`navigate_page\` on the active tab** — this would destroy the chat UI and navigate the user away.
2. **NEVER call \`close_page\` on the active tab** — same reason.
3. For ALL browsing tasks (including single-page lookups), use \`new_page\` (background) to open URLs.
4. For single-page lookups, open a background tab, extract data, then close it.
5. For multi-page research, open background tabs and group them with \`group_tabs\`.
### Multi-tab workflow`
} else {
executionContent += `
- For ambiguous/unclear requests, ask one targeted clarifying question.
- Stay on the current page for single-page tasks. Use \`navigate_page\` to move within one tab.
### Multi-tab workflow`
}
executionContent += `
### Multi-tab workflow
When a task requires working on multiple pages simultaneously:
1. **Inform the user** that you're creating background tabs for the task.
2. **Open new tabs in background** using \`new_page\` (opens in background by default) — never steal focus from the user's current tab.
@@ -238,23 +216,15 @@ When a task requires working on multiple pages simultaneously:
7. **Never force-switch the user's active tab.** If you need user interaction on a background tab (e.g., login, CAPTCHA), tell the user which tab needs attention and let them switch manually.
8. **Never navigate the user's current tab** during a multi-tab task. The current tab is the user's anchor — use it only for reading (snapshots, content extraction). All navigation should happen on background tabs.
**Do NOT use \`create_hidden_window\` or \`new_hidden_page\` for user-requested tasks.** Hidden windows are invisible to the user and cannot be screenshotted. Use \`new_page\` (background mode) instead — tabs appear in the user's tab strip and can be inspected. Reserve hidden windows for automated/scheduled runs only.`
**Do NOT use \`create_hidden_window\` or \`new_hidden_page\` for user-requested tasks.** Hidden windows are invisible to the user and cannot be screenshotted. Use \`new_page\` (background mode) instead — tabs appear in the user's tab strip and can be inspected. Reserve hidden windows for automated/scheduled runs only.
if (!isNewTab) {
executionContent += `
For single-page lookups (e.g., "go to X and read Y"), use \`navigate_page\` on the current tab. Only create new tabs when the task requires multiple pages open simultaneously.`
}
executionContent += `
For single-page lookups (e.g., "go to X and read Y"), use \`navigate_page\` on the current tab. Only create new tabs when the task requires multiple pages open simultaneously.
### Tab retry discipline
When a background tab fails (404, wrong content, unexpected redirect):
- **Navigate the existing tab** to the correct URL with \`navigate_page\` — do NOT open a new tab for retries.
- If you must abandon a tab, close it with \`close_page\` before opening a replacement.
- Never let orphan tabs accumulate — each task should end with only the tabs that contain useful content.`
executionContent += `
- Never let orphan tabs accumulate — each task should end with only the tabs that contain useful content.
### Observe → Act → Verify
- **Before acting**: Take a snapshot to get interactive element IDs.
@@ -271,38 +241,13 @@ Some tools automatically include a fresh snapshot in their response (labeled "Ad
- 2FA → notify user, pause for completion
- Page not found (404) or server error (500) → report the error to the user
</execution>`
return executionContent
}
// -----------------------------------------------------------------------------
// section: tool-selection
// -----------------------------------------------------------------------------
function getToolSelection(
_exclude: Set<string>,
options?: BuildSystemPromptOptions,
): string {
const isNewTab = options?.origin === 'newtab'
const navTable = isNewTab
? `### Navigation: single-tab vs multi-tab
| Task | Approach |
|------|----------|
| Look up one page | \`new_page\` (background) → extract data → \`close_page\` |
| Research across multiple sites | \`new_page\` (background) for each site + \`group_tabs\` |
| Compare two pages side by side | \`new_page\` (background) × 2 + \`group_tabs\` |
| User says "open a new tab" | \`new_page\` (background) |
**Remember:** The active tab is the New Tab chat UI. Never navigate or close it.`
: `### Navigation: single-tab vs multi-tab
| Task | Approach |
|------|----------|
| Look up one page | \`navigate_page\` on current tab |
| Research across multiple sites | \`new_page\` (background) for each site + \`group_tabs\` |
| Compare two pages side by side | \`new_page\` (background) × 2 + \`group_tabs\` |
| User says "open a new tab" | \`new_page\` (background) — don't steal focus |`
function getToolSelection(): string {
return `<tool_selection>
## Tool Selection
@@ -323,7 +268,13 @@ function getToolSelection(
- Prefer \`fill\` over \`press_key\` for text input. Use \`press_key\` for keyboard shortcuts (Enter, Escape, Tab, Ctrl+A, etc.).
- Prefer clicking links over \`navigate_page\` when the link is visible. Use \`navigate_page\` for direct URL access, back/forward, or reload.
${navTable}
### Navigation: single-tab vs multi-tab
| Task | Approach |
|------|----------|
| Look up one page | \`navigate_page\` on current tab |
| Research across multiple sites | \`new_page\` (background) for each site + \`group_tabs\` |
| Compare two pages side by side | \`new_page\` (background) × 2 + \`group_tabs\` |
| User says "open a new tab" | \`new_page\` (background) — don't steal focus |
### Connected apps: Strata vs browser
When an app is Connected, prefer Strata tools over browser automation. Strata is faster, more reliable, and works without navigating away from the user's current page.
@@ -717,10 +668,7 @@ const promptSections: Record<string, PromptSectionFn> = {
security: getSecurity,
capabilities: getCapabilities,
execution: getExecution,
'tool-selection': (
_exclude: Set<string>,
options?: BuildSystemPromptOptions,
) => getToolSelection(_exclude, options),
'tool-selection': getToolSelection,
'external-integrations': getExternalIntegrations,
'error-recovery': getErrorRecovery,
'memory-and-identity': getMemoryAndIdentity,
@@ -747,8 +695,6 @@ export interface BuildSystemPromptOptions {
/** Apps the user previously declined to connect (chose "do it manually"). */
declinedApps?: string[]
skillsCatalog?: string
/** Where the chat session originates from — determines navigation behavior. */
origin?: 'sidepanel' | 'newtab'
}
export function buildSystemPrompt(options?: BuildSystemPromptOptions): string {

View File

@@ -9,8 +9,6 @@ export interface AgentSession {
browserContext?: BrowserContext
/** MCP server names used when the session was created, for change detection. */
mcpServerKey?: string
/** Workspace directory when the session was created, for change detection. */
workingDir?: string
}
export class SessionStore {

View File

@@ -38,14 +38,12 @@ function contentToModelOutput(
export function buildBrowserToolSet(
registry: ToolRegistry,
browser: Browser,
workingDir: string | undefined,
session?: { origin?: 'sidepanel' | 'newtab'; originPageId?: number },
workingDir: string,
): ToolSet {
const toolSet: ToolSet = {}
const ctx: ToolContext = {
browser,
directories: { workingDir },
session,
}
for (const def of registry.all()) {

View File

@@ -35,7 +35,7 @@ export interface ResolvedAgentConfig {
reasoningSummary?: string
contextWindowSize?: number
userSystemPrompt?: string
workingDir?: string
workingDir: string
/** Whether the model supports image inputs (vision). Defaults to true. */
supportsImages?: boolean
/** Eval mode - enables window management tools. Defaults to false. */
@@ -46,8 +46,6 @@ export interface ResolvedAgentConfig {
isScheduledTask?: boolean
/** Apps the user previously declined to connect via MCP (chose "do it manually"). */
declinedApps?: string[]
/** Where the chat session originates from — determines navigation behavior. */
origin?: 'sidepanel' | 'newtab'
/** BrowserOS installation ID for credit-based tracking. */
browserosId?: string
}

View File

@@ -4,16 +4,16 @@
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
import { mkdir, utimes } from 'node:fs/promises'
import path from 'node:path'
import { createAgentUIStreamResponse, type UIMessage } from 'ai'
import { AiSdkAgent } from '../../agent/ai-sdk-agent'
import { formatUserMessage } from '../../agent/format-message'
import {
filterValidMessages,
sanitizeMessagesForToolset,
} from '../../agent/message-validation'
import type { AgentSession, SessionStore } from '../../agent/session-store'
import { filterValidMessages } from '../../agent/message-validation'
import type { SessionStore } from '../../agent/session-store'
import type { ResolvedAgentConfig } from '../../agent/types'
import type { Browser } from '../../browser/browser'
import { getSessionsDir } from '../../lib/browseros-dir'
import type { KlavisClient } from '../../lib/clients/klavis/klavis-client'
import { resolveLLMConfig } from '../../lib/clients/llm/config'
import { logger } from '../../lib/logger'
@@ -40,6 +40,8 @@ export class ChatService {
const llmConfig = await resolveLLMConfig(request, this.deps.browserosId)
const workingDir = await this.resolveSessionDir(request)
const agentConfig: ResolvedAgentConfig = {
conversationId: request.conversationId,
provider: llmConfig.provider,
@@ -57,18 +59,16 @@ export class ChatService {
reasoningSummary: request.reasoningSummary,
contextWindowSize: request.contextWindowSize,
userSystemPrompt: request.userSystemPrompt,
workingDir: request.userWorkingDir,
workingDir,
supportsImages: request.supportsImages,
chatMode: request.mode === 'chat',
isScheduledTask: request.isScheduledTask,
origin: request.origin,
declinedApps: request.declinedApps,
browserosId: this.deps.browserosId,
}
let session = sessionStore.get(request.conversationId)
let isNewSession = false
const contextChanges: string[] = []
// Build a stable key from enabled MCP servers for change detection
const mcpServerKey = this.buildMcpServerKey(request.browserContext)
@@ -80,68 +80,23 @@ export class ChatService {
previous: session.mcpServerKey,
current: mcpServerKey,
})
const previousMcpKey = session.mcpServerKey
session = await this.rebuildSession(
session,
request,
agentConfig,
mcpServerKey,
)
const previousMessages = session.agent.messages
await session.agent.dispose()
sessionStore.remove(request.conversationId)
const oldServers = new Set(
(previousMcpKey ?? '').split(',').filter(Boolean),
)
const newServers = new Set(mcpServerKey.split(',').filter(Boolean))
const added = [...newServers].filter((s) => !oldServers.has(s))
const removed = [...oldServers].filter((s) => !newServers.has(s))
const parts: string[] = []
if (removed.length > 0) {
parts.push(
`The following app integrations were disconnected: ${removed.join(', ')}. Their tools are no longer available.`,
)
}
if (added.length > 0) {
parts.push(
`The following app integrations were connected: ${added.join(', ')}. Their tools are now available.`,
)
}
if (parts.length === 0) {
parts.push(
'Connected app integrations changed during this conversation. Use only tools that are currently registered.',
)
}
contextChanges.push(parts.join(' '))
}
// Detect workspace change mid-conversation → rebuild session
if (session && session.workingDir !== request.userWorkingDir) {
logger.info('Workspace changed mid-conversation, rebuilding session', {
conversationId: request.conversationId,
previous: session.workingDir ?? '(none)',
current: request.userWorkingDir ?? '(none)',
const browserContext = await this.resolvePageIds(request.browserContext)
const agent = await AiSdkAgent.create({
resolvedConfig: agentConfig,
browser: this.deps.browser,
registry: this.deps.registry,
browserContext,
klavisClient: this.deps.klavisClient,
browserosId: this.deps.browserosId,
aiSdkDevtoolsEnabled: this.deps.aiSdkDevtoolsEnabled,
})
const previousWorkingDir = session.workingDir
session = await this.rebuildSession(
session,
request,
agentConfig,
mcpServerKey,
)
if (!request.userWorkingDir) {
contextChanges.push(
'The user disconnected the workspace during this conversation. Filesystem tools (filesystem_read, filesystem_write, filesystem_edit, filesystem_bash, filesystem_grep, filesystem_find, filesystem_ls) are no longer available. Return all output directly in chat. If the user asks for file operations, suggest they select a working directory from the chat toolbar.',
)
} else if (!previousWorkingDir) {
contextChanges.push(
`The user connected a workspace during this conversation. Filesystem tools are now available. Working directory: ${request.userWorkingDir}`,
)
} else {
contextChanges.push(
`The user switched workspace during this conversation. Filesystem tools now use the new working directory: ${request.userWorkingDir}`,
)
}
session = { agent, browserContext, mcpServerKey }
session.agent.messages = previousMessages
sessionStore.set(request.conversationId, session)
}
if (!session) {
@@ -186,13 +141,7 @@ export class ChatService {
browserosId: this.deps.browserosId,
aiSdkDevtoolsEnabled: this.deps.aiSdkDevtoolsEnabled,
})
session = {
agent,
hiddenWindowId,
browserContext,
mcpServerKey,
workingDir: request.userWorkingDir,
}
session = { agent, hiddenWindowId, browserContext, mcpServerKey }
sessionStore.set(request.conversationId, session)
}
@@ -226,13 +175,7 @@ export class ChatService {
request.selectedText,
request.selectedTextSource,
)
// Prepend tool-change context when session was rebuilt mid-conversation
const contextPrefix =
contextChanges.length > 0
? `${contextChanges.map((c) => `[Context: ${c}]`).join('\n')}\n\n`
: ''
session.agent.appendUserMessage(contextPrefix + userContent)
session.agent.appendUserMessage(userContent)
return createAgentUIStreamResponse({
agent: session.agent.toolLoopAgent,
@@ -319,44 +262,22 @@ export class ChatService {
})
}
private async rebuildSession(
session: AgentSession,
request: ChatRequest,
agentConfig: ResolvedAgentConfig,
mcpServerKey: string,
): Promise<AgentSession> {
const previousMessages = session.agent.messages
await session.agent.dispose()
this.deps.sessionStore.remove(request.conversationId)
const browserContext = await this.resolvePageIds(request.browserContext)
const agent = await AiSdkAgent.create({
resolvedConfig: agentConfig,
browser: this.deps.browser,
registry: this.deps.registry,
browserContext,
klavisClient: this.deps.klavisClient,
browserosId: this.deps.browserosId,
aiSdkDevtoolsEnabled: this.deps.aiSdkDevtoolsEnabled,
})
const newSession: AgentSession = {
agent,
browserContext,
mcpServerKey,
workingDir: request.userWorkingDir,
}
newSession.agent.messages = sanitizeMessagesForToolset(
previousMessages,
agent.toolNames,
)
this.deps.sessionStore.set(request.conversationId, newSession)
return newSession
}
private buildMcpServerKey(browserContext?: BrowserContext): string {
const managed = browserContext?.enabledMcpServers?.slice().sort() ?? []
const custom =
browserContext?.customMcpServers?.map((s) => s.url).sort() ?? []
return [...managed, ...custom].join(',')
}
private async resolveSessionDir(request: ChatRequest): Promise<string> {
const dir = request.userWorkingDir
? request.userWorkingDir
: path.join(getSessionsDir(), request.conversationId)
await mkdir(dir, { recursive: true })
if (!request.userWorkingDir) {
const now = new Date()
await utimes(dir, now, now).catch(() => {})
}
return dir
}
}

View File

@@ -45,7 +45,6 @@ export const ChatRequestSchema = AgentLLMConfigSchema.extend({
userWorkingDir: z.string().min(1).optional(),
supportsImages: z.boolean().optional().default(true),
mode: z.enum(['chat', 'agent']).optional().default('agent'),
origin: z.enum(['sidepanel', 'newtab']).optional().default('sidepanel'),
declinedApps: z.array(z.string()).optional(),
selectedText: z.string().optional(),
selectedTextSource: z

View File

@@ -20,7 +20,6 @@ import './lib/polyfill'
import { EXIT_CODES } from '@browseros/shared/constants/exit-codes'
import { CommanderError } from 'commander'
import { loadServerConfig } from './config'
import { isPortInUseError } from './lib/port-binding'
import { Sentry } from './lib/sentry'
import { Application } from './main'
@@ -40,9 +39,6 @@ try {
if (error instanceof CommanderError) {
process.exit(error.exitCode)
}
if (isPortInUseError(error)) {
process.exit(EXIT_CODES.PORT_CONFLICT)
}
Sentry.captureException(error)
console.error('Failed to start server:', error)
process.exit(EXIT_CODES.GENERAL_ERROR)

View File

@@ -231,6 +231,7 @@ export class Application {
console.error(
`[FATAL] Failed to start ${serverName} on port ${port}: ${errorMsg}`,
)
Sentry.captureException(error)
if (isPortInUseError(error)) {
console.error(
@@ -239,7 +240,6 @@ export class Application {
process.exit(EXIT_CODES.PORT_CONFLICT)
}
Sentry.captureException(error)
process.exit(EXIT_CODES.GENERAL_ERROR)
}
@@ -255,9 +255,7 @@ export class Application {
{ port },
)
}
if (!isPortInUseError(error)) {
Sentry.captureException(error)
}
Sentry.captureException(error)
}
private logStartupSummary(controllerServerStarted: boolean): void {

View File

@@ -1,4 +1,3 @@
import { tmpdir } from 'node:os'
import { resolve } from 'node:path'
import type { z } from 'zod'
import type { Browser } from '../browser/browser'
@@ -19,19 +18,13 @@ export type ToolHandler = (
) => Promise<void>
export interface ToolDirectories {
workingDir?: string
workingDir: string
resourcesDir?: string
}
export interface ToolSessionContext {
origin?: 'sidepanel' | 'newtab'
originPageId?: number
}
export type ToolContext = {
browser: Browser
directories: ToolDirectories
session?: ToolSessionContext
}
export function resolveWorkingPath(
@@ -39,7 +32,7 @@ export function resolveWorkingPath(
targetPath: string,
cwd?: string,
): string {
return resolve(cwd ?? ctx.directories.workingDir ?? tmpdir(), targetPath)
return resolve(cwd ?? ctx.directories.workingDir, targetPath)
}
export function defineTool<

View File

@@ -88,17 +88,6 @@ export const navigate_page = defineTool({
return
}
if (
ctx.session?.origin === 'newtab' &&
ctx.session.originPageId !== undefined &&
args.page === ctx.session.originPageId
) {
response.error(
'Cannot navigate the origin tab in new-tab mode — this would destroy the chat UI. Use `new_page` to open a background tab instead.',
)
return
}
switch (args.action) {
case 'url':
await ctx.browser.goto(args.page, args.url as string)
@@ -277,17 +266,6 @@ export const close_page = defineTool({
action: z.literal('close_page'),
}),
handler: async (args, ctx, response) => {
if (
ctx.session?.origin === 'newtab' &&
ctx.session.originPageId !== undefined &&
args.page === ctx.session.originPageId
) {
response.error(
'Cannot close the origin tab in new-tab mode — this would destroy the chat UI.',
)
return
}
await ctx.browser.closePage(args.page)
response.text(`Closed page ${args.page}`)
response.data({ page: args.page, action: 'close_page' })

View File

@@ -1,5 +1,4 @@
import { mkdir, mkdtemp, rename, rm } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import { z } from 'zod'
import { defineTool, resolveWorkingPath } from './framework'
@@ -122,9 +121,10 @@ export const download_file = defineTool({
}),
handler: async (args, ctx, response) => {
const resolvedDir = resolveWorkingPath(ctx, args.path, args.cwd)
const baseDir = ctx.directories.workingDir ?? tmpdir()
await mkdir(baseDir, { recursive: true })
const tempDir = await mkdtemp(join(baseDir, 'browseros-dl-'))
await mkdir(ctx.directories.workingDir, { recursive: true })
const tempDir = await mkdtemp(
join(ctx.directories.workingDir, 'browseros-dl-'),
)
try {
const { filePath, suggestedFilename } =

View File

@@ -1,299 +0,0 @@
/**
* @license
* Copyright 2025 BrowserOS
*
* Message Validation — Test Suite
*
* Tests for sanitizeMessagesForToolset, which strips tool parts from
* carried-over messages when a session is rebuilt with a different toolset
* (e.g., workspace removed or MCP server disconnected mid-conversation).
*
* Without this sanitization, the AI SDK throws a validation error because
* it finds tool parts in the message history that have no matching schema.
*/
import { describe, expect, it } from 'bun:test'
import type { UIMessage } from 'ai'
import {
hasMessageContent,
sanitizeMessagesForToolset,
} from '../../src/agent/message-validation'
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
function makeUserMessage(text: string, id?: string): UIMessage {
return {
id: id ?? crypto.randomUUID(),
role: 'user',
parts: [{ type: 'text', text }],
}
}
function makeAssistantMessage(
parts: UIMessage['parts'],
id?: string,
): UIMessage {
return {
id: id ?? crypto.randomUUID(),
role: 'assistant',
parts,
}
}
// ---------------------------------------------------------------------------
// sanitizeMessagesForToolset
// ---------------------------------------------------------------------------
describe('sanitizeMessagesForToolset', () => {
const allTools = new Set([
'navigate_page',
'click',
'take_snapshot',
'filesystem_read',
'filesystem_write',
'memory_search',
])
const noFilesystemTools = new Set([
'navigate_page',
'click',
'take_snapshot',
'memory_search',
])
it('preserves messages with no tool parts', () => {
const messages: UIMessage[] = [
makeUserMessage('Hello'),
makeAssistantMessage([{ type: 'text', text: 'Hi there!' }]),
]
const result = sanitizeMessagesForToolset(messages, noFilesystemTools)
expect(result).toHaveLength(2)
expect(result[0].parts).toHaveLength(1)
expect(result[1].parts).toHaveLength(1)
})
it('preserves tool parts when tool is in the toolset', () => {
const messages: UIMessage[] = [
makeAssistantMessage([
{ type: 'text', text: 'Taking a snapshot...' },
{
type: 'tool-take_snapshot',
toolCallId: 'call-1',
toolName: 'take_snapshot',
state: 'result',
input: { page: 1 },
output: { content: 'snapshot data' },
} as unknown as UIMessage['parts'][number],
]),
]
const result = sanitizeMessagesForToolset(messages, allTools)
expect(result).toHaveLength(1)
expect(result[0].parts).toHaveLength(2)
})
it('strips tool parts when tool is NOT in the toolset', () => {
const messages: UIMessage[] = [
makeAssistantMessage([
{ type: 'text', text: 'Reading file...' },
{
type: 'tool-filesystem_read',
toolCallId: 'call-1',
toolName: 'filesystem_read',
state: 'result',
input: { path: '/tmp/test.txt' },
output: { content: 'file data' },
} as unknown as UIMessage['parts'][number],
]),
]
const result = sanitizeMessagesForToolset(messages, noFilesystemTools)
expect(result).toHaveLength(1)
// Only the text part should remain
expect(result[0].parts).toHaveLength(1)
expect(result[0].parts[0].type).toBe('text')
})
it('strips multiple removed tool parts from same message', () => {
const messages: UIMessage[] = [
makeAssistantMessage([
{ type: 'text', text: 'Working on files...' },
{
type: 'tool-filesystem_read',
toolCallId: 'call-1',
toolName: 'filesystem_read',
state: 'result',
input: { path: '/tmp/a.txt' },
output: {},
} as unknown as UIMessage['parts'][number],
{
type: 'tool-filesystem_write',
toolCallId: 'call-2',
toolName: 'filesystem_write',
state: 'result',
input: { path: '/tmp/b.txt', content: 'data' },
output: {},
} as unknown as UIMessage['parts'][number],
]),
]
const result = sanitizeMessagesForToolset(messages, noFilesystemTools)
expect(result).toHaveLength(1)
expect(result[0].parts).toHaveLength(1)
expect(result[0].parts[0].type).toBe('text')
})
it('keeps browser tool parts while removing filesystem tool parts', () => {
const messages: UIMessage[] = [
makeAssistantMessage([
{
type: 'tool-take_snapshot',
toolCallId: 'call-1',
toolName: 'take_snapshot',
state: 'result',
input: { page: 1 },
output: {},
} as unknown as UIMessage['parts'][number],
{
type: 'tool-filesystem_read',
toolCallId: 'call-2',
toolName: 'filesystem_read',
state: 'result',
input: { path: '/tmp/test.txt' },
output: {},
} as unknown as UIMessage['parts'][number],
]),
]
const result = sanitizeMessagesForToolset(messages, noFilesystemTools)
expect(result).toHaveLength(1)
expect(result[0].parts).toHaveLength(1)
expect((result[0].parts[0] as { type: string }).type).toBe(
'tool-take_snapshot',
)
})
it('removes messages that become empty after stripping', () => {
const messages: UIMessage[] = [
makeUserMessage('Read this file'),
makeAssistantMessage([
{
type: 'tool-filesystem_read',
toolCallId: 'call-1',
toolName: 'filesystem_read',
state: 'result',
input: { path: '/tmp/test.txt' },
output: {},
} as unknown as UIMessage['parts'][number],
]),
]
const result = sanitizeMessagesForToolset(messages, noFilesystemTools)
// The assistant message had only a tool part — after stripping, it's empty
// and should be filtered out by hasMessageContent
expect(result).toHaveLength(1)
expect(result[0].role).toBe('user')
})
it('preserves non-tool part types (reasoning, step-start, file)', () => {
const messages: UIMessage[] = [
makeAssistantMessage([
{ type: 'text', text: 'Let me think...' },
{
type: 'reasoning',
reasoning: 'Analyzing the request',
} as unknown as UIMessage['parts'][number],
{
type: 'step-start',
} as unknown as UIMessage['parts'][number],
]),
]
const result = sanitizeMessagesForToolset(messages, noFilesystemTools)
expect(result).toHaveLength(1)
expect(result[0].parts).toHaveLength(3)
})
it('returns same message references when no filtering needed', () => {
const messages: UIMessage[] = [
makeUserMessage('Hello'),
makeAssistantMessage([{ type: 'text', text: 'Hi!' }]),
]
const result = sanitizeMessagesForToolset(messages, noFilesystemTools)
// Messages that don't need filtering should be the same reference
expect(result[0]).toBe(messages[0])
expect(result[1]).toBe(messages[1])
})
it('handles empty message array', () => {
const result = sanitizeMessagesForToolset([], noFilesystemTools)
expect(result).toHaveLength(0)
})
it('handles empty toolset (all tools removed)', () => {
const messages: UIMessage[] = [
makeAssistantMessage([
{ type: 'text', text: 'Working...' },
{
type: 'tool-navigate_page',
toolCallId: 'call-1',
toolName: 'navigate_page',
state: 'result',
input: {},
output: {},
} as unknown as UIMessage['parts'][number],
]),
]
const result = sanitizeMessagesForToolset(messages, new Set())
expect(result).toHaveLength(1)
expect(result[0].parts).toHaveLength(1)
expect(result[0].parts[0].type).toBe('text')
})
})
// ---------------------------------------------------------------------------
// hasMessageContent (existing function, verify edge cases)
// ---------------------------------------------------------------------------
describe('hasMessageContent', () => {
it('rejects messages with empty parts array', () => {
const msg: UIMessage = {
id: '1',
role: 'assistant',
parts: [],
}
expect(hasMessageContent(msg)).toBe(false)
})
it('rejects messages with only whitespace text', () => {
const msg: UIMessage = {
id: '1',
role: 'assistant',
parts: [{ type: 'text', text: ' \n ' }],
}
expect(hasMessageContent(msg)).toBe(false)
})
it('accepts messages with non-text parts', () => {
const msg: UIMessage = {
id: '1',
role: 'assistant',
parts: [
{
type: 'tool-click',
toolCallId: 'call-1',
toolName: 'click',
state: 'result',
input: {},
output: {},
} as unknown as UIMessage['parts'][number],
],
}
expect(hasMessageContent(msg)).toBe(true)
})
})

View File

@@ -1195,120 +1195,3 @@ describe('nudges', () => {
expect(prompt).toContain('at most once')
})
})
// ---------------------------------------------------------------------------
// 15. NEW-TAB ORIGIN
//
// Why: When the user chats from the new-tab page, the active tab IS the chat
// UI. The agent must never navigate or close it. The prompt must adapt its
// execution and tool-selection sections to prohibit origin tab navigation
// and default all lookups to new_page (background).
// ---------------------------------------------------------------------------
describe('new-tab origin', () => {
/** Build a prompt with newtab origin */
function buildNewTab(overrides?: Partial<BuildSystemPromptOptions>): string {
return buildSystemPrompt({
workspaceDir: '/home/user/workspace',
soulContent: 'Be helpful and concise.',
origin: 'newtab',
...overrides,
})
}
// --- Execution section ---
it('includes New-Tab Origin Rules when origin is newtab', () => {
const prompt = buildNewTab()
expect(prompt).toContain('New-Tab Origin Rules')
expect(prompt).toContain('New Tab page')
expect(prompt).toContain('chat UI itself')
})
it('prohibits navigate_page on active tab in newtab mode', () => {
const prompt = buildNewTab()
expect(prompt).toContain('NEVER call `navigate_page` on the active tab')
})
it('prohibits close_page on active tab in newtab mode', () => {
const prompt = buildNewTab()
expect(prompt).toContain('NEVER call `close_page` on the active tab')
})
it('requires new_page for all browsing in newtab mode', () => {
const prompt = buildNewTab()
expect(prompt).toContain(
'For ALL browsing tasks (including single-page lookups), use `new_page`',
)
})
it('does NOT include single-tab navigate_page guidance in newtab mode', () => {
// The sidepanel prompt says "use navigate_page on the current tab" for
// single-page lookups. This must NOT appear in newtab mode.
const prompt = buildNewTab()
expect(prompt).not.toContain(
'For single-page lookups (e.g., "go to X and read Y"), use `navigate_page` on the current tab',
)
})
it('does NOT include "Stay on the current page" in newtab mode', () => {
const prompt = buildNewTab()
expect(prompt).not.toContain(
'Stay on the current page for single-page tasks',
)
})
it('still includes common execution sections in newtab mode', () => {
// Newtab mode should still have multi-tab workflow, observe-act-verify, etc.
const prompt = buildNewTab()
expect(prompt).toContain('Multi-tab workflow')
expect(prompt).toContain('Observe → Act → Verify')
expect(prompt).toContain('Tab retry discipline')
expect(prompt).toContain('CAPTCHA')
})
// --- Sidepanel (default) should NOT have newtab rules ---
it('does NOT include New-Tab Origin Rules in sidepanel mode', () => {
const prompt = buildRegular({ origin: 'sidepanel' })
expect(prompt).not.toContain('New-Tab Origin Rules')
})
it('does NOT include New-Tab Origin Rules when origin is undefined', () => {
const prompt = buildRegular()
expect(prompt).not.toContain('New-Tab Origin Rules')
})
it('includes single-tab navigate_page guidance in sidepanel mode', () => {
const prompt = buildRegular({ origin: 'sidepanel' })
expect(prompt).toContain(
'For single-page lookups (e.g., "go to X and read Y"), use `navigate_page` on the current tab',
)
})
// --- Tool selection section ---
it('tool selection table uses new_page for lookups in newtab mode', () => {
const prompt = buildNewTab()
expect(prompt).toContain(
'`new_page` (background) → extract data → `close_page`',
)
})
it('tool selection includes reminder about active tab in newtab mode', () => {
const prompt = buildNewTab()
expect(prompt).toContain(
'The active tab is the New Tab chat UI. Never navigate or close it.',
)
})
it('tool selection table uses navigate_page for lookups in sidepanel mode', () => {
const prompt = buildRegular({ origin: 'sidepanel' })
expect(prompt).toContain('`navigate_page` on current tab')
})
it('tool selection does NOT have newtab reminder in sidepanel mode', () => {
const prompt = buildRegular({ origin: 'sidepanel' })
expect(prompt).not.toContain('The active tab is the New Tab chat UI')
})
})

View File

@@ -1,270 +0,0 @@
/**
* New-tab origin navigation guards.
*
* When the chat session originates from the new-tab page, navigate_page and
* close_page must reject attempts to act on the origin tab. These are
* integration tests that run against a real browser to verify the guards
* work end-to-end through executeTool.
*/
import { describe, it } from 'bun:test'
import assert from 'node:assert'
import type { ToolContext, ToolDefinition } from '../../src/tools/framework'
import { executeTool } from '../../src/tools/framework'
import { close_page, navigate_page, new_page } from '../../src/tools/navigation'
import type { ToolResult } from '../../src/tools/response'
import { withBrowser } from '../__helpers__/with-browser'
function textOf(result: {
content: { type: string; text?: string }[]
}): string {
return result.content
.filter((c) => c.type === 'text')
.map((c) => c.text)
.join('\n')
}
function structuredOf<T>(result: { structuredContent?: unknown }): T {
assert.ok(result.structuredContent, 'Expected structuredContent')
return result.structuredContent as T
}
describe('new-tab origin navigation guards', () => {
// Helper: execute a tool with newtab session context
function executeWithSession(
ctx: { browser: ToolContext['browser'] },
tool: ToolDefinition,
args: unknown,
session: ToolContext['session'],
): Promise<ToolResult> {
const signal = AbortSignal.timeout(30_000)
return executeTool(
tool,
args,
{
browser: ctx.browser,
directories: { workingDir: process.cwd() },
session,
},
signal,
)
}
// -------------------------------------------------------------------------
// navigate_page guards
// -------------------------------------------------------------------------
it('navigate_page rejects navigation on origin tab in newtab mode', async () => {
await withBrowser(async ({ browser }) => {
// Use a new page as the simulated "origin tab"
const setupResult = await executeTool(
new_page,
{ url: 'about:blank' },
{ browser, directories: { workingDir: process.cwd() } },
AbortSignal.timeout(30_000),
)
const originPageId = structuredOf<{ pageId: number }>(setupResult).pageId
const result = await executeWithSession(
{ browser },
navigate_page,
{ page: originPageId, action: 'url', url: 'https://example.com' },
{ origin: 'newtab', originPageId },
)
assert.ok(result.isError, 'Expected navigate_page to be rejected')
assert.ok(
textOf(result).includes('Cannot navigate the origin tab'),
`Expected origin tab error, got: ${textOf(result)}`,
)
// Cleanup
await executeTool(
close_page,
{ page: originPageId },
{ browser, directories: { workingDir: process.cwd() } },
AbortSignal.timeout(30_000),
)
})
}, 60_000)
it('navigate_page allows navigation on non-origin tab in newtab mode', async () => {
await withBrowser(async ({ browser }) => {
const originResult = await executeTool(
new_page,
{ url: 'about:blank' },
{ browser, directories: { workingDir: process.cwd() } },
AbortSignal.timeout(30_000),
)
const originPageId = structuredOf<{ pageId: number }>(originResult).pageId
// Open a second tab — this is NOT the origin tab
const otherResult = await executeTool(
new_page,
{ url: 'about:blank' },
{ browser, directories: { workingDir: process.cwd() } },
AbortSignal.timeout(30_000),
)
const otherPageId = structuredOf<{ pageId: number }>(otherResult).pageId
const result = await executeWithSession(
{ browser },
navigate_page,
{ page: otherPageId, action: 'url', url: 'https://example.com' },
{ origin: 'newtab', originPageId },
)
assert.ok(
!result.isError,
`Expected success, got error: ${textOf(result)}`,
)
assert.ok(textOf(result).includes('Navigated to'))
// Cleanup
const noSession = { browser, directories: { workingDir: process.cwd() } }
await executeTool(
close_page,
{ page: otherPageId },
noSession,
AbortSignal.timeout(30_000),
)
await executeTool(
close_page,
{ page: originPageId },
noSession,
AbortSignal.timeout(30_000),
)
})
}, 60_000)
it('navigate_page works normally in sidepanel mode', async () => {
  await withBrowser(async ({ browser }) => {
    const ctx = { browser, directories: { workingDir: process.cwd() } }
    const created = await executeTool(
      new_page,
      { url: 'about:blank' },
      ctx,
      AbortSignal.timeout(30_000),
    )
    const pageId = structuredOf<{ pageId: number }>(created).pageId
    // In sidepanel mode the origin-tab guard does not apply, even when the
    // navigated tab IS the session's origin tab.
    const navResult = await executeWithSession(
      { browser },
      navigate_page,
      { page: pageId, action: 'url', url: 'https://example.com' },
      { origin: 'sidepanel', originPageId: pageId },
    )
    assert.ok(
      !navResult.isError,
      `Expected success, got error: ${textOf(navResult)}`,
    )
    assert.ok(textOf(navResult).includes('Navigated to'))
    await executeTool(
      close_page,
      { page: pageId },
      ctx,
      AbortSignal.timeout(30_000),
    )
  })
}, 60_000)
it('navigate_page works when session is undefined (backwards compat)', async () => {
  await withBrowser(async ({ browser, execute }) => {
    const created = await execute(new_page, { url: 'about:blank' })
    const { pageId } = structuredOf<{ pageId: number }>(created)
    // withBrowser's execute() attaches no session, mimicking older clients
    // that predate the session-aware navigation guards.
    const navResult = await execute(navigate_page, {
      page: pageId,
      action: 'url',
      url: 'https://example.com',
    })
    assert.ok(
      !navResult.isError,
      `Expected success, got error: ${textOf(navResult)}`,
    )
    await execute(close_page, { page: pageId })
  })
}, 60_000)
// -------------------------------------------------------------------------
// close_page guards
// -------------------------------------------------------------------------
it('close_page rejects closing origin tab in newtab mode', async () => {
  await withBrowser(async ({ browser }) => {
    // No-session context for setup/teardown — hoisted to a const for
    // consistency with the sibling navigate_page tests instead of being
    // repeated inline at every call site.
    const noSession = { browser, directories: { workingDir: process.cwd() } }
    const setupResult = await executeTool(
      new_page,
      { url: 'about:blank' },
      noSession,
      AbortSignal.timeout(30_000),
    )
    const originPageId = structuredOf<{ pageId: number }>(setupResult).pageId
    // Attempting to close the origin tab under the newtab guard must fail.
    const result = await executeWithSession(
      { browser },
      close_page,
      { page: originPageId },
      { origin: 'newtab', originPageId },
    )
    assert.ok(result.isError, 'Expected close_page to be rejected')
    assert.ok(
      textOf(result).includes('Cannot close the origin tab'),
      `Expected origin tab error, got: ${textOf(result)}`,
    )
    // Clean up the page we created (without newtab guard)
    await executeTool(
      close_page,
      { page: originPageId },
      noSession,
      AbortSignal.timeout(30_000),
    )
  })
}, 60_000)
it('close_page allows closing non-origin tab in newtab mode', async () => {
  await withBrowser(async ({ browser }) => {
    // No-session context shared by setup and teardown, consistent with the
    // sibling tests (previously repeated inline four times).
    const noSession = { browser, directories: { workingDir: process.cwd() } }
    const originResult = await executeTool(
      new_page,
      { url: 'about:blank' },
      noSession,
      AbortSignal.timeout(30_000),
    )
    const originPageId = structuredOf<{ pageId: number }>(originResult).pageId
    const otherResult = await executeTool(
      new_page,
      { url: 'about:blank' },
      noSession,
      AbortSignal.timeout(30_000),
    )
    const otherPageId = structuredOf<{ pageId: number }>(otherResult).pageId
    // Closing a tab that is NOT the origin must succeed under the guard.
    const result = await executeWithSession(
      { browser },
      close_page,
      { page: otherPageId },
      { origin: 'newtab', originPageId },
    )
    assert.ok(
      !result.isError,
      `Expected success, got error: ${textOf(result)}`,
    )
    assert.ok(textOf(result).includes(`Closed page ${otherPageId}`))
    // Cleanup origin page
    await executeTool(
      close_page,
      { page: originPageId },
      noSession,
      AbortSignal.timeout(30_000),
    )
  })
}, 60_000)
})

View File

@@ -51,9 +51,9 @@
"@radix-ui/react-use-controllable-state": "^1.2.2",
"@sentry/react": "^10.31.0",
"@sentry/vite-plugin": "^4.6.1",
"@tanstack/query-async-storage-persister": "^5.95.2",
"@tanstack/react-query": "^5.95.2",
"@tanstack/react-query-persist-client": "^5.95.2",
"@tanstack/query-async-storage-persister": "^5.90.21",
"@tanstack/react-query": "^5.90.19",
"@tanstack/react-query-persist-client": "^5.90.21",
"@types/cytoscape": "^3.31.0",
"@types/dompurify": "^3.2.0",
"@webext-core/messaging": "^2.3.0",
@@ -76,8 +76,8 @@
"eventsource-parser": "^3.0.6",
"graphql": "^16.12.0",
"hono": "^4.12.3",
"idb-keyval": "^6.2.2",
"klavis": "^2.15.0",
"localforage": "^1.10.0",
"lucide-react": "^0.562.0",
"motion": "^12.23.24",
"nanoid": "^5.1.6",
@@ -170,7 +170,7 @@
},
"apps/server": {
"name": "@browseros/server",
"version": "0.0.80",
"version": "0.0.79",
"bin": {
"browseros-server": "./src/index.ts",
},
@@ -231,7 +231,7 @@
},
"packages/agent-sdk": {
"name": "@browseros-ai/agent-sdk",
"version": "0.0.7",
"version": "0.0.5",
"dependencies": {
"eventsource-parser": "^3.0.6",
"zod-to-json-schema": "^3.24.1",
@@ -1780,15 +1780,15 @@
"@tailwindcss/vite": ["@tailwindcss/vite@4.1.18", "", { "dependencies": { "@tailwindcss/node": "4.1.18", "@tailwindcss/oxide": "4.1.18", "tailwindcss": "4.1.18" }, "peerDependencies": { "vite": "^5.2.0 || ^6 || ^7" } }, "sha512-jVA+/UpKL1vRLg6Hkao5jldawNmRo7mQYrZtNHMIVpLfLhDml5nMRUo/8MwoX2vNXvnaXNNMedrMfMugAVX1nA=="],
"@tanstack/query-async-storage-persister": ["@tanstack/query-async-storage-persister@5.95.2", "", { "dependencies": { "@tanstack/query-core": "5.95.2", "@tanstack/query-persist-client-core": "5.95.2" } }, "sha512-ZhPIHH8J833OVZhEWwwdOk0uhY94d9Wgdnq97JoQx4Ui4xx4Dh6e7WPUrjlUWo88Yqi4Ij+T1o/VR7Vlbnkbjw=="],
"@tanstack/query-async-storage-persister": ["@tanstack/query-async-storage-persister@5.90.21", "", { "dependencies": { "@tanstack/query-core": "5.90.19", "@tanstack/query-persist-client-core": "5.91.18" } }, "sha512-edpZzybucsMxGiWOMy24io+5l4Lciw4bgv/N2EXQnSp0exS1siTOQbCAQET8jwStCEnaoEiS8ljChnfmnd2pkw=="],
"@tanstack/query-core": ["@tanstack/query-core@5.95.2", "", {}, "sha512-o4T8vZHZET4Bib3jZ/tCW9/7080urD4c+0/AUaYVpIqOsr7y0reBc1oX3ttNaSW5mYyvZHctiQ/UOP2PfdmFEQ=="],
"@tanstack/query-core": ["@tanstack/query-core@5.90.19", "", {}, "sha512-GLW5sjPVIvH491VV1ufddnfldyVB+teCnpPIvweEfkpRx7CfUmUGhoh9cdcUKBh/KwVxk22aNEDxeTsvmyB/WA=="],
"@tanstack/query-persist-client-core": ["@tanstack/query-persist-client-core@5.95.2", "", { "dependencies": { "@tanstack/query-core": "5.95.2" } }, "sha512-Opfj34WZ594YXpEcZEs8WBiyPGrjrKlGILfk/Ss283uwWQ36C5nX3tRY/bBiXmM82KWauUuNvahwGwiyco/8cQ=="],
"@tanstack/query-persist-client-core": ["@tanstack/query-persist-client-core@5.91.18", "", { "dependencies": { "@tanstack/query-core": "5.90.19" } }, "sha512-1FNvccVTFZph07dtA/4p5PRAVKfqVLPPxA8BXUoYjPOZP6T4qY1asItVkUFtUr6kBu48i0DBnEEZQLmK82BIFw=="],
"@tanstack/react-query": ["@tanstack/react-query@5.95.2", "", { "dependencies": { "@tanstack/query-core": "5.95.2" }, "peerDependencies": { "react": "^18 || ^19" } }, "sha512-/wGkvLj/st5Ud1Q76KF1uFxScV7WeqN1slQx5280ycwAyYkIPGaRZAEgHxe3bjirSd5Zpwkj6zNcR4cqYni/ZA=="],
"@tanstack/react-query": ["@tanstack/react-query@5.90.19", "", { "dependencies": { "@tanstack/query-core": "5.90.19" }, "peerDependencies": { "react": "^18 || ^19" } }, "sha512-qTZRZ4QyTzQc+M0IzrbKHxSeISUmRB3RPGmao5bT+sI6ayxSRhn0FXEnT5Hg3as8SBFcRosrXXRFB+yAcxVxJQ=="],
"@tanstack/react-query-persist-client": ["@tanstack/react-query-persist-client@5.95.2", "", { "dependencies": { "@tanstack/query-persist-client-core": "5.95.2" }, "peerDependencies": { "@tanstack/react-query": "^5.95.2", "react": "^18 || ^19" } }, "sha512-i3fvzD8gaLgQyFvRc/+iSUr60aL31tMN+5QM11zdPRg0K9CirIQjHD7WgXFBnD29KJDvcjcv7OrIBaPwZ+H9xw=="],
"@tanstack/react-query-persist-client": ["@tanstack/react-query-persist-client@5.90.21", "", { "dependencies": { "@tanstack/query-persist-client-core": "5.91.18" }, "peerDependencies": { "@tanstack/react-query": "^5.90.19", "react": "^18 || ^19" } }, "sha512-ix9fVeS96QZxaMPRUwf+k6RlNLJxvu0WSjQp9nPiosxRqquxz0tJ5ErMsclZO9Q/jmVhoFm4FKEZ8mfTLBMoiQ=="],
"@theguild/federation-composition": ["@theguild/federation-composition@0.21.3", "", { "dependencies": { "constant-case": "^3.0.4", "debug": "4.4.3", "json5": "^2.2.3", "lodash.sortby": "^4.7.0" }, "peerDependencies": { "graphql": "^16.0.0" } }, "sha512-+LlHTa4UbRpZBog3ggAxjYIFvdfH3UMvvBUptur19TMWkqU4+n3GmN+mDjejU+dyBXIG27c25RsiQP1HyvM99g=="],
@@ -2960,8 +2960,6 @@
"iconv-lite": ["iconv-lite@0.7.2", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw=="],
"idb-keyval": ["idb-keyval@6.2.2", "", {}, "sha512-yjD9nARJ/jb1g+CvD0tlhUHOrJ9Sy0P8T9MF3YaLlHnSRpwPfpTX0XIvpmw3gAJUmEu3FiICLBDPXVwyEvrleg=="],
"ieee754": ["ieee754@1.2.1", "", {}, "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="],
"ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="],
@@ -3188,7 +3186,7 @@
"lib0": ["lib0@0.2.117", "", { "dependencies": { "isomorphic.js": "^0.2.4" }, "bin": { "0serve": "bin/0serve.js", "0gentesthtml": "bin/gentesthtml.js", "0ecdsa-generate-keypair": "bin/0ecdsa-generate-keypair.js" } }, "sha512-DeXj9X5xDCjgKLU/7RR+/HQEVzuuEUiwldwOGsHK/sfAfELGWEyTcf0x+uOvCvK3O2zPmZePXWL85vtia6GyZw=="],
"lie": ["lie@3.3.0", "", { "dependencies": { "immediate": "~3.0.5" } }, "sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ=="],
"lie": ["lie@3.1.1", "", { "dependencies": { "immediate": "~3.0.5" } }, "sha512-RiNhHysUjhrDQntfYSfY4MU24coXXdEOgw9WGcKHNeEwffDYbF//u87M1EWaMGzuFoSbqW0C9C6lEEhDOAswfw=="],
"lighthouse-logger": ["lighthouse-logger@2.0.2", "", { "dependencies": { "debug": "^4.4.1", "marky": "^1.2.2" } }, "sha512-vWl2+u5jgOQuZR55Z1WM0XDdrJT6mzMP8zHUct7xTlWhuQs+eV0g+QL0RQdFjT54zVmbhLCP8vIVpy1wGn/gCg=="],
@@ -3234,6 +3232,8 @@
"local-pkg": ["local-pkg@1.1.2", "", { "dependencies": { "mlly": "^1.7.4", "pkg-types": "^2.3.0", "quansync": "^0.2.11" } }, "sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A=="],
"localforage": ["localforage@1.10.0", "", { "dependencies": { "lie": "3.1.1" } }, "sha512-14/H1aX7hzBBmmh7sGPd+AOMkkIrHM3Z1PAyGgZigA1H1p5O5ANnMyWzvpAETtG68/dC4pC0ncy3+PPGzXZHPg=="],
"locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="],
"lodash": ["lodash@4.17.23", "", {}, "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w=="],
@@ -4552,7 +4552,7 @@
"@better-auth/core/zod": ["zod@4.3.5", "", {}, "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g=="],
"@browseros/agent/@types/bun": ["@types/bun@1.3.11", "", { "dependencies": { "bun-types": "1.3.11" } }, "sha512-5vPne5QvtpjGpsGYXiFyycfpDF2ECyPcTSsFBMa0fraoxiQyMJ3SmuQIGhzPg2WJuWxVBoxWJ2kClYTcw/4fAg=="],
"@browseros/agent/@types/bun": ["@types/bun@1.3.10", "", { "dependencies": { "bun-types": "1.3.10" } }, "sha512-0+rlrUrOrTSskibryHbvQkDOWRJwJZqZlxrUs1u4oOoTln8+WIXBPmAuCF35SWB2z4Zl3E84Nl/D0P7803nigQ=="],
"@browseros/agent/zod": ["zod@4.3.5", "", {}, "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g=="],
@@ -5104,6 +5104,8 @@
"jest-worker/supports-color": ["supports-color@8.1.1", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q=="],
"jszip/lie": ["lie@3.3.0", "", { "dependencies": { "immediate": "~3.0.5" } }, "sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ=="],
"jszip/readable-stream": ["readable-stream@2.3.8", "", { "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA=="],
"jwa/safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="],
@@ -5304,7 +5306,7 @@
"@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],
"@browseros/agent/@types/bun/bun-types": ["bun-types@1.3.11", "", { "dependencies": { "@types/node": "*" } }, "sha512-1KGPpoxQWl9f6wcZh57LvrPIInQMn2TQ7jsgxqpRzg+l0QPOFvJVH7HmvHo/AiPgwXy+/Thf6Ov3EdVn1vOabg=="],
"@browseros/agent/@types/bun/bun-types": ["bun-types@1.3.10", "", { "dependencies": { "@types/node": "*" } }, "sha512-tcpfCCl6XWo6nCVnpcVrxQ+9AYN1iqMIzgrSKYMB/fjLtV2eyAVEg7AxQJuCq/26R6HpKWykQXuSOq/21RYcbg=="],
"@browseros/eval/@aws-sdk/client-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.23", "", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@aws-sdk/xml-builder": "^3.972.15", "@smithy/core": "^3.23.12", "@smithy/node-config-provider": "^4.3.12", "@smithy/property-provider": "^4.2.12", "@smithy/protocol-http": "^5.3.12", "@smithy/signature-v4": "^5.3.12", "@smithy/smithy-client": "^4.12.7", "@smithy/types": "^4.13.1", "@smithy/util-base64": "^4.3.2", "@smithy/util-middleware": "^4.2.12", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-aoJncvD1XvloZ9JLnKqTRL9dBy+Szkryoag9VT+V1TqsuUgIxV9cnBVM/hrDi2vE8bDqLiDR8nirdRcCdtJu0w=="],

View File

@@ -19,9 +19,7 @@
"start:agent": "bun ./scripts/build/controller-ext.ts && bun run --filter @browseros/agent dev",
"build": "bun run build:server && bun run build:agent && bun run build:ext",
"build:server": "FORCE_COLOR=1 bun scripts/build/server.ts --target=all",
"build:server:ci": "FORCE_COLOR=1 bun scripts/build/server.ts --target=all --compile-only",
"build:server:test": "FORCE_COLOR=1 bun scripts/build/server.ts --target=darwin-arm64 --no-upload",
"upload:cli-installers": "bun scripts/build/cli.ts",
"start:server:test": "bun run build:server:test && set -a && . apps/server/.env.development && set +a && dist/prod/server/.tmp/binaries/browseros-server-darwin-arm64",
"build:agent:dev": "FORCE_COLOR=1 bun run --filter @browseros/agent --elide-lines=0 build:dev",
"build:agent": "bun run codegen:agent && bun run --filter @browseros/agent build",
@@ -36,7 +34,6 @@
"lint": "bunx biome check",
"lint:fix": "bunx biome check --write --unsafe",
"gen:cdp": "bun scripts/codegen/cdp-protocol.ts",
"generate:models": "bun scripts/generate-models.ts",
"clean": "rimraf dist"
},
"repository": "browseros-ai/BrowserOS-server",

View File

@@ -1,12 +0,0 @@
# @browseros-ai/agent-sdk
## v0.0.7 (2026-03-26)
## What's Changed
- chore: bump @browseros-ai/agent-sdk to 0.0.7 (#569) (#569) @DaniAkash
## Contributors
- @DaniAkash

View File

@@ -1,17 +1,7 @@
# @browseros-ai/agent-sdk
[![npm version](https://img.shields.io/npm/v/@browseros-ai/agent-sdk)](https://www.npmjs.com/package/@browseros-ai/agent-sdk)
[![License: AGPL v3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](../../../../LICENSE)
Browser automation SDK for BrowserOS — navigate, interact, extract data, and verify page state using natural language.
Build automations that describe *what* to do, not *how* to do it. The SDK connects to a running BrowserOS instance and translates natural language instructions into browser actions using your choice of LLM provider.
## Prerequisites
- A running [BrowserOS](https://browseros.com) instance
- An API key for at least one [supported LLM provider](#llm-providers)
## Installation
```bash
@@ -27,7 +17,7 @@ import { Agent } from '@browseros-ai/agent-sdk'
import { z } from 'zod'
const agent = new Agent({
url: 'http://localhost:9100',
url: 'http://localhost:3000',
llm: {
provider: 'openai',
apiKey: process.env.OPENAI_API_KEY,
@@ -52,40 +42,6 @@ const { data } = await agent.extract('get all product names and prices', {
const { success, reason } = await agent.verify('user is logged in')
```
## Multi-Step Example
Combine navigation, actions, extraction, and verification for end-to-end automation:
```typescript
import { Agent } from '@browseros-ai/agent-sdk'
import { z } from 'zod'
const agent = new Agent({
url: 'http://localhost:9100',
llm: { provider: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY },
})
// 1. Navigate
await agent.nav('https://news.ycombinator.com')
// 2. Extract data
const { data: stories } = await agent.extract('get the top 5 stories with title, points, and link', {
schema: z.array(z.object({
title: z.string(),
points: z.number(),
link: z.string(),
})),
})
// 3. Act on extracted data
await agent.act(`click on the story titled "${stories[0].title}"`)
// 4. Verify the result
const { success } = await agent.verify('the story page or external link has loaded')
console.log({ stories, navigationSuccess: success })
```
## API Reference
### `new Agent(options)`
@@ -149,6 +105,8 @@ const { success, reason } = await agent.verify('the form was submitted successfu
## LLM Providers
Supported providers:
| Provider | Config |
|----------|--------|
| OpenAI | `{ provider: 'openai', apiKey: '...' }` |
@@ -163,11 +121,11 @@ const { success, reason } = await agent.verify('the form was submitted successfu
## Progress Events
Track agent operations in real time:
Track agent operations:
```typescript
const agent = new Agent({
url: 'http://localhost:9100',
url: 'http://localhost:3000',
onProgress: (event) => {
console.log(`[${event.type}] ${event.message}`)
},
@@ -196,13 +154,6 @@ try {
}
```
## Links
- [Documentation](https://docs.browseros.com)
- [GitHub](https://github.com/browseros-ai/BrowserOS)
- [Changelog](./CHANGELOG.md)
- [Discord](https://discord.gg/YKwjt5vuKr)
## License
[AGPL-3.0-or-later](../../../../LICENSE)
AGPL-3.0-or-later

View File

@@ -1,6 +1,6 @@
{
"name": "@browseros-ai/agent-sdk",
"version": "0.0.7",
"version": "0.0.5",
"description": "Browser automation SDK for BrowserOS - navigate, interact, extract data with natural language",
"type": "module",
"license": "AGPL-3.0-or-later",

View File

@@ -1,67 +0,0 @@
# @browseros/cdp-protocol
Type-safe Chrome DevTools Protocol bindings for BrowserOS.
> **Internal package** — auto-generated TypeScript types and API wrappers for all CDP domains. Used by `@browseros/server` to communicate with Chromium.
## Usage
Import domain types or domain API wrappers using subpath exports:
```typescript
// Import type definitions for a CDP domain
import type { NavigateParams, NavigateReturn } from '@browseros/cdp-protocol/domains/page'
// Import the API wrapper for a domain
import { PageAPI } from '@browseros/cdp-protocol/domain-apis/page'
// Core protocol API
import { ProtocolAPI } from '@browseros/cdp-protocol/protocol-api'
// Factory function
import { createAPI } from '@browseros/cdp-protocol/create-api'
```
## Supported Domains
All standard Chrome DevTools Protocol domains are supported:
| Category | Domains |
|----------|---------|
| **Page & DOM** | Page, DOM, DOMDebugger, DOMSnapshot, DOMStorage, CSS, Overlay |
| **Network** | Network, Fetch, IO, ServiceWorker, CacheStorage |
| **Input & Interaction** | Input, Emulation, DeviceOrientation, DeviceAccess |
| **JavaScript** | Runtime, Debugger, Console, Profiler, HeapProfiler |
| **Browser** | Browser, Target, Inspector, Extensions, PWA |
| **Performance** | Performance, PerformanceTimeline, Tracing, Memory |
| **Media** | Media, WebAudio, Cast |
| **Security** | Security, WebAuthn, FedCm |
| **Storage** | IndexedDB, Storage, FileSystem |
| **Other** | Accessibility, Animation, Audits, Autofill, BackgroundService, BluetoothEmulation, EventBreakpoints, HeadlessExperimental, LayerTree, Log, Preload, Schema, SystemInfo, Tethering |
| **BrowserOS Custom** | Bookmarks, History |
## Structure
```
src/generated/
├── domains/ # Type definitions for each CDP domain
│ ├── page.ts
│ ├── dom.ts
│ ├── network.ts
│ └── ...
├── domain-apis/ # API wrapper classes for each domain
│ ├── page.ts
│ ├── dom.ts
│ ├── network.ts
│ └── ...
├── protocol-api.ts # Unified protocol API
└── create-api.ts # API factory
```
## Regenerating Types
Types are auto-generated from the CDP protocol specification. The generated output lives in `src/generated/` and should not be edited manually.
## License
[AGPL-3.0-or-later](../../../../LICENSE)

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env bun
import { Command } from 'commander'
import { runCliInstallerUpload, runCliRelease } from './cli/upload'
// CLI: upload BrowserOS CLI artifacts to the CDN. With no flags, only the
// installer scripts are refreshed; with --release, a full versioned release
// (binaries + installers + version.txt) is uploaded.
const program = new Command('cli-upload')
  .description('Upload BrowserOS CLI artifacts to CDN')
  .option(
    '--release',
    'Upload full release (binaries + installers + version.txt)',
  )
  .option('--version <version>', 'Release version (required with --release)')
  .option(
    '--binaries-dir <dir>',
    'Directory containing built archives (required with --release)',
  )
  .parse()

// Parsed flags; commander camel-cases --binaries-dir to binariesDir.
interface ParsedOpts {
  release?: boolean
  version?: string
  binariesDir?: string
}

const opts = program.opts<ParsedOpts>()

/** Dispatches to release upload or installer-only upload based on flags. */
async function main(): Promise<void> {
  if (!opts.release) {
    await runCliInstallerUpload()
    return
  }
  if (!opts.version) {
    throw new Error('--version is required with --release')
  }
  if (!opts.binariesDir) {
    throw new Error('--binaries-dir is required with --release')
  }
  await runCliRelease({
    version: opts.version,
    binariesDir: opts.binariesDir,
  })
}

main().catch((error) => {
  const message = error instanceof Error ? error.message : String(error)
  console.error(`\n✗ ${message}\n`)
  process.exit(1)
})

View File

@@ -1,44 +0,0 @@
import { existsSync, readFileSync } from 'node:fs'
import { join } from 'node:path'
import { parse } from 'dotenv'
import type { R2Config } from '../server/types'
// Repo-root-relative path of the env file holding production R2 credentials.
const PROD_ENV_PATH = join('apps', 'cli', '.env.production')
/**
 * Resolves a required configuration value, preferring the live process
 * environment over values parsed from the .env file. Throws when neither
 * source provides a non-blank value.
 */
function pickEnv(name: string, fileEnv: Record<string, string>): string {
  const value = process.env[name] ?? fileEnv[name]
  if (value && value.trim().length > 0) {
    return value
  }
  throw new Error(`Missing required environment variable: ${name}`)
}
function loadProdEnv(rootDir: string): Record<string, string> {
const prodEnvPath = join(rootDir, PROD_ENV_PATH)
if (!existsSync(prodEnvPath)) {
// In CI, credentials come from process.env — no .env file needed
return {}
}
return parse(readFileSync(prodEnvPath, 'utf-8'))
}
// Configuration bundle for CLI artifact uploads.
export interface CliUploadConfig {
  r2: R2Config
}

/**
 * Builds the R2 upload configuration for CLI artifacts. Every credential is
 * required (process.env wins over the .env file); the upload prefix falls
 * back to 'cli' when not configured anywhere.
 */
export function loadCliUploadConfig(rootDir: string): CliUploadConfig {
  const fileEnv = loadProdEnv(rootDir)
  const uploadPrefix =
    process.env.R2_UPLOAD_PREFIX ?? fileEnv.R2_UPLOAD_PREFIX ?? 'cli'
  const r2: R2Config = {
    accountId: pickEnv('R2_ACCOUNT_ID', fileEnv),
    accessKeyId: pickEnv('R2_ACCESS_KEY_ID', fileEnv),
    secretAccessKey: pickEnv('R2_SECRET_ACCESS_KEY', fileEnv),
    bucket: pickEnv('R2_BUCKET', fileEnv),
    downloadPrefix: '',
    uploadPrefix,
  }
  return { r2 }
}

View File

@@ -1,128 +0,0 @@
import { existsSync, readdirSync } from 'node:fs'
import { writeFile } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { dirname, join, resolve } from 'node:path'
import { fileURLToPath } from 'node:url'
import { log } from '../log'
import { createR2Client, joinObjectKey, uploadFileToObject } from '../server/r2'
import { loadCliUploadConfig } from './config'
// Public CDN origin where uploaded objects become downloadable.
const CDN_BASE_URL = 'https://cdn.browseros.com'
// Installer scripts published on every upload: repo-relative source path,
// object name in the bucket, and the Content-Type to serve them with.
const INSTALLERS = [
  {
    filePath: join('apps', 'cli', 'scripts', 'install.sh'),
    objectName: 'install.sh',
    contentType: 'text/x-shellscript; charset=utf-8',
  },
  {
    filePath: join('apps', 'cli', 'scripts', 'install.ps1'),
    objectName: 'install.ps1',
    contentType: 'text/plain; charset=utf-8',
  },
] as const
// Options for a full versioned CLI release upload.
export interface CliReleaseOptions {
  // Release version string, e.g. "0.1.0" — artifacts land under v<version>/.
  version: string
  // Directory containing the built archives (and checksums.txt).
  binariesDir: string
}
/**
 * Resolves the repository root (three levels above this file) and makes it
 * the process working directory so relative paths resolve consistently.
 */
function resolveRootDir(): string {
  const here = dirname(fileURLToPath(import.meta.url))
  const rootDir = resolve(here, '../../..')
  process.chdir(rootDir)
  return rootDir
}
/** Entry point: upload only the installer scripts. */
export async function runCliInstallerUpload(): Promise<void> {
  const rootDir = resolveRootDir()
  await uploadCliInstallers(rootDir)
}

/** Entry point: upload a full versioned release (binaries + installers). */
export async function runCliRelease(options: CliReleaseOptions): Promise<void> {
  const rootDir = resolveRootDir()
  await uploadCliRelease(rootDir, options)
}
export async function uploadCliInstallers(rootDir: string): Promise<void> {
const { r2 } = loadCliUploadConfig(rootDir)
const client = createR2Client(r2)
log.header('Uploading BrowserOS CLI installer scripts')
try {
for (const installer of INSTALLERS) {
const absolutePath = join(rootDir, installer.filePath)
if (!existsSync(absolutePath)) {
throw new Error(`Installer script not found: ${installer.filePath}`)
}
const objectKey = joinObjectKey(r2.uploadPrefix, installer.objectName)
log.step(`Uploading ${installer.filePath}`)
await uploadFileToObject(client, r2, objectKey, absolutePath, {
contentType: installer.contentType,
})
log.success(`Uploaded ${objectKey}`)
log.info(`${CDN_BASE_URL}/${objectKey}`)
}
log.done('CLI installer upload completed')
} finally {
client.destroy()
}
}
/**
 * Uploads a versioned CLI release: each archive (and checksums.txt) goes to
 * both v<version>/ and latest/, then latest/version.txt is published so
 * installers can discover the newest build, and finally the installer
 * scripts themselves are refreshed.
 */
async function uploadCliRelease(
  rootDir: string,
  options: CliReleaseOptions,
): Promise<void> {
  const { version, binariesDir } = options
  const absoluteBinariesDir = resolve(rootDir, binariesDir)
  if (!existsSync(absoluteBinariesDir)) {
    throw new Error(`Binaries directory not found: ${binariesDir}`)
  }
  // Only release artifacts are uploaded: tarballs, zips, and the checksums.
  const archives = readdirSync(absoluteBinariesDir).filter(
    (f) => f.endsWith('.tar.gz') || f.endsWith('.zip') || f === 'checksums.txt',
  )
  if (archives.length === 0) {
    throw new Error(`No archives found in ${binariesDir}`)
  }
  const { r2 } = loadCliUploadConfig(rootDir)
  const client = createR2Client(r2)
  log.header(`Uploading BrowserOS CLI v${version} release`)
  try {
    for (const filename of archives) {
      const filePath = join(absoluteBinariesDir, filename)
      const versionedKey = joinObjectKey(
        r2.uploadPrefix,
        `v${version}`,
        filename,
      )
      const latestKey = joinObjectKey(r2.uploadPrefix, 'latest', filename)
      // FIX: these two log calls interpolated a garbled "$(unknown)"
      // placeholder; restore the ${filename} interpolation, matching the
      // logging pattern in uploadCliInstallers above.
      log.step(`Uploading ${filename}`)
      await uploadFileToObject(client, r2, versionedKey, filePath)
      await uploadFileToObject(client, r2, latestKey, filePath)
      log.success(`Uploaded ${filename}`)
      log.info(`${CDN_BASE_URL}/${versionedKey}`)
    }
    // Publish latest/version.txt from a temp file.
    const versionTxtPath = join(tmpdir(), 'browseros-cli-version.txt')
    await writeFile(versionTxtPath, version, 'utf-8')
    const versionKey = joinObjectKey(r2.uploadPrefix, 'latest', 'version.txt')
    await uploadFileToObject(client, r2, versionKey, versionTxtPath, {
      contentType: 'text/plain; charset=utf-8',
    })
    log.success(`Uploaded ${versionKey}`)
    log.info(`${CDN_BASE_URL}/${versionKey}`)
    log.done('CLI binary upload completed')
  } finally {
    client.destroy()
  }
  await uploadCliInstallers(rootDir)
}

View File

@@ -21,24 +21,16 @@ export function parseBuildArgs(argv: string[]): BuildArgs {
)
.option('--upload', 'Upload artifact zips to R2')
.option('--no-upload', 'Skip zip upload to R2')
.option(
'--compile-only',
'Compile binaries only (skip R2 staging and upload)',
)
program.parse(argv, { from: 'user' })
const options = program.opts<{
target: string
manifest: string
upload: boolean
compileOnly: boolean
}>()
const compileOnly = options.compileOnly ?? false
return {
targets: resolveTargets(options.target),
manifestPath: options.manifest,
upload: compileOnly ? false : (options.upload ?? true),
compileOnly,
upload: options.upload ?? true,
}
}

View File

@@ -74,14 +74,7 @@ function validateProductionEnv(envVars: Record<string, string>): void {
}
}
export interface LoadBuildConfigOptions {
compileOnly?: boolean
}
export function loadBuildConfig(
rootDir: string,
options: LoadBuildConfigOptions = {},
): BuildConfig {
export function loadBuildConfig(rootDir: string): BuildConfig {
const fileEnv = loadProdEnv(rootDir)
const envVars = buildInlineEnv(fileEnv)
validateProductionEnv(envVars)
@@ -92,10 +85,6 @@ export function loadBuildConfig(
...process.env,
}
if (options.compileOnly) {
return { version: readServerVersion(rootDir), envVars, processEnv }
}
return {
version: readServerVersion(rootDir),
envVars,

View File

@@ -15,14 +15,19 @@ export async function runProdResourceBuild(argv: string[]): Promise<void> {
process.chdir(rootDir)
const args = parseBuildArgs(argv)
const manifestPath = resolve(rootDir, args.manifestPath)
if (!existsSync(manifestPath)) {
throw new Error(`Manifest not found: ${manifestPath}`)
}
const buildConfig = loadBuildConfig(rootDir, {
compileOnly: args.compileOnly,
})
const buildConfig = loadBuildConfig(rootDir)
const manifest = loadManifest(manifestPath)
const distRoot = getDistProdRoot()
log.header(`Building BrowserOS server artifacts v${buildConfig.version}`)
log.info(`Targets: ${args.targets.map((target) => target.id).join(', ')}`)
log.info(`Mode: ${args.compileOnly ? 'compile-only' : 'full'}`)
log.info(`Manifest: ${manifestPath}`)
log.info(`Upload: ${args.upload ? 'enabled' : 'disabled'}`)
const compiled = await compileServerBinaries(
args.targets,
@@ -31,26 +36,7 @@ export async function runProdResourceBuild(argv: string[]): Promise<void> {
buildConfig.version,
)
if (args.compileOnly) {
log.done('Compile-only build completed')
for (const binary of compiled) {
log.info(`${binary.target.id}: ${binary.binaryPath}`)
}
return
}
const manifestPath = resolve(rootDir, args.manifestPath)
if (!existsSync(manifestPath)) {
throw new Error(`Manifest not found: ${manifestPath}`)
}
const manifest = loadManifest(manifestPath)
const distRoot = getDistProdRoot()
const r2 = buildConfig.r2
if (!r2) {
throw new Error('R2 configuration is required for full builds')
}
const client = createR2Client(r2)
const client = createR2Client(buildConfig.r2)
const stagedArtifacts = []
try {
@@ -65,7 +51,7 @@ export async function runProdResourceBuild(argv: string[]): Promise<void> {
binary.target,
rules,
client,
r2,
buildConfig.r2,
buildConfig.version,
)
stagedArtifacts.push(staged)
@@ -76,7 +62,7 @@ export async function runProdResourceBuild(argv: string[]): Promise<void> {
stagedArtifacts,
buildConfig.version,
client,
r2,
buildConfig.r2,
args.upload,
)

View File

@@ -10,10 +10,6 @@ import {
import type { R2Config } from './types'
export interface UploadFileOptions {
contentType?: string
}
function createClientConfig(r2: R2Config): S3ClientConfig {
return {
region: 'auto',
@@ -85,7 +81,6 @@ export async function uploadFileToObject(
r2: R2Config,
key: string,
filePath: string,
options: UploadFileOptions = {},
): Promise<void> {
const data = await readFile(filePath)
await client.send(
@@ -93,7 +88,7 @@ export async function uploadFileToObject(
Bucket: r2.bucket,
Key: key,
Body: data,
ContentType: options.contentType ?? 'application/zip',
ContentType: 'application/zip',
}),
)
}

View File

@@ -21,7 +21,6 @@ export interface BuildArgs {
targets: BuildTarget[]
manifestPath: string
upload: boolean
compileOnly: boolean
}
export interface R2Config {
@@ -37,7 +36,7 @@ export interface BuildConfig {
version: string
envVars: Record<string, string>
processEnv: NodeJS.ProcessEnv
r2?: R2Config
r2: R2Config
}
export interface ResourceSource {

View File

@@ -1,145 +0,0 @@
/**
* Fetches models.dev/api.json and generates a compact models data file
* for BrowserOS. Run: bun scripts/generate-models.ts
*/
// Upstream catalog of LLM providers and models.
const API_URL = 'https://models.dev/api.json'
// Output location, resolved relative to this script.
// NOTE(review): URL#pathname stays percent-encoded and carries a leading
// slash before drive letters on Windows — confirm, or use fileURLToPath.
const OUTPUT_PATH = new URL(
  '../apps/agent/lib/llm-providers/models-dev-data.json',
  import.meta.url,
).pathname
// Raw model record as published by models.dev/api.json.
interface ModelsDevModel {
  id: string
  name: string
  family?: string
  attachment: boolean
  reasoning: boolean
  tool_call: boolean
  structured_output?: boolean
  modalities: { input: string[]; output: string[] }
  // Pricing fields — presumably USD per token unit; TODO confirm units
  // against models.dev documentation.
  cost?: {
    input: number
    output: number
    cache_read?: number
    cache_write?: number
  }
  limit: { context: number; output: number; input?: number }
  // e.g. "deprecated" — deprecated models are dropped by transformModel.
  status?: string
  release_date: string
  last_updated: string
}
// Raw provider record as published by models.dev/api.json.
interface ModelsDevProvider {
  id: string
  name: string
  npm: string
  api?: string
  doc: string
  env: string[]
  models: Record<string, ModelsDevModel>
}
// Compact model shape written to the BrowserOS data file.
interface OutputModel {
  id: string
  name: string
  contextWindow: number
  maxOutput: number
  supportsImages: boolean
  supportsReasoning: boolean
  supportsToolCall: boolean
  inputCost?: number
  outputCost?: number
}
// Compact provider shape written to the BrowserOS data file.
interface OutputProvider {
  name: string
  api?: string
  doc: string
  models: OutputModel[]
}
// models.dev ID → BrowserOS provider ID; providers absent from this map are
// excluded from the generated output entirely.
const PROVIDER_MAP: Record<string, string> = {
  anthropic: 'anthropic',
  openai: 'openai',
  google: 'google',
  openrouter: 'openrouter',
  azure: 'azure',
  'amazon-bedrock': 'bedrock',
  lmstudio: 'lmstudio',
  moonshotai: 'moonshot',
  'github-copilot': 'github-copilot',
}
/**
 * Converts a models.dev model record into the compact BrowserOS shape.
 * Deprecated models are dropped (returns null); cost fields are emitted
 * only when the upstream record carries pricing.
 */
function transformModel(model: ModelsDevModel): OutputModel | null {
  if (model.status === 'deprecated') {
    return null
  }
  const out: OutputModel = {
    id: model.id,
    name: model.name,
    contextWindow: model.limit.context,
    maxOutput: model.limit.output,
    // A model supports images if flagged as attachment-capable OR if image
    // is listed among its input modalities.
    supportsImages:
      model.attachment || model.modalities.input.includes('image'),
    supportsReasoning: model.reasoning,
    supportsToolCall: model.tool_call,
  }
  if (model.cost) {
    out.inputCost = model.cost.input
    out.outputCost = model.cost.output
  }
  return out
}
/**
 * Fetch the models.dev catalog, transform the mapped providers into the
 * compact BrowserOS shape, and write the result to OUTPUT_PATH as
 * pretty-printed JSON.
 */
async function main() {
  console.log(`Fetching ${API_URL}...`)
  const response = await fetch(API_URL)
  if (!response.ok) throw new Error(`Failed to fetch: ${response.status}`)
  const data: Record<string, ModelsDevProvider> = await response.json()
  console.log(`Fetched ${Object.keys(data).length} providers`)
  const output: Record<string, OutputProvider> = {}
  for (const [modelsDevId, browserosId] of Object.entries(PROVIDER_MAP)) {
    const provider = data[modelsDevId]
    // A missing provider is non-fatal: warn and keep going.
    if (!provider) {
      console.warn(`Provider not found in models.dev: ${modelsDevId}`)
      continue
    }
    // Drop deprecated models, then sort newest-first by last_updated.
    const models = Object.values(provider.models)
      .map(transformModel)
      .filter((m): m is OutputModel => m !== null)
      .sort((a, b) => {
        const dateA = provider.models[a.id]?.last_updated ?? ''
        const dateB = provider.models[b.id]?.last_updated ?? ''
        return dateB.localeCompare(dateA)
      })
    output[browserosId] = {
      name: provider.name,
      ...(provider.api && { api: provider.api }),
      doc: provider.doc,
      models,
    }
  }
  const totalModels = Object.values(output).reduce(
    (sum, p) => sum + p.models.length,
    0,
  )
  console.log(
    `Generated ${Object.keys(output).length} providers with ${totalModels} models`,
  )
  // Bun-specific file API; this script is run with `bun`.
  await Bun.write(OUTPUT_PATH, JSON.stringify(output, null, 2))
  console.log(`Written to ${OUTPUT_PATH}`)
}
main().catch((err) => {
  console.error(err)
  process.exit(1)
})

View File

@@ -1,133 +0,0 @@
# BrowserOS Browser (Chromium Fork)
Custom Chromium build with AI agent integration, enhanced privacy patches, and native MCP support.
> Based on **Chromium 146.0.7680.31** · Built with Python 3.12+ · Licensed under [AGPL-3.0](../../LICENSE)
## What This Is
This package contains the BrowserOS browser build system — everything needed to fetch Chromium source, apply BrowserOS patches, and produce signed binaries for macOS, Windows, and Linux. The build system is a Python CLI that orchestrates the entire pipeline from source to distributable.
BrowserOS patches add:
- Native AI agent sidebar and new tab integration
- MCP server endpoints baked into the browser
- Enhanced privacy via [ungoogled-chromium](https://github.com/ungoogled-software/ungoogled-chromium) patches
- Custom branding, icons, and entitlements
- Keychain access group management (macOS)
- Sparkle auto-update framework (macOS)
## Prerequisites
| Requirement | Details |
|-------------|---------|
| **Disk space** | ~100 GB for Chromium source + build artifacts |
| **Python** | 3.12+ |
| **macOS** | Xcode + Command Line Tools |
| **Linux** | `build-essential`, `clang`, `lld`, and Chromium's [Linux deps](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md) |
| **Windows** | Visual Studio 2022, Windows SDK |
## Directory Structure
```
packages/browseros/
├── build/ # Build system (Python CLI)
│ ├── __main__.py # CLI entry point
│ ├── browseros.py # Main app definition
│ ├── modules/
│ │ ├── setup/ # Chromium source fetch and setup
│ │ ├── patches/ # Patch application logic
│ │ ├── apply/ # Apply patches to source tree
│ │ ├── extract/ # Extract patches from modified source
│ │ ├── feature/ # Feature flag management
│ │ ├── package/ # Binary packaging
│ │ ├── sign/ # Code signing (macOS, Windows)
│ │ ├── ota/ # Over-the-air update support
│ │ └── resources/ # Resource management
│ ├── config/ # Build configuration
│ └── features.yaml # Feature flag definitions
├── chromium_patches/ # BrowserOS patches applied to Chromium source
│ ├── chrome/browser/ # Browser UI and feature patches
│ ├── components/ # Component patches (e.g., os_crypt)
│ └── ... # Organized to mirror Chromium source tree
├── chromium_files/ # New files added to Chromium (not patches)
├── series_patches/ # Ordered patch series
├── resources/ # Icons, entitlements, signing resources
│ └── entitlements/ # macOS entitlements (app, helper, GPU, etc.)
├── tools/
│ └── bdev # Developer tool
├── CHROMIUM_VERSION # Pinned Chromium version (MAJOR.MINOR.BUILD.PATCH)
├── BASE_COMMIT # Base Chromium commit hash
├── pyproject.toml # Python project config
└── requirements.txt # Python dependencies
```
## Build System
The `browseros` CLI manages the full build lifecycle:
```bash
# Install the build system
pip install -e .
# Or use uv
uv pip install -e .
```
Key commands:
```bash
browseros setup # Fetch and prepare Chromium source
browseros apply # Apply all patches to Chromium source
browseros build # Build BrowserOS binary
browseros package # Package into distributable (DMG, installer, AppImage)
browseros sign # Code sign the binary (macOS/Windows)
```
## Patch System
BrowserOS applies patches on top of vanilla Chromium. Patches are organized in two directories:
- **`chromium_patches/`** — Individual file patches, organized to mirror the Chromium source tree. Each file here replaces or modifies the corresponding file in Chromium.
- **`series_patches/`** — Ordered patch series applied sequentially.
### Adding a New Patch
1. Make your changes in the Chromium source tree
2. Use `browseros extract` to pull changes back into patch format
3. Place the patch in the appropriate directory mirroring Chromium's structure
4. Test with a full `browseros apply && browseros build` cycle
### Chromium Version Pinning
The exact Chromium version is pinned in `CHROMIUM_VERSION`:
```
MAJOR=146
MINOR=0
BUILD=7680
PATCH=31
```
To update the base Chromium version, update this file and `BASE_COMMIT`, then resolve any patch conflicts.
## Signing (macOS)
macOS builds require code signing for Keychain access, Gatekeeper, and notarization:
- Entitlements are in `resources/entitlements/` (app, helper, GPU, renderer, etc.)
- Designated requirements pin to Team ID for Keychain persistence across updates
- The signing module is at `build/modules/sign/macos.py`
## Feature Flags
Feature flags are defined in `features.yaml` and control which BrowserOS-specific features are compiled into the build. The feature module (`build/modules/feature/`) manages flag resolution at build time.
## Related Resources
- [Chromium Build Instructions](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md)
- [ungoogled-chromium](https://github.com/ungoogled-software/ungoogled-chromium) — upstream privacy patches
- [BrowserOS Agent Platform](../browseros-agent/) — the TypeScript/Go agent system that runs inside the browser

View File

@@ -0,0 +1,5 @@
bros
bros-linux-amd64
bros-linux-arm64
bros-darwin-amd64
bros-darwin-arm64

View File

@@ -1,27 +1,27 @@
BINARY := bdev
PREFIX ?= /usr/local/bin
VERSION ?= dev
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
LDFLAGS := -ldflags "-X main.version=$(VERSION)"
.PHONY: build install clean test fmt
.PHONY: build install clean test
build:
go build -ldflags "-X github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/cmd.Version=$(VERSION)" -o $(BINARY) .
go build $(LDFLAGS) -o $(BINARY) .
install: build
mkdir -p $(PREFIX)
cp $(BINARY) $(PREFIX)/$(BINARY)
ifneq ($(shell uname -s),Darwin)
@echo "Skipping codesign on non-macOS host"
else
codesign --force --sign - $(PREFIX)/$(BINARY)
endif
@echo "Installed $(BINARY) to $(PREFIX)/$(BINARY)"
install:
go install $(LDFLAGS) .
clean:
rm -f $(BINARY)
test:
go test ./...
fmt:
gofmt -w $$(find . -name '*.go' -not -path './vendor/*')
build-linux:
GOOS=linux GOARCH=amd64 go build $(LDFLAGS) -o $(BINARY)-linux-amd64 .
clean:
rm -f $(BINARY)
build-linux-arm:
GOOS=linux GOARCH=arm64 go build $(LDFLAGS) -o $(BINARY)-linux-arm64 .
build-darwin:
GOOS=darwin GOARCH=amd64 go build $(LDFLAGS) -o $(BINARY)-darwin-amd64 .
GOOS=darwin GOARCH=arm64 go build $(LDFLAGS) -o $(BINARY)-darwin-arm64 .

View File

@@ -1,32 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/resolve"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the "abort" command, which rolls back the pending files
// of the active conflict-resolution session.
func init() {
	run := func(cmd *cobra.Command, args []string) error {
		ws, err := resolve.FindActive(appState.Registry, appState.CWD)
		if err != nil {
			return err
		}
		if err := engine.Abort(cmd.Context(), ws); err != nil {
			return err
		}
		return renderResult(map[string]any{"workspace": ws.Name, "aborted": true}, func() {
			fmt.Println(ui.Warning(fmt.Sprintf("Aborted conflict resolution for %s", ws.Name)))
		})
	}
	abortCmd := &cobra.Command{
		Use:         "abort",
		Annotations: map[string]string{"group": "Conflict:"},
		Short:       "Abort conflict resolution and roll the pending files back",
		Args:        cobra.NoArgs,
		RunE:        run,
	}
	rootCmd.AddCommand(abortCmd)
}

View File

@@ -1,42 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the "add" command (alias "register"), which records a
// Chromium checkout path under a workspace name and persists the registry.
func init() {
	var repoFlag string
	runAdd := func(cmd *cobra.Command, args []string) error {
		if err := ensureRepoConfigured(repoFlag); err != nil {
			return err
		}
		entry, err := appState.Registry.Add(args[0], args[1])
		if err != nil {
			return err
		}
		if err := appState.Save(); err != nil {
			return err
		}
		payload := map[string]any{
			"workspace":    entry,
			"patches_repo": appState.Config.PatchesRepo,
		}
		return renderResult(payload, func() {
			fmt.Println(ui.Success("Registered workspace"))
			fmt.Printf("%s %s\n", ui.Muted("name:"), entry.Name)
			fmt.Printf("%s %s\n", ui.Muted("path:"), entry.Path)
			fmt.Printf("%s %s\n", ui.Muted("repo:"), appState.Config.PatchesRepo)
		})
	}
	addCmd := &cobra.Command{
		Use:         "add <name> <path>",
		Aliases:     []string{"register"},
		Annotations: map[string]string{"group": "Workspace:"},
		Short:       "Register a Chromium checkout as a workspace",
		Args:        cobra.ExactArgs(2),
		RunE:        runAdd,
	}
	addCmd.Flags().StringVar(&repoFlag, "patches-repo", "", "Path to packages/browseros")
	rootCmd.AddCommand(addCmd)
}

View File

@@ -1,65 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the "apply" command, which applies repo patches to a
// workspace. Positional args before "--" name the workspace (at most one);
// args after "--" filter which patch files are applied.
func init() {
	var src string
	var reset bool
	var changed string
	var rangeEnd string
	command := &cobra.Command{
		Use:         "apply [workspace] [-- files...]",
		Annotations: map[string]string{"group": "Core:"},
		Short:       "Apply repo patches to a workspace",
		Args:        cobra.ArbitraryArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Split "apply ws -- a b" into positional=[ws], filters=[a b].
			positional, filters := splitWorkspaceAndFilters(cmd, args)
			if len(positional) > 1 {
				return fmt.Errorf("expected at most one workspace name")
			}
			ws, err := resolveWorkspace(positional, src)
			if err != nil {
				return err
			}
			info, err := repoInfo()
			if err != nil {
				return err
			}
			result, err := engine.Apply(cmd.Context(), engine.ApplyOptions{
				Workspace:  ws,
				Repo:       info,
				Reset:      reset,
				ChangedRef: changed,
				RangeEnd:   rangeEnd,
				Filters:    filters,
			})
			if err != nil {
				return err
			}
			// Structured result plus a human-readable printer callback.
			return renderResult(result, func() {
				fmt.Println(ui.Title(fmt.Sprintf("Applied patches to %s", ws.Name)))
				fmt.Printf("%s %s\n", ui.Muted("mode:"), result.Mode)
				fmt.Printf("%s %d\n", ui.Muted("applied:"), len(result.Applied))
				fmt.Printf("%s %d\n", ui.Muted("orphaned:"), len(result.Orphaned))
				if len(result.Conflicts) > 0 {
					fmt.Println(ui.Warning("Conflicts detected"))
					for _, conflict := range result.Conflicts {
						fmt.Printf("  %s\n", conflict.ChromiumPath)
					}
					fmt.Println(ui.Hint(`Run "bdev continue" after fixing the current conflict.`))
				}
			})
		},
	}
	command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
	command.Flags().BoolVar(&reset, "reset", false, "Reset patched files to BASE_COMMIT before applying")
	command.Flags().StringVar(&changed, "changed", "", "Apply only patches changed in the given repo commit")
	command.Flags().StringVar(&rangeEnd, "range-end", "", "End revision when using --changed as a range start")
	rootCmd.AddCommand(command)
}

View File

@@ -0,0 +1,151 @@
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"bdev/internal/config"
"bdev/internal/engine"
"bdev/internal/git"
"bdev/internal/log"
"bdev/internal/ui"
"github.com/spf13/cobra"
)
// cloneCmd fresh-applies every patch onto the current checkout; intended
// for CI builds and brand-new checkouts (see runClone).
var cloneCmd = &cobra.Command{
	Use:   "clone",
	Short: "Fresh-apply all patches (for CI/new checkouts)",
	Long: `Apply all patches from the patches repository onto the current
Chromium checkout. Used for CI builds and new checkout setup.
Unlike pull, clone does not compare existing state — it applies everything.`,
	RunE: runClone,
}

// Flag storage for the clone command.
var (
	clonePatchesRepo string // --patches-repo: needed when no .bros/ exists yet
	cloneVerifyBase  bool   // --verify-base: fail if HEAD != BASE_COMMIT
	cloneClean       bool   // --clean: reset modified files before applying
	cloneDryRun      bool   // --dry-run: report only, touch nothing
	cloneName        string // --name: checkout name override
)

func init() {
	cloneCmd.Flags().StringVar(&clonePatchesRepo, "patches-repo", "", "path to BrowserOS packages directory")
	cloneCmd.Flags().BoolVar(&cloneVerifyBase, "verify-base", false, "fail if HEAD != BASE_COMMIT")
	cloneCmd.Flags().BoolVar(&cloneClean, "clean", false, "reset all modified files to BASE before applying")
	cloneCmd.Flags().BoolVar(&cloneDryRun, "dry-run", false, "show what would be applied")
	cloneCmd.Flags().StringVar(&cloneName, "name", "", "checkout name (default: directory name)")
	rootCmd.AddCommand(cloneCmd)
}
// runClone applies every patch from the patches repository onto the current
// Chromium checkout. If no .bros/ context exists it bootstraps one from
// --patches-repo (writing config and a logs dir unless --dry-run).
func runClone(cmd *cobra.Command, args []string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("getting cwd: %w", err)
	}
	// Try loading existing context, or create one from flags
	ctx, err := config.LoadContext()
	if err != nil {
		// NOTE(review): any LoadContext error — not only a missing .bros/ —
		// lands here and is discarded, so a corrupt config would be silently
		// re-initialized. Confirm this is intended.
		// No existing .bros/ — need --patches-repo
		if clonePatchesRepo == "" {
			return fmt.Errorf("no .bros/ found and --patches-repo not specified")
		}
		patchesRepo, err := filepath.Abs(clonePatchesRepo)
		if err != nil {
			return fmt.Errorf("resolving patches repo: %w", err)
		}
		baseCommit, err := config.ReadBaseCommit(patchesRepo)
		if err != nil {
			return err
		}
		name := cloneName
		if name == "" {
			name = filepath.Base(cwd)
		}
		brosDir := filepath.Join(cwd, config.BrosDirName)
		cfg := &config.Config{
			Name:        name,
			PatchesRepo: patchesRepo,
		}
		if !cloneDryRun {
			if err := config.WriteConfig(brosDir, cfg); err != nil {
				return err
			}
			// Best-effort: a missing logs dir only disables logging.
			_ = os.MkdirAll(filepath.Join(brosDir, "logs"), 0o755)
		}
		// Version is informational; ignore a read failure.
		chromiumVersion, _ := config.ReadChromiumVersion(patchesRepo)
		ctx = &config.Context{
			Config:          cfg,
			State:           &config.State{},
			ChromiumDir:     cwd,
			BrosDir:         brosDir,
			PatchesRepo:     patchesRepo,
			PatchesDir:      filepath.Join(patchesRepo, "chromium_patches"),
			BaseCommit:      baseCommit,
			ChromiumVersion: chromiumVersion,
		}
	}
	if cloneDryRun {
		fmt.Println(ui.MutedStyle.Render("dry run — no files will be modified"))
		fmt.Println()
	}
	opts := engine.CloneOpts{
		VerifyBase: cloneVerifyBase,
		Clean:      cloneClean,
		DryRun:     cloneDryRun,
	}
	result, err := engine.Clone(ctx, opts)
	if err != nil {
		return err
	}
	// Reuse pull rendering
	fmt.Println(ui.TitleStyle.Render("bdev clone"))
	fmt.Println()
	fmt.Printf(" %s %d patches applied\n",
		ui.SuccessStyle.Render("+"), len(result.Applied))
	if len(result.Conflicts) > 0 {
		fmt.Printf(" %s %d conflicts\n",
			ui.ErrorStyle.Render("x"), len(result.Conflicts))
	}
	if len(result.Deleted) > 0 {
		fmt.Printf(" %s %d files deleted\n",
			ui.DeletedPrefix, len(result.Deleted))
	}
	if len(result.Conflicts) > 0 {
		fmt.Print(ui.RenderConflictReport(result.Conflicts))
	}
	if !cloneDryRun {
		// Record the sync event and append to the log; both are best-effort.
		repoRev, _ := git.HeadRev(ctx.PatchesRepo)
		ctx.State.LastPull = &config.SyncEvent{
			PatchesRepoRev: repoRev,
			BaseCommit:     ctx.BaseCommit,
			Timestamp:      time.Now(),
			FileCount:      len(result.Applied) + len(result.Deleted),
		}
		_ = config.WriteState(ctx.BrosDir, ctx.State)
		logger := log.New(ctx.BrosDir)
		_ = logger.LogClone(ctx.BaseCommit, result)
	}
	// Conflicts are reported above; signal failure via a non-nil error.
	if len(result.Conflicts) > 0 {
		return fmt.Errorf("%d conflicts — see above for details", len(result.Conflicts))
	}
	return nil
}

View File

@@ -1,49 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/repo"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/workspace"
"github.com/spf13/cobra"
)
// repoInfo returns the patches-repo metadata held by the shared app state.
func repoInfo() (*repo.Info, error) {
	return appState.RepoInfo()
}
// resolveWorkspace resolves the target workspace from the first positional
// argument (when present) and the --src override, via the app state.
func resolveWorkspace(positional []string, src string) (workspace.Entry, error) {
	var name string
	if len(positional) != 0 {
		name = positional[0]
	}
	return appState.ResolveWorkspace(name, src)
}
// splitWorkspaceAndFilters separates positional arguments from the file
// filters that follow a "--" terminator on the command line.
func splitWorkspaceAndFilters(cmd *cobra.Command, args []string) ([]string, []string) {
	dash := cmd.ArgsLenAtDash()
	if dash < 0 {
		// No "--" present: everything is positional, no filters.
		return args, nil
	}
	return args[:dash], args[dash:]
}
// ensureRepoConfigured makes sure appState.Config.PatchesRepo points at a
// valid patches repo. An explicit override wins; otherwise the configured
// value is kept, or the repo is discovered from the current directory.
func ensureRepoConfigured(override string) error {
	alreadySet := appState.Config.PatchesRepo != ""
	if override == "" && alreadySet {
		return nil
	}
	candidate := override
	if candidate == "" {
		discovered, err := repo.Discover(appState.CWD)
		if err != nil {
			return fmt.Errorf(`unable to discover patches repo; pass --patches-repo or run from packages/browseros`)
		}
		candidate = discovered
	}
	info, err := repo.Load(candidate)
	if err != nil {
		return err
	}
	appState.Config.PatchesRepo = info.Root
	return nil
}

View File

@@ -1,39 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/resolve"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the "continue" command, which advances the active
// conflict-resolution session to the next conflict.
func init() {
	run := func(cmd *cobra.Command, args []string) error {
		ws, err := resolve.FindActive(appState.Registry, appState.CWD)
		if err != nil {
			return err
		}
		result, err := engine.Continue(cmd.Context(), ws)
		if err != nil {
			return err
		}
		return renderResult(result, func() {
			fmt.Println(ui.Success(fmt.Sprintf("Advanced conflict resolution for %s", ws.Name)))
			if len(result.Conflicts) > 0 {
				fmt.Println(ui.Warning("Next conflict"))
				for _, conflict := range result.Conflicts {
					fmt.Printf("  %s\n", conflict.ChromiumPath)
				}
			}
		})
	}
	continueCmd := &cobra.Command{
		Use:         "continue",
		Annotations: map[string]string{"group": "Conflict:"},
		Short:       "Advance to the next conflict after fixing the current one",
		Args:        cobra.NoArgs,
		RunE:        run,
	}
	rootCmd.AddCommand(continueCmd)
}

View File

@@ -2,52 +2,113 @@ package cmd
import (
"fmt"
"strings"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"bdev/internal/config"
"bdev/internal/git"
"bdev/internal/patch"
"bdev/internal/ui"
"github.com/spf13/cobra"
)
func init() {
var src string
command := &cobra.Command{
Use: "diff [workspace]",
Annotations: map[string]string{"group": "Core:"},
Short: "Preview patch differences for a workspace",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ws, err := resolveWorkspace(args, src)
if err != nil {
return err
}
info, err := repoInfo()
if err != nil {
return err
}
status, err := engine.InspectWorkspace(cmd.Context(), ws, info)
if err != nil {
return err
}
return renderResult(status, func() {
fmt.Println(ui.Title(fmt.Sprintf("%s patch diff", ws.Name)))
printGroup("Needs apply", status.NeedsApply)
printGroup("Needs update", status.NeedsUpdate)
printGroup("Orphaned", status.Orphaned)
})
},
}
command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
rootCmd.AddCommand(command)
var diffCmd = &cobra.Command{
Use: "diff",
Short: "Preview what push or pull would do",
RunE: runDiff,
}
func printGroup(title string, items []string) {
if len(items) == 0 {
fmt.Printf("%s %s\n", ui.Muted(title+":"), ui.Muted("none"))
return
var diffDirection string
func init() {
diffCmd.Flags().StringVar(&diffDirection, "direction", "push", "\"push\" or \"pull\"")
rootCmd.AddCommand(diffCmd)
}
func runDiff(cmd *cobra.Command, args []string) error {
ctx, err := config.LoadContext()
if err != nil {
return err
}
fmt.Printf("%s\n", ui.Header(title+":"))
for _, item := range items {
fmt.Printf(" %s\n", strings.TrimSpace(item))
switch diffDirection {
case "push":
return diffPush(ctx)
case "pull":
return diffPull(ctx)
default:
return fmt.Errorf("invalid direction %q — use \"push\" or \"pull\"", diffDirection)
}
}
// diffPush previews what a push would extract: every file changed in the
// checkout relative to BASE_COMMIT, tagged as added/modified/deleted.
func diffPush(ctx *config.Context) error {
	nameStatus, err := git.DiffNameStatus(ctx.ChromiumDir, ctx.BaseCommit)
	if err != nil {
		return err
	}
	if len(nameStatus) == 0 {
		fmt.Println(ui.MutedStyle.Render("No local changes to push."))
		return nil
	}
	fmt.Println(ui.TitleStyle.Render("bdev diff --direction push"))
	fmt.Println()
	// NOTE(review): Go map iteration order is randomized, so this listing
	// comes out in a different order on every run. Sorting the paths before
	// printing would make the output stable and diff-friendly.
	for path, op := range nameStatus {
		// Default to "modified"; adjust for adds and deletes.
		prefix := ui.ModifiedPrefix
		switch op {
		case patch.OpAdded:
			prefix = ui.AddedPrefix
		case patch.OpDeleted:
			prefix = ui.DeletedPrefix
		}
		fmt.Printf(" %s %s\n", prefix, path)
	}
	fmt.Println()
	fmt.Println(ui.MutedStyle.Render(fmt.Sprintf("%d files would be pushed", len(nameStatus))))
	return nil
}
// diffPull previews what a pull would change: repo patches that are new or
// newer than the checkout's state, plus files a pull would delete.
func diffPull(ctx *config.Context) error {
	repoPatchSet, err := patch.ReadPatchSet(ctx.PatchesDir)
	if err != nil {
		return err
	}
	diffOutput, err := git.DiffFull(ctx.ChromiumDir, ctx.BaseCommit)
	if err != nil {
		return err
	}
	localPatchSet, err := patch.ParseUnifiedDiff(diffOutput)
	if err != nil {
		return err
	}
	delta := patch.Compare(localPatchSet, repoPatchSet)
	changed := len(delta.NeedsUpdate) + len(delta.NeedsApply)
	if changed == 0 && len(delta.Deleted) == 0 {
		fmt.Println(ui.MutedStyle.Render("Already up to date."))
		return nil
	}
	fmt.Println(ui.TitleStyle.Render("bdev diff --direction pull"))
	fmt.Println()
	// One output row per affected file: prefix, path, muted tag.
	row := func(prefix any, file, tag string) {
		fmt.Printf(" %s %s %s\n", prefix, file, ui.MutedStyle.Render(tag))
	}
	for _, f := range delta.NeedsUpdate {
		row(ui.ModifiedPrefix, f, "(update)")
	}
	for _, f := range delta.NeedsApply {
		row(ui.AddedPrefix, f, "(new)")
	}
	for _, f := range delta.Deleted {
		row(ui.DeletedPrefix, f, "(delete)")
	}
	fmt.Println()
	fmt.Println(ui.MutedStyle.Render(fmt.Sprintf("%d files would be changed", changed+len(delta.Deleted))))
	return nil
}

View File

@@ -1,73 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the "extract" command, which writes workspace changes back
// into chromium_patches. With --range, the last two positional args are the
// start and end revisions; any earlier positional names the workspace.
func init() {
	var src string
	var commit string
	var rangeMode bool
	var squash bool
	var base string
	command := &cobra.Command{
		Use:         "extract [workspace] [--range <start> <end>] [-- files...]",
		Annotations: map[string]string{"group": "Core:"},
		Short:       "Extract workspace changes back to chromium_patches",
		Args:        cobra.ArbitraryArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			positional, filters := splitWorkspaceAndFilters(cmd, args)
			workspaceArgs := positional
			rangeStart := ""
			rangeEnd := ""
			if rangeMode {
				// Range mode: "<start> <end>" with an optional leading
				// workspace name, so 2 or 3 positionals are accepted.
				if len(positional) < 2 || len(positional) > 3 {
					return fmt.Errorf(`range mode expects "bdev extract [workspace] --range <start> <end>"`)
				}
				rangeStart = positional[len(positional)-2]
				rangeEnd = positional[len(positional)-1]
				workspaceArgs = positional[:len(positional)-2]
			}
			if len(workspaceArgs) > 1 {
				return fmt.Errorf("expected at most one workspace name")
			}
			ws, err := resolveWorkspace(workspaceArgs, src)
			if err != nil {
				return err
			}
			info, err := repoInfo()
			if err != nil {
				return err
			}
			result, err := engine.Extract(cmd.Context(), engine.ExtractOptions{
				Workspace:  ws,
				Repo:       info,
				Commit:     commit,
				RangeStart: rangeStart,
				RangeEnd:   rangeEnd,
				Squash:     squash,
				Base:       base,
				Filters:    filters,
			})
			if err != nil {
				return err
			}
			return renderResult(result, func() {
				fmt.Println(ui.Title(fmt.Sprintf("Extracted patches from %s", ws.Name)))
				fmt.Printf("%s %s\n", ui.Muted("mode:"), result.Mode)
				fmt.Printf("%s %d\n", ui.Muted("written:"), len(result.Written))
				fmt.Printf("%s %d\n", ui.Muted("deleted:"), len(result.Deleted))
			})
		},
	}
	command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
	command.Flags().StringVar(&commit, "commit", "", "Extract from a single commit")
	command.Flags().BoolVar(&rangeMode, "range", false, "Extract from a commit range")
	command.Flags().BoolVar(&squash, "squash", false, "Squash a range into a cumulative diff")
	command.Flags().StringVar(&base, "base", "", "Override BASE_COMMIT for extraction")
	rootCmd.AddCommand(command)
}

View File

@@ -0,0 +1,115 @@
package cmd
import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"

	"bdev/internal/config"
	"bdev/internal/git"
	"bdev/internal/ui"

	"github.com/spf13/cobra"
)
// initCmd wires a Chromium checkout to a patches repository (see runInit).
var initCmd = &cobra.Command{
	Use:   "init",
	Short: "Initialize a Chromium checkout for bdev",
	Long:  "Sets up a .bros/ directory in the current Chromium checkout,\nlinking it to a BrowserOS patches repository.",
	RunE:  runInit,
}

// Flag storage for the init command.
var (
	initPatchesRepo string // --patches-repo (required)
	initName        string // --name: human-readable checkout name
)

func init() {
	initCmd.Flags().StringVar(&initPatchesRepo, "patches-repo", "", "path to BrowserOS packages directory (required)")
	initCmd.Flags().StringVar(&initName, "name", "", "human name for this checkout (default: directory name)")
	// MarkFlagRequired only fails for unknown flag names; safe to ignore here.
	_ = initCmd.MarkFlagRequired("patches-repo")
	rootCmd.AddCommand(initCmd)
}
// runInit links the current Chromium checkout to a BrowserOS patches
// repository by writing .bros/config.yaml and creating .bros/logs/.
//
// It validates, in order, that:
//   - the cwd looks like a Chromium checkout,
//   - the checkout is not already initialized,
//   - the patches repo contains chromium_patches/,
//   - the repo's BASE_COMMIT exists in this checkout's git history.
//
// Returns an error describing the first failed validation or write.
func runInit(cmd *cobra.Command, args []string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("getting cwd: %w", err)
	}
	if !config.LooksLikeChromium(cwd) {
		return fmt.Errorf("current directory does not look like a Chromium checkout (missing chrome/, base/, or .git/)")
	}
	brosDir := filepath.Join(cwd, config.BrosDirName)
	if _, err := os.Stat(filepath.Join(brosDir, "config.yaml")); err == nil {
		return fmt.Errorf(".bros/config.yaml already exists — checkout already initialized")
	}
	patchesRepo, err := filepath.Abs(initPatchesRepo)
	if err != nil {
		return fmt.Errorf("resolving patches repo path: %w", err)
	}
	patchesDir := filepath.Join(patchesRepo, "chromium_patches")
	if _, err := os.Stat(patchesDir); err != nil {
		return fmt.Errorf("chromium_patches/ not found in %s", patchesRepo)
	}
	baseCommit, err := config.ReadBaseCommit(patchesRepo)
	if err != nil {
		return err
	}
	if !git.CommitExists(cwd, baseCommit) {
		return fmt.Errorf("BASE_COMMIT %s not found in this checkout's git history", baseCommit)
	}
	name := initName
	if name == "" {
		name = filepath.Base(cwd)
	}
	cfg := &config.Config{
		Name:        name,
		PatchesRepo: patchesRepo,
	}
	if err := config.WriteConfig(brosDir, cfg); err != nil {
		return err
	}
	// Create logs directory
	if err := os.MkdirAll(filepath.Join(brosDir, "logs"), 0o755); err != nil {
		return fmt.Errorf("creating logs directory: %w", err)
	}
	// Version string is informational only; ignore a read failure.
	chromiumVersion, _ := config.ReadChromiumVersion(patchesRepo)
	// Count existing patch files. WalkDir avoids the per-entry os.Lstat that
	// the older filepath.Walk performs; walk errors are deliberately swallowed
	// because the count is purely informational.
	patchCount := 0
	_ = filepath.WalkDir(patchesDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return nil
		}
		if !d.IsDir() {
			patchCount++
		}
		return nil
	})
	fmt.Println(ui.TitleStyle.Render("bdev init"))
	fmt.Println()
	fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Checkout:"), ui.ValueStyle.Render(name))
	fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Directory:"), cwd)
	fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Patches repo:"), patchesRepo)
	// Show an abbreviated hash; min() guards against a short BASE_COMMIT.
	fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Base commit:"), baseCommit[:min(12, len(baseCommit))])
	if chromiumVersion != "" {
		fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Chromium:"), chromiumVersion)
	}
	fmt.Printf(" %s %d files\n", ui.LabelStyle.Render("Patches:"), patchCount)
	fmt.Println()
	fmt.Println(ui.SuccessStyle.Render("Initialized .bros/config.yaml"))
	fmt.Println(ui.MutedStyle.Render("Run 'bdev pull' to apply patches, or 'bdev push' to extract."))
	return nil
}

View File

@@ -1,49 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the "list" command (alias "ls"), which prints every
// registered workspace with its sync state and patch counts.
func init() {
	command := &cobra.Command{
		Use:         "list",
		Aliases:     []string{"ls"},
		Annotations: map[string]string{"group": "Workspace:"},
		Short:       "List registered workspaces and their sync state",
		Args:        cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Empty registry: still render valid (empty) output and hint.
			if len(appState.Registry.Workspaces) == 0 {
				return renderResult(map[string]any{"workspaces": []any{}}, func() {
					fmt.Println("No workspaces registered. Run `bdev add <name> <path>`.")
				})
			}
			info, err := repoInfo()
			if err != nil {
				return err
			}
			rows := make([][]string, 0, len(appState.Registry.Workspaces))
			statuses := make([]*engine.WorkspaceStatus, 0, len(appState.Registry.Workspaces))
			for _, ws := range appState.Registry.Workspaces {
				status, err := engine.InspectWorkspace(cmd.Context(), ws, info)
				if err != nil {
					return err
				}
				statuses = append(statuses, status)
				rows = append(rows, []string{
					ws.Name,
					status.SyncState,
					// up-to-date / needs-update / orphaned counts
					fmt.Sprintf("%d/%d/%d", len(status.UpToDate), len(status.NeedsUpdate), len(status.Orphaned)),
					ws.Path,
				})
			}
			return renderResult(map[string]any{"workspaces": statuses}, func() {
				fmt.Println(ui.RenderTable([]string{"NAME", "STATE", "PATCHES", "PATH"}, rows))
			})
		},
	}
	rootCmd.AddCommand(command)
}

View File

@@ -1,41 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the "publish" command, which commits chromium_patches and
// pushes them to a remote (default "origin").
func init() {
	var commitMsg string
	runPublish := func(cmd *cobra.Command, args []string) error {
		info, err := repoInfo()
		if err != nil {
			return err
		}
		remote := "origin"
		if len(args) == 1 {
			remote = args[0]
		}
		result, err := engine.Publish(cmd.Context(), info, remote, commitMsg)
		if err != nil {
			return err
		}
		return renderResult(result, func() {
			fmt.Println(ui.Success("Published chromium_patches"))
			fmt.Printf("%s %s\n", ui.Muted("remote:"), result.Remote)
			fmt.Printf("%s %s\n", ui.Muted("branch:"), result.Branch)
			fmt.Printf("%s %s\n", ui.Muted("message:"), result.Message)
		})
	}
	publishCmd := &cobra.Command{
		Use:         "publish [remote]",
		Annotations: map[string]string{"group": "Remote:"},
		Short:       "Commit and push chromium_patches to a remote",
		Args:        cobra.MaximumNArgs(1),
		RunE:        runPublish,
	}
	publishCmd.Flags().StringVarP(&commitMsg, "message", "m", "", "Commit message for the patch publish commit")
	rootCmd.AddCommand(publishCmd)
}

View File

@@ -0,0 +1,142 @@
package cmd
import (
"fmt"
"time"
"bdev/internal/config"
"bdev/internal/engine"
"bdev/internal/git"
"bdev/internal/log"
"bdev/internal/ui"
"github.com/spf13/cobra"
)
// pullCmd applies patches from the patches repo to the checkout, optionally
// syncing the repo from a remote first (see runPull).
var pullCmd = &cobra.Command{
	Use:   "pull [remote] [-- file1 file2 ...]",
	Short: "Pull patches from repo to checkout",
	Long: `Apply patches from the patches repository to the current Chromium
checkout. Use an optional remote (for example: 'bdev pull origin')
to fetch/rebase the patches repo before applying changes locally.`,
	RunE: runPull,
}

// Flag storage for the pull command.
var (
	pullDryRun        bool   // --dry-run: report only, touch nothing
	pullRemote        string // --remote: sync patches repo before pulling
	pullNoSync        bool   // --no-sync: skip the remote sync step
	pullRebase        bool   // --rebase: use pull --rebase when syncing (default true)
	pullKeepLocalOnly bool   // --keep-local-only: preserve checkout-only changes (default true)
)

func init() {
	pullCmd.Flags().BoolVar(&pullDryRun, "dry-run", false, "show what would change")
	pullCmd.Flags().StringVar(&pullRemote, "remote", "", "patches repo remote to sync before pull")
	pullCmd.Flags().BoolVar(&pullNoSync, "no-sync", false, "skip syncing patches repo from remote")
	pullCmd.Flags().BoolVar(&pullRebase, "rebase", true, "use git pull --rebase when syncing remote")
	pullCmd.Flags().BoolVar(&pullKeepLocalOnly, "keep-local-only", true, "keep local-only checkout changes that are not in patches repo")
	rootCmd.AddCommand(pullCmd)
}
// runPull applies patches from the patches repo to the current checkout.
// When a remote is given (and neither --no-sync nor --dry-run is set) it
// first fetches and pull/rebases the patches repo, then recomputes the
// delta and applies it. State and logs are updated unless --dry-run.
func runPull(cmd *cobra.Command, args []string) error {
	ctx, err := config.LoadContext()
	if err != nil {
		return err
	}
	activity := ui.NewActivity(verbose)
	remote, files, err := resolveRemoteAndFiles(ctx.PatchesRepo, args, pullRemote)
	if err != nil {
		return err
	}
	shouldSync := remote != "" && !pullNoSync && !pullDryRun
	if shouldSync {
		// Refuse to sync on top of uncommitted repo changes.
		dirty, err := git.IsDirty(ctx.PatchesRepo)
		if err != nil {
			return err
		}
		if dirty {
			return fmt.Errorf("patches repo has local changes; commit/stash before syncing remote %q", remote)
		}
		activity.Step("syncing patches repo from remote %q", remote)
		beforeRev, _ := git.HeadRev(ctx.PatchesRepo)
		if err := git.Fetch(ctx.PatchesRepo, remote); err != nil {
			return err
		}
		branch, detached, err := git.CurrentBranch(ctx.PatchesRepo)
		if err != nil {
			return err
		}
		if detached {
			// No branch to pull onto; the fetch above is still useful.
			activity.Warn("patches repo is in detached HEAD; fetched remote but skipped pull/rebase")
		} else {
			if err := git.Pull(ctx.PatchesRepo, remote, branch, pullRebase); err != nil {
				return err
			}
		}
		afterRev, _ := git.HeadRev(ctx.PatchesRepo)
		if beforeRev != "" && afterRev != "" && beforeRev != afterRev {
			activity.Success("patches repo advanced %s -> %s", shortRev(beforeRev), shortRev(afterRev))
		} else {
			activity.Info("patches repo already up to date")
		}
		// Reload context so BASE_COMMIT etc. reflect the synced repo.
		ctx, err = config.LoadContext()
		if err != nil {
			return err
		}
	} else if remote != "" && pullDryRun {
		activity.Info("dry run enabled — skipping remote sync")
	} else if remote != "" && pullNoSync {
		activity.Info("remote %q provided, but sync is disabled via --no-sync", remote)
	}
	opts := engine.PullOpts{
		DryRun:        pullDryRun,
		Files:         files,
		KeepLocalOnly: pullKeepLocalOnly,
	}
	if pullDryRun {
		activity.Info("dry run enabled — no files will be modified")
		activity.Divider()
	}
	activity.Step("computing patch delta and applying updates")
	result, err := engine.Pull(ctx, opts)
	if err != nil {
		return err
	}
	fmt.Print(ui.RenderPullResult(result))
	if len(result.Conflicts) > 0 {
		fmt.Print(ui.RenderConflictReport(result.Conflicts))
	}
	if !pullDryRun {
		// Record the sync event and append to the log; both are best-effort.
		repoRev, _ := git.HeadRev(ctx.PatchesRepo)
		ctx.State.LastPull = &config.SyncEvent{
			PatchesRepoRev: repoRev,
			BaseCommit:     ctx.BaseCommit,
			Timestamp:      time.Now(),
			FileCount:      len(result.Applied) + len(result.Deleted) + len(result.Reverted) + len(result.LocalOnly) + len(result.Skipped),
		}
		_ = config.WriteState(ctx.BrosDir, ctx.State)
		logger := log.New(ctx.BrosDir)
		_ = logger.LogPull(ctx.BaseCommit, repoRev, result)
	}
	// Conflicts are reported above; signal failure via a non-nil error.
	if len(result.Conflicts) > 0 {
		return fmt.Errorf("%d conflicts — see above for details", len(result.Conflicts))
	}
	return nil
}

View File

@@ -0,0 +1,238 @@
package cmd
import (
"fmt"
"time"
"bdev/internal/config"
"bdev/internal/engine"
"bdev/internal/git"
"bdev/internal/log"
"bdev/internal/patch"
"bdev/internal/ui"
"github.com/spf13/cobra"
)
// pushCmd extracts local checkout changes into the patches repository and,
// when a remote is given, commits and publishes the resulting patch changes
// upstream. See runPush for the full flow.
var pushCmd = &cobra.Command{
	Use:   "push [remote] [-- file1 file2 ...]",
	Short: "Push local changes to patches repo",
	Long: `Extract diffs from the current Chromium checkout and write them
to the patches repository. When a remote is provided (for example:
'bdev push origin'), bdev commits patch changes and pushes upstream.`,
	RunE: runPush,
}

// Flag storage for the push command.
var (
	pushDryRun  bool   // --dry-run: report what would change without writing
	pushRemote  string // --remote: explicit remote to publish to
	pushNoSync  bool   // --no-sync: skip the pre-publish fetch/pull
	pushRebase  bool   // --rebase: use git pull --rebase during pre-publish sync
	pushMessage string // -m/--message: override the generated commit message
)

func init() {
	pushCmd.Flags().BoolVar(&pushDryRun, "dry-run", false, "show what would be pushed")
	pushCmd.Flags().StringVar(&pushRemote, "remote", "", "patches repo remote to publish to")
	pushCmd.Flags().BoolVar(&pushNoSync, "no-sync", false, "skip syncing patches repo from remote before publish")
	pushCmd.Flags().BoolVar(&pushRebase, "rebase", true, "use git pull --rebase when syncing before publish")
	pushCmd.Flags().StringVarP(&pushMessage, "message", "m", "", "commit message when publishing to remote")
	rootCmd.AddCommand(pushCmd)
}
// runPush implements `bdev push`: extract checkout diffs into the patches
// repo and, when a remote was resolved from flags/args, validate repo
// cleanliness, sync from the remote, then commit and push the patch changes.
// State and activity-log writes are best-effort and never fail the command.
func runPush(cmd *cobra.Command, args []string) error {
	ctx, err := config.LoadContext()
	if err != nil {
		return err
	}
	activity := ui.NewActivity(verbose)
	// remote may come from --remote or the first positional arg; the
	// remaining args are file filters.
	remote, files, err := resolveRemoteAndFiles(ctx.PatchesRepo, args, pushRemote)
	if err != nil {
		return err
	}
	shouldPublish := remote != "" && !pushDryRun
	if shouldPublish {
		// Refuse to publish on top of uncommitted patches-repo changes.
		dirty, err := git.IsDirty(ctx.PatchesRepo)
		if err != nil {
			return err
		}
		if dirty {
			return fmt.Errorf("patches repo has local changes; commit/stash before publishing to remote %q", remote)
		}
	}
	if shouldPublish && !pushNoSync {
		if err := syncPatchesRepo(activity, ctx.PatchesRepo, remote, pushRebase); err != nil {
			return err
		}
	}
	if remote != "" && pushDryRun {
		activity.Info("dry run enabled — skipping remote sync and publish")
	}
	opts := engine.PushOpts{
		DryRun: pushDryRun,
		Files:  files,
	}
	if pushDryRun {
		activity.Info("dry run enabled — no patch files will be written")
		activity.Divider()
	}
	activity.Step("extracting checkout changes into patches")
	result, err := engine.Push(ctx, opts)
	if err != nil {
		return err
	}
	renderPushResult(result, pushDryRun)
	if !pushDryRun {
		if remote != "" {
			if err := publishPatchChanges(activity, ctx, remote, result, pushMessage); err != nil {
				return err
			}
		}
		// Update state
		repoRev, _ := git.HeadRev(ctx.PatchesRepo)
		ctx.State.LastPush = &config.SyncEvent{
			PatchesRepoRev: repoRev,
			Timestamp:      time.Now(),
			FileCount:      result.Total() + len(result.Stale),
		}
		// Bookkeeping failures are deliberately ignored: the push succeeded.
		_ = config.WriteState(ctx.BrosDir, ctx.State)
		// Activity log
		logger := log.New(ctx.BrosDir)
		_ = logger.LogPush(ctx.BaseCommit, result)
	}
	return nil
}
// syncPatchesRepo fetches the given remote and pulls (optionally with
// --rebase) into the current branch of the patches repo so a publish starts
// from the latest upstream state. Unlike the pull command, a detached HEAD is
// a hard error here because a publish needs a branch to commit to.
func syncPatchesRepo(activity *ui.Activity, patchesRepo, remote string, rebase bool) error {
	activity.Step("syncing patches repo from remote %q", remote)
	prev, _ := git.HeadRev(patchesRepo)
	if err := git.Fetch(patchesRepo, remote); err != nil {
		return err
	}
	branch, detached, err := git.CurrentBranch(patchesRepo)
	switch {
	case err != nil:
		return err
	case detached:
		return fmt.Errorf("patches repo is in detached HEAD; cannot sync for publish")
	}
	if err := git.Pull(patchesRepo, remote, branch, rebase); err != nil {
		return err
	}
	next, _ := git.HeadRev(patchesRepo)
	// Only report movement when both revisions were readable and differ.
	if prev != "" && next != "" && prev != next {
		activity.Success("patches repo advanced %s -> %s", shortRev(prev), shortRev(next))
	} else {
		activity.Info("patches repo already up to date")
	}
	return nil
}
// publishPatchChanges commits pending chromium_patches changes in the patches
// repo and pushes the commit to remote. It is a no-op when the
// chromium_patches tree is clean, and refuses to operate on a detached HEAD.
func publishPatchChanges(
	activity *ui.Activity,
	ctx *config.Context,
	remote string,
	result *patch.PushResult,
	commitMessage string,
) error {
	// Only the chromium_patches path is considered; unrelated changes
	// elsewhere in the repo do not trigger a publish.
	dirty, err := git.IsDirty(ctx.PatchesRepo, "chromium_patches")
	if err != nil {
		return err
	}
	if !dirty {
		activity.Info("no patch repository changes to commit")
		return nil
	}
	branch, detached, err := git.CurrentBranch(ctx.PatchesRepo)
	if err != nil {
		return err
	}
	if detached {
		return fmt.Errorf("patches repo is in detached HEAD; cannot publish")
	}
	// Default commit message summarizes the push result counts.
	message := commitMessage
	if message == "" {
		message = fmt.Sprintf(
			"bdev push: %s (%d modified, %d added, %d deleted, %d stale)",
			ctx.Config.Name,
			len(result.Modified),
			len(result.Added),
			len(result.Deleted),
			len(result.Stale),
		)
	}
	activity.Step("committing patch changes to %s", branch)
	if err := git.Add(ctx.PatchesRepo, "chromium_patches"); err != nil {
		return err
	}
	if err := git.Commit(ctx.PatchesRepo, message); err != nil {
		return err
	}
	activity.Success("created patch commit")
	activity.Step("pushing patch commit to %s/%s", remote, branch)
	if err := git.Push(ctx.PatchesRepo, remote, branch); err != nil {
		return err
	}
	activity.Success("remote publish complete")
	return nil
}
// renderPushResult prints a human-readable push summary: one line per
// added/modified/deleted/stale patch, followed by aggregate counts. In
// dry-run mode the verb switches to the conditional form.
func renderPushResult(r *patch.PushResult, dryRun bool) {
	if r.Total() == 0 && len(r.Stale) == 0 {
		fmt.Println(ui.MutedStyle.Render("Nothing to push — checkout matches patches repo."))
		return
	}
	verb := "Pushed"
	if dryRun {
		verb = "Would push"
	}
	fmt.Println(ui.TitleStyle.Render("bdev push"))
	fmt.Println()
	for _, name := range r.Added {
		fmt.Printf(" %s %s\n", ui.AddedPrefix, name)
	}
	for _, name := range r.Modified {
		fmt.Printf(" %s %s\n", ui.ModifiedPrefix, name)
	}
	for _, name := range r.Deleted {
		fmt.Printf(" %s %s\n", ui.DeletedPrefix, name)
	}
	for _, name := range r.Stale {
		fmt.Printf(" %s %s\n", ui.SkippedPrefix, ui.MutedStyle.Render(name+" (stale, removed)"))
	}
	fmt.Println()
	fmt.Print(ui.SuccessStyle.Render(fmt.Sprintf("%s %d patches", verb, r.Total())))
	fmt.Println(ui.MutedStyle.Render(fmt.Sprintf(" (%d modified, %d added, %d deleted)",
		len(r.Modified), len(r.Added), len(r.Deleted))))
	if len(r.Stale) > 0 {
		fmt.Println(ui.MutedStyle.Render(fmt.Sprintf("Cleaned %d stale patches", len(r.Stale))))
	}
}

View File

@@ -1,33 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the `remove` (alias `rm`) workspace command, which drops a
// workspace entry from the registry and persists the updated app state.
func init() {
	command := &cobra.Command{
		Use:         "remove <name>",
		Aliases:     []string{"rm"},
		Annotations: map[string]string{"group": "Workspace:"},
		Short:       "Unregister a workspace",
		Args:        cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			entry, err := appState.Registry.Remove(args[0])
			if err != nil {
				return err
			}
			if err := appState.Save(); err != nil {
				return err
			}
			// Emit JSON or a short human summary depending on the --json flag.
			return renderResult(map[string]any{"workspace": entry}, func() {
				fmt.Println(ui.Success("Removed workspace"))
				fmt.Printf("%s %s\n", ui.Muted("name:"), entry.Name)
				fmt.Printf("%s %s\n", ui.Muted("path:"), entry.Path)
			})
		},
	}
	rootCmd.AddCommand(command)
}

View File

@@ -1,129 +1,31 @@
package cmd
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/app"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
var Version = "dev"
var (
jsonOut bool
verbose bool
appState *app.App
verbose bool
version string
)
var groupOrder = []string{
"Workspace:",
"Core:",
"Conflict:",
"Remote:",
}
// Thin wrappers over the ui package's styling helpers so the cobra template
// funcs registered in init can reference stable local names.
func helpHeader(s string) string { return ui.Header(s) }
func helpCmdCol(s string) string { return ui.Command(s) }
func helpHint(s string) string   { return ui.Hint(s) }
func helpAliases(aliases []string) string {
	return ui.Aliases(aliases)
}
// groupedHelp renders the command listing for the help template, bucketed by
// each subcommand's "group" annotation and emitted in the fixed groupOrder
// sequence. Commands without a group annotation land in "Core:"; unavailable
// commands (other than help) are omitted.
func groupedHelp(cmd *cobra.Command) string {
	buckets := make(map[string][]*cobra.Command)
	for _, sub := range cmd.Commands() {
		if !sub.IsAvailableCommand() && sub.Name() != "help" {
			continue
		}
		key := sub.Annotations["group"]
		if key == "" {
			key = "Core:"
		}
		buckets[key] = append(buckets[key], sub)
	}
	var out strings.Builder
	for _, key := range groupOrder {
		members := buckets[key]
		if len(members) == 0 {
			continue
		}
		out.WriteString("\n" + helpHeader(key) + "\n")
		for _, sub := range members {
			row := " " + helpCmdCol(fmt.Sprintf("%-14s", sub.Name())) + " " + sub.Short
			if len(sub.Aliases) > 0 {
				row += " " + helpAliases(sub.Aliases)
			}
			out.WriteString(row + "\n")
		}
	}
	return out.String()
}
// usageTemplate overrides cobra's default usage layout with styled section
// headers and the grouped command listing produced by groupedHelp. The
// template funcs it references are registered in init.
const usageTemplate = `{{helpHeader "Usage:"}}{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
{{helpHeader "Aliases:"}}
{{.NameAndAliases}}{{end}}{{if .HasExample}}
{{helpHeader "Examples:"}}
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
{{groupedHelp .}}{{end}}{{if .HasAvailableLocalFlags}}
{{helpHeader "Flags:"}}
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
{{helpHeader "Global Flags:"}}
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableSubCommands}}
{{helpHint (printf "Use \"%s [command] --help\" for more information." .CommandPath)}}{{end}}
`
var rootCmd = &cobra.Command{
Use: "bdev",
Short: "Workspace-centric BrowserOS patch tooling for Chromium checkouts",
Version: Version,
Short: "BrowserOS CLI — patch management, builds, and releases",
Long: "bdev manages BrowserOS patches across Chromium checkouts.\nUse push/pull to sync patches, clone for fresh applies.",
SilenceUsage: true,
SilenceErrors: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
var err error
appState, err = app.Load(jsonOut, verbose, "")
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
func init() {
cobra.AddTemplateFunc("helpHeader", helpHeader)
cobra.AddTemplateFunc("helpCmdCol", helpCmdCol)
cobra.AddTemplateFunc("helpAliases", helpAliases)
cobra.AddTemplateFunc("helpHint", helpHint)
cobra.AddTemplateFunc("groupedHelp", groupedHelp)
rootCmd.SetUsageTemplate(usageTemplate)
rootCmd.PersistentFlags().BoolVar(&jsonOut, "json", false, "Emit JSON output")
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose output")
rootCmd.CompletionOptions.DisableDefaultCmd = true
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "increase output detail")
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
func SetVersion(v string) {
version = v
rootCmd.Version = v
}
func renderResult(data any, human func()) error {
if jsonOut {
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
return encoder.Encode(data)
}
human()
return nil
func Execute() error {
return rootCmd.Execute()
}

View File

@@ -1,33 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/resolve"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
// init registers the `skip` conflict command, which advances past the current
// patch conflict in the active workspace without resolving it.
func init() {
	command := &cobra.Command{
		Use:         "skip",
		Annotations: map[string]string{"group": "Conflict:"},
		Short:       "Skip the current conflict and move to the next one",
		Args:        cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Operates on whichever workspace the current directory maps to.
			ws, err := resolve.FindActive(appState.Registry, appState.CWD)
			if err != nil {
				return err
			}
			result, err := engine.Skip(cmd.Context(), ws)
			if err != nil {
				return err
			}
			return renderResult(result, func() {
				fmt.Println(ui.Warning(fmt.Sprintf("Skipped current conflict in %s", ws.Name)))
			})
		},
	}
	rootCmd.AddCommand(command)
}

View File

@@ -1,45 +1,96 @@
package cmd
import (
"encoding/json"
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"bdev/internal/config"
"bdev/internal/engine"
"bdev/internal/ui"
"github.com/spf13/cobra"
)
func init() {
var src string
command := &cobra.Command{
Use: "status [workspace]",
Annotations: map[string]string{"group": "Core:"},
Short: "Show workspace sync state",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ws, err := resolveWorkspace(args, src)
if err != nil {
return err
}
info, err := repoInfo()
if err != nil {
return err
}
status, err := engine.InspectWorkspace(cmd.Context(), ws, info)
if err != nil {
return err
}
return renderResult(status, func() {
fmt.Println(ui.Title(fmt.Sprintf("%s (%s)", ws.Name, status.SyncState)))
fmt.Printf("%s %s\n", ui.Muted("path:"), ws.Path)
fmt.Printf("%s %s\n", ui.Muted("repo head:"), status.RepoHead)
fmt.Printf("%s %s\n", ui.Muted("last sync:"), status.LastSyncRev)
fmt.Printf("%s %s\n", ui.Muted("last apply:"), status.LastApplyRev)
fmt.Printf("%s %d\n", ui.Muted("needs apply:"), len(status.NeedsApply))
fmt.Printf("%s %d\n", ui.Muted("needs update:"), len(status.NeedsUpdate))
fmt.Printf("%s %d\n", ui.Muted("orphaned:"), len(status.Orphaned))
})
},
}
command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
rootCmd.AddCommand(command)
// statusCmd reports how the Chromium checkout diverges from the patches repo.
var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show sync state between checkout and patches repo",
	RunE:  runStatus,
}

// Flag storage for the status command.
var (
	statusJSON  bool // --json: machine-readable output
	statusFiles bool // --files: include per-file listings
)

func init() {
	statusCmd.Flags().BoolVar(&statusJSON, "json", false, "output as JSON")
	statusCmd.Flags().BoolVar(&statusFiles, "files", false, "list individual files per category")
	rootCmd.AddCommand(statusCmd)
}
// runStatus implements `bdev status`: compute the sync summary for the
// current checkout and print it either as indented JSON (--json) or as the
// styled human-readable report.
func runStatus(cmd *cobra.Command, args []string) error {
	ctx, err := config.LoadContext()
	if err != nil {
		return err
	}
	result, err := engine.Status(ctx, statusFiles)
	if err != nil {
		return err
	}
	if !statusJSON {
		renderStatus(result)
		return nil
	}
	data, err := json.MarshalIndent(result, "", " ")
	if err != nil {
		return err
	}
	fmt.Println(string(data))
	return nil
}
// renderStatus prints the human-readable status report: checkout identity,
// abbreviated base commit, patches repo location, ahead/behind/synced counts,
// and (when populated) per-file listings for ahead and behind files.
func renderStatus(r *engine.StatusResult) {
	fmt.Println(ui.TitleStyle.Render("bdev status"))
	fmt.Println()
	fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Checkout:"), ui.ValueStyle.Render(r.CheckoutName))
	// min guards against base commits shorter than the 12-char abbreviation.
	fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Base commit:"), r.BaseCommit[:min(12, len(r.BaseCommit))])
	if r.ChromiumVersion != "" {
		fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Chromium:"), r.ChromiumVersion)
	}
	fmt.Printf(" %s %s\n", ui.LabelStyle.Render("Patches repo:"), r.PatchesRepo)
	fmt.Println()
	fmt.Println(" Sync status:")
	if r.Ahead > 0 {
		fmt.Printf(" %s %s\n",
			ui.WarningStyle.Render(fmt.Sprintf("ahead: %3d files", r.Ahead)),
			ui.MutedStyle.Render("(local changes not in patches repo)"))
	}
	if r.Behind > 0 {
		fmt.Printf(" %s %s\n",
			ui.WarningStyle.Render(fmt.Sprintf("behind: %3d files", r.Behind)),
			ui.MutedStyle.Render("(patches in repo not applied locally)"))
	}
	fmt.Printf(" %s\n",
		ui.SuccessStyle.Render(fmt.Sprintf("synced: %3d files", r.Synced)))
	if len(r.AheadFiles) > 0 {
		fmt.Println()
		fmt.Println(" Ahead files:")
		for _, f := range r.AheadFiles {
			fmt.Printf(" %s %s\n", ui.AddedPrefix, f)
		}
	}
	if len(r.BehindFiles) > 0 {
		fmt.Println()
		fmt.Println(" Behind files:")
		for _, f := range r.BehindFiles {
			fmt.Printf(" %s %s\n", ui.WarningStyle.Render(">"), f)
		}
	}
}

View File

@@ -1,58 +0,0 @@
package cmd
import (
"fmt"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/engine"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/ui"
"github.com/spf13/cobra"
)
func init() {
var src string
var rebase bool
var remote string
command := &cobra.Command{
Use: "sync [workspace]",
Annotations: map[string]string{"group": "Core:"},
Short: "Sync a workspace with the latest patch repo state",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ws, err := resolveWorkspace(args, src)
if err != nil {
return err
}
info, err := repoInfo()
if err != nil {
return err
}
result, err := engine.Sync(cmd.Context(), engine.SyncOptions{
Workspace: ws,
Repo: info,
Remote: remote,
Rebase: rebase,
})
if err != nil {
return err
}
return renderResult(result, func() {
fmt.Println(ui.Title(fmt.Sprintf("Synced %s", ws.Name)))
fmt.Printf("%s %s\n", ui.Muted("repo head:"), result.RepoHead)
fmt.Printf("%s %d\n", ui.Muted("applied:"), len(result.Applied))
if result.StashRef != "" {
fmt.Printf("%s %s\n", ui.Muted("stash:"), result.StashRef)
}
if len(result.Conflicts) > 0 {
fmt.Println(ui.Warning("Conflicts detected"))
for _, conflict := range result.Conflicts {
fmt.Printf(" %s\n", conflict)
}
}
})
},
}
command.Flags().StringVar(&src, "src", "", "Chromium checkout path to operate on directly")
command.Flags().BoolVar(&rebase, "rebase", false, "Re-apply stashed local changes after syncing")
command.Flags().StringVar(&remote, "remote", "origin", "Remote to pull from")
rootCmd.AddCommand(command)
}

View File

@@ -0,0 +1,44 @@
package cmd
import (
"fmt"
"strings"
"bdev/internal/git"
)
// resolveRemoteAndFiles decides which git remote (if any) a push/pull targets
// and which positional args are file filters. An explicit --remote value must
// exist in the repo or it is an error; otherwise the first positional arg is
// promoted to a remote when it names one, and everything else is files.
func resolveRemoteAndFiles(repoDir string, args []string, explicitRemote string) (string, []string, error) {
	if explicit := strings.TrimSpace(explicitRemote); explicit != "" {
		ok, err := git.HasRemote(repoDir, explicit)
		if err != nil {
			return "", nil, fmt.Errorf("resolving remote %q: %w", explicit, err)
		}
		if !ok {
			return "", nil, fmt.Errorf("remote %q not found in patches repo", explicit)
		}
		return explicit, args, nil
	}
	if len(args) == 0 {
		return "", nil, nil
	}
	candidate := args[0]
	ok, err := git.HasRemote(repoDir, candidate)
	if err != nil {
		return "", nil, fmt.Errorf("resolving remote %q: %w", candidate, err)
	}
	if !ok {
		// First arg is not a known remote: treat everything as files.
		return "", args, nil
	}
	return candidate, args[1:], nil
}
// shortRev abbreviates a git revision to at most 12 characters after
// trimming surrounding whitespace.
func shortRev(rev string) string {
	trimmed := strings.TrimSpace(rev)
	if len(trimmed) > 12 {
		return trimmed[:12]
	}
	return trimmed
}

View File

@@ -0,0 +1,59 @@
package cmd
import (
"os"
"os/exec"
"path/filepath"
"reflect"
"testing"
)
// TestResolveRemoteAndFiles verifies that a known positional remote name is
// promoted to the remote and stripped from the file list.
func TestResolveRemoteAndFiles(t *testing.T) {
	t.Parallel()
	repo := initRemoteRepo(t)
	remote, files, err := resolveRemoteAndFiles(repo, []string{"origin", "content/foo.cc"}, "")
	if err != nil {
		t.Fatalf("resolveRemoteAndFiles: %v", err)
	}
	if remote != "origin" {
		t.Fatalf("expected origin, got %q", remote)
	}
	if !reflect.DeepEqual(files, []string{"content/foo.cc"}) {
		t.Fatalf("unexpected files: %#v", files)
	}
}

// TestResolveRemoteAndFilesUnknownExplicitRemote verifies that an explicit
// --remote value which does not exist in the repo is rejected with an error.
func TestResolveRemoteAndFilesUnknownExplicitRemote(t *testing.T) {
	t.Parallel()
	repo := initRemoteRepo(t)
	if _, _, err := resolveRemoteAndFiles(repo, nil, "missing"); err == nil {
		t.Fatalf("expected error for unknown explicit remote")
	}
}
// initRemoteRepo creates a throwaway git repo with an "origin" remote,
// suitable for remote-resolution tests.
func initRemoteRepo(t *testing.T) string {
	t.Helper()
	dir := filepath.Join(t.TempDir(), "patches")
	if err := os.MkdirAll(dir, 0o755); err != nil {
		t.Fatalf("mkdir: %v", err)
	}
	runGitCmd(t, dir, "init")
	runGitCmd(t, dir, "remote", "add", "origin", "https://example.com/org/repo.git")
	return dir
}

// runGitCmd runs git with args in dir, failing the test on a non-zero exit,
// and returns the combined stdout/stderr output.
func runGitCmd(t *testing.T, dir string, args ...string) string {
	t.Helper()
	cmd := exec.Command("git", args...)
	cmd.Dir = dir
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("git %v failed: %v\n%s", args, err, string(out))
	}
	return string(out)
}

View File

@@ -0,0 +1,40 @@
package e2e
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
// bdevBinary is the path of the CLI binary compiled once by TestMain.
var bdevBinary string

// TestMain builds the bdev binary into a temp dir so every e2e test can exec
// the same compiled CLI, runs the tests, then removes the temp dir. Build or
// setup failures abort the whole run with a non-zero exit.
func TestMain(m *testing.M) {
	tmpDir, err := os.MkdirTemp("", "bdev-e2e-bin-*")
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create temp dir: %v\n", err)
		os.Exit(1)
	}
	// Locate the module root relative to this source file so `go build`
	// works regardless of the test's working directory.
	_, file, _, ok := runtime.Caller(0)
	if !ok {
		fmt.Fprintln(os.Stderr, "failed to resolve e2e test path")
		os.Exit(1)
	}
	moduleDir := filepath.Clean(filepath.Join(filepath.Dir(file), ".."))
	bdevBinary = filepath.Join(tmpDir, "bdev-e2e")
	build := exec.Command("go", "build", "-o", bdevBinary, ".")
	build.Dir = moduleDir
	if out, err := build.CombinedOutput(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to build bdev binary: %v\n%s\n", err, string(out))
		os.Exit(1)
	}
	code := m.Run()
	_ = os.RemoveAll(tmpDir)
	os.Exit(code)
}

View File

@@ -0,0 +1,244 @@
package e2e
import (
"encoding/json"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
// scenario holds the directory layout and key paths for one e2e run: a bare
// patches remote, its working clone, and two Chromium checkouts built by
// setupScenario.
type scenario struct {
	root          string // temp root containing everything below
	baseCommit    string // Chromium commit the patches are diffed against
	patchesRemote string // bare repo acting as the upstream remote
	patchesRepo   string // working clone of patchesRemote
	chromiumA     string // primary checkout exercised by the test
	chromiumB     string // second checkout used for the clone flow
	trackedPath   string // repo-relative file seeded with a patch
	newPath       string // repo-relative file added during the test
}

// statusJSON mirrors the counters of `bdev status --json` that the e2e
// assertions inspect.
type statusJSON struct {
	Ahead  int
	Behind int
	Synced int
}
// TestBdevOperationsE2E drives the compiled bdev binary through the full
// workflow: init, pull, local-only keep, push, remote publish, a simulated
// collaborator update, pull from remote, and a fresh clone into a second
// checkout.
func TestBdevOperationsE2E(t *testing.T) {
	env := setupScenario(t)
	// init: register checkout-a against the seeded patches repo.
	runBdev(t, env.chromiumA, "init", "--patches-repo", env.patchesRepo, "--name", "checkout-a")
	statusBefore := readStatus(t, env.chromiumA)
	if statusBefore.Behind == 0 {
		t.Fatalf("expected checkout-a to be behind before pull, got %#v", statusBefore)
	}
	// pull: preview the delta, then apply the seeded patch-v1.
	pullPreview := runBdev(t, env.chromiumA, "diff", "--direction", "pull")
	assertContains(t, pullPreview, env.trackedPath)
	runBdev(t, env.chromiumA, "pull", "--no-sync")
	assertFileContains(t, filepath.Join(env.chromiumA, env.trackedPath), "patch-v1")
	statusAfterPull := readStatus(t, env.chromiumA)
	if statusAfterPull.Behind != 0 || statusAfterPull.Synced == 0 {
		t.Fatalf("unexpected status after pull: %#v", statusAfterPull)
	}
	// Local-only edits must be kept by pull, not reverted.
	writeFile(t, filepath.Join(env.chromiumA, "base", ".keep"), "my-local-work\n")
	pullAgain := runBdev(t, env.chromiumA, "pull", "--no-sync", "base/.keep")
	assertContains(t, pullAgain, "local-only, kept")
	assertFileContains(t, filepath.Join(env.chromiumA, "base", ".keep"), "my-local-work")
	runGit(t, env.chromiumA, "checkout", env.baseCommit, "--", "base/.keep")
	// push: a modified tracked file plus a brand-new file become patches.
	writeFile(t, filepath.Join(env.chromiumA, env.trackedPath), "patch-v2\n")
	writeFile(t, filepath.Join(env.chromiumA, env.newPath), "brand-new\n")
	pushPreview := runBdev(t, env.chromiumA, "diff", "--direction", "push")
	assertContains(t, pushPreview, env.trackedPath)
	runBdev(t, env.chromiumA, "push", "--no-sync", env.trackedPath, env.newPath)
	assertFileContains(t, filepath.Join(env.patchesRepo, "chromium_patches", env.newPath), "diff --git")
	// Keep the patches repo clean before remote-aware publish flow.
	commitRepo(t, env.patchesRepo, "chore: e2e checkpoint after push --no-sync")
	writeFile(t, filepath.Join(env.chromiumA, env.trackedPath), "patch-v3\n")
	publish := runBdev(t, env.chromiumA, "push", "origin", "-m", "e2e: publish patch-v3", env.trackedPath)
	assertContains(t, publish, "remote publish complete")
	// Verify the publish reached the bare remote via a fresh mirror clone.
	mirror := filepath.Join(env.root, "mirror")
	runGit(t, env.root, "clone", env.patchesRemote, mirror)
	assertFileContains(t, filepath.Join(mirror, "chromium_patches", env.trackedPath), "patch-v3")
	// Simulate a collaborator pushing patch-v4 upstream, then pull it down.
	collab := filepath.Join(env.root, "collab")
	runGit(t, env.root, "clone", env.patchesRemote, collab)
	configRepo(t, collab)
	diffV4 := buildDiffFromBase(t, env.chromiumA, env.baseCommit, env.trackedPath, "patch-v4\n")
	writeFile(t, filepath.Join(collab, "chromium_patches", env.trackedPath), diffV4)
	commitRepo(t, collab, "feat: remote patch-v4 update")
	branch := strings.TrimSpace(runGit(t, collab, "symbolic-ref", "--short", "HEAD"))
	runGit(t, collab, "push", "origin", "HEAD:"+branch)
	runBdev(t, env.chromiumA, "pull", "origin")
	assertFileContains(t, filepath.Join(env.chromiumA, env.trackedPath), "patch-v4")
	// clone: a second checkout picks up the latest patches in one step.
	runBdev(t, env.chromiumB, "clone", "--patches-repo", env.patchesRepo, "--verify-base", "--clean", "--name", "checkout-b")
	assertFileContains(t, filepath.Join(env.chromiumB, env.trackedPath), "patch-v4")
	statusB := readStatus(t, env.chromiumB)
	if statusB.Ahead != 0 || statusB.Synced == 0 {
		t.Fatalf("expected checkout-b to have clean/synced clone state, got %#v", statusB)
	}
}
// setupScenario lays out the e2e fixture: a bare patches remote, a working
// clone seeded with BASE_COMMIT, CHROMIUM_VERSION, and a patch-v1 diff for
// trackedPath, and two Chromium checkouts (B cloned from A) sharing one base
// commit.
func setupScenario(t *testing.T) *scenario {
	t.Helper()
	root := t.TempDir()
	patchesRemote := filepath.Join(root, "patches-remote.git")
	chromiumA := filepath.Join(root, "chromium-a")
	chromiumB := filepath.Join(root, "chromium-b")
	patchesRepo := filepath.Join(root, "patches")
	trackedPath := filepath.ToSlash(filepath.Join("chrome", "app", "test.txt"))
	newPath := filepath.ToSlash(filepath.Join("chrome", "browser", "new_file.txt"))
	runGit(t, root, "init", "--bare", patchesRemote)
	// Checkout A starts with a single "base" commit of the tracked file.
	setupChromiumRepo(t, chromiumA)
	writeFile(t, filepath.Join(chromiumA, trackedPath), "base\n")
	runGit(t, chromiumA, "add", "-A")
	runGit(t, chromiumA, "commit", "-m", "base")
	baseCommit := strings.TrimSpace(runGit(t, chromiumA, "rev-parse", "HEAD"))
	diffV1 := buildDiffFromBase(t, chromiumA, baseCommit, trackedPath, "patch-v1\n")
	// Seed the patches working clone and push it to the bare remote.
	runGit(t, root, "clone", patchesRemote, patchesRepo)
	configRepo(t, patchesRepo)
	writeFile(t, filepath.Join(patchesRepo, "BASE_COMMIT"), baseCommit+"\n")
	writeFile(t, filepath.Join(patchesRepo, "CHROMIUM_VERSION"), "MAJOR=145\nMINOR=0\nBUILD=7632\nPATCH=45\n")
	writeFile(t, filepath.Join(patchesRepo, "chromium_patches", trackedPath), diffV1)
	commitRepo(t, patchesRepo, "seed patches")
	branch := strings.TrimSpace(runGit(t, patchesRepo, "symbolic-ref", "--short", "HEAD"))
	runGit(t, patchesRepo, "push", "-u", "origin", "HEAD:"+branch)
	// Checkout B is a plain clone of A at the same base commit.
	runGit(t, root, "clone", chromiumA, chromiumB)
	configRepo(t, chromiumB)
	return &scenario{
		root:          root,
		baseCommit:    baseCommit,
		patchesRemote: patchesRemote,
		patchesRepo:   patchesRepo,
		chromiumA:     chromiumA,
		chromiumB:     chromiumB,
		trackedPath:   trackedPath,
		newPath:       newPath,
	}
}
// setupChromiumRepo creates a minimal fake Chromium checkout (chrome/ and
// base/ directories plus a marker file) and initializes it as a git repo
// with a test identity.
func setupChromiumRepo(t *testing.T, dir string) {
	t.Helper()
	if err := os.MkdirAll(filepath.Join(dir, "chrome"), 0o755); err != nil {
		t.Fatalf("mkdir chrome: %v", err)
	}
	if err := os.MkdirAll(filepath.Join(dir, "base"), 0o755); err != nil {
		t.Fatalf("mkdir base: %v", err)
	}
	writeFile(t, filepath.Join(dir, "base", ".keep"), "marker\n")
	runGit(t, dir, "init")
	configRepo(t, dir)
}

// buildDiffFromBase returns the git diff of relPath against base after
// temporarily writing content into the working tree. The original file
// contents are restored before returning, so diff generation has no lasting
// side effects on the checkout.
func buildDiffFromBase(t *testing.T, repo, base, relPath, content string) string {
	t.Helper()
	abs := filepath.Join(repo, relPath)
	original := mustRead(t, abs)
	writeFile(t, abs, content)
	diff := runGit(t, repo, "diff", "--full-index", base, "--", relPath)
	// Restore the checkout to its pre-diff state.
	writeFile(t, abs, original)
	if strings.TrimSpace(diff) == "" {
		t.Fatalf("expected non-empty diff for %s", relPath)
	}
	return diff
}
// readStatus runs `bdev status --json` in chromiumDir and decodes the
// ahead/behind/synced counters.
func readStatus(t *testing.T, chromiumDir string) statusJSON {
	t.Helper()
	raw := runBdev(t, chromiumDir, "status", "--json")
	var s statusJSON
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		t.Fatalf("failed to parse status json: %v\nraw=%s", err, raw)
	}
	return s
}

// runBdev executes the compiled bdev binary in dir, failing the test on a
// non-zero exit, and returns the combined stdout/stderr output.
func runBdev(t *testing.T, dir string, args ...string) string {
	t.Helper()
	cmd := exec.Command(bdevBinary, args...)
	cmd.Dir = dir
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("bdev %v failed: %v\n%s", args, err, string(out))
	}
	return string(out)
}

// runGit executes git in dir, failing the test on a non-zero exit, and
// returns the combined stdout/stderr output.
func runGit(t *testing.T, dir string, args ...string) string {
	t.Helper()
	cmd := exec.Command("git", args...)
	cmd.Dir = dir
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("git %v failed: %v\n%s", args, err, string(out))
	}
	return string(out)
}

// commitRepo stages everything in dir and commits it with message.
func commitRepo(t *testing.T, dir, message string) {
	t.Helper()
	runGit(t, dir, "add", "-A")
	runGit(t, dir, "commit", "-m", message)
}

// configRepo sets the git identity required for commits in test repos.
func configRepo(t *testing.T, dir string) {
	t.Helper()
	runGit(t, dir, "config", "user.email", "bdev-e2e@example.com")
	runGit(t, dir, "config", "user.name", "bdev e2e")
}
// writeFile writes content to path, creating parent directories as needed,
// and fails the test on any error.
func writeFile(t *testing.T, path, content string) {
	t.Helper()
	parent := filepath.Dir(path)
	if err := os.MkdirAll(parent, 0o755); err != nil {
		t.Fatalf("mkdir %s: %v", path, err)
	}
	if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
		t.Fatalf("write %s: %v", path, err)
	}
}

// mustRead returns the contents of path, failing the test if the file cannot
// be read.
func mustRead(t *testing.T, path string) string {
	t.Helper()
	raw, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("read %s: %v", path, err)
	}
	return string(raw)
}
// assertContains fails the test unless want is a substring of output.
func assertContains(t *testing.T, output, want string) {
	t.Helper()
	if strings.Contains(output, want) {
		return
	}
	t.Fatalf("expected output to contain %q\noutput:\n%s", want, output)
}
// assertFileContains fails the test unless the file at path contains want.
func assertFileContains(t *testing.T, path, want string) {
	t.Helper()
	body := mustRead(t, path)
	if strings.Contains(body, want) {
		return
	}
	t.Fatalf("expected %s to contain %q\ncontent:\n%s", path, want, body)
}

View File

@@ -1,10 +1,11 @@
module github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev
module bdev
go 1.25.0
go 1.25.7
require (
github.com/charmbracelet/lipgloss v1.1.0
github.com/spf13/cobra v1.10.2
golang.org/x/sync v0.19.0
gopkg.in/yaml.v3 v3.0.1
)

View File

@@ -1,7 +1,5 @@
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
@@ -10,8 +8,6 @@ github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2ll
github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99klV19u0QnhiizODirwVksQB91TJKV/UaTnACcG30=
github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -38,6 +34,8 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJu
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=

View File

@@ -1,67 +0,0 @@
package app
import (
"fmt"
"os"
"path/filepath"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/repo"
"github.com/browseros-ai/BrowserOS/packages/browseros/tools/bdev/internal/workspace"
)
// App bundles the CLI's per-invocation state: output flags, the working
// directory, and the loaded workspace config and registry.
type App struct {
	JSON     bool   // emit JSON instead of human output
	Verbose  bool   // enable verbose output
	CWD      string // cleaned working directory
	Config   *workspace.Config
	Registry *workspace.Registry
}
// Load builds an App from the given flags and working directory. An empty
// cwd falls back to the process working directory; the path is cleaned
// before being stored.
func Load(jsonOut bool, verbose bool, cwd string) (*App, error) {
	dir := cwd
	if dir == "" {
		wd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		dir = wd
	}
	cfg, err := workspace.LoadConfig()
	if err != nil {
		return nil, err
	}
	reg, err := workspace.LoadRegistry()
	if err != nil {
		return nil, err
	}
	app := &App{
		JSON:     jsonOut,
		Verbose:  verbose,
		CWD:      filepath.Clean(dir),
		Config:   cfg,
		Registry: reg,
	}
	return app, nil
}
// Save persists both the workspace config and the registry.
func (a *App) Save() error {
	if err := workspace.SaveConfig(a.Config); err != nil {
		return err
	}
	return workspace.SaveRegistry(a.Registry)
}

// ResolveWorkspace delegates to workspace.Resolve with the loaded registry
// and the current working directory.
func (a *App) ResolveWorkspace(name string, src string) (workspace.Entry, error) {
	return workspace.Resolve(a.Registry, name, a.CWD, src)
}

// RepoInfo loads the configured patches repo; when none is configured it
// attempts discovery from the current directory, and returns a guidance error
// if discovery also fails.
func (a *App) RepoInfo() (*repo.Info, error) {
	if a.Config.PatchesRepo == "" {
		discovered, err := repo.Discover(a.CWD)
		if err != nil {
			return nil, fmt.Errorf(
				`patches repo is not configured; run "bdev add <name> <path> --patches-repo <repo>" from the browseros repo once`,
			)
		}
		return repo.Load(discovered)
	}
	return repo.Load(a.Config.PatchesRepo)
}

View File

@@ -0,0 +1,57 @@
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// ReadBaseCommit returns the Chromium commit hash stored in the patches
// repo's BASE_COMMIT file, with surrounding whitespace trimmed. A missing or
// empty file is an error, since patch operations need a known base revision.
func ReadBaseCommit(patchesRepo string) (string, error) {
	path := filepath.Join(patchesRepo, "BASE_COMMIT")
	raw, err := os.ReadFile(path)
	switch {
	case os.IsNotExist(err):
		return "", fmt.Errorf("BASE_COMMIT not found in %s — create it with the Chromium commit hash", patchesRepo)
	case err != nil:
		return "", fmt.Errorf("reading BASE_COMMIT: %w", err)
	}
	commit := strings.TrimSpace(string(raw))
	if commit == "" {
		return "", fmt.Errorf("BASE_COMMIT is empty in %s", patchesRepo)
	}
	return commit, nil
}
// ReadChromiumVersion assembles "MAJOR.MINOR.BUILD.PATCH" from the optional
// CHROMIUM_VERSION file in the patches repo, which holds KEY=VALUE lines.
// A missing file, or one without a MAJOR entry, yields an empty version with
// no error.
func ReadChromiumVersion(patchesRepo string) (string, error) {
	raw, err := os.ReadFile(filepath.Join(patchesRepo, "CHROMIUM_VERSION"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", fmt.Errorf("reading CHROMIUM_VERSION: %w", err)
	}
	// Parse simple KEY=VALUE lines, skipping blanks and lines without '='.
	vars := map[string]string{}
	for _, line := range strings.Split(string(raw), "\n") {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		if key, value, found := strings.Cut(line, "="); found {
			vars[strings.TrimSpace(key)] = strings.TrimSpace(value)
		}
	}
	if vars["MAJOR"] == "" {
		return "", nil
	}
	return fmt.Sprintf("%s.%s.%s.%s", vars["MAJOR"], vars["MINOR"], vars["BUILD"], vars["PATCH"]), nil
}

View File

@@ -0,0 +1,41 @@
package config
import (
"fmt"
"os"
"path/filepath"
"gopkg.in/yaml.v3"
)
// Config is the per-checkout configuration persisted as config.yaml inside
// the .bros directory.
type Config struct {
	Name        string `yaml:"name"`         // checkout name
	PatchesRepo string `yaml:"patches_repo"` // path to the patches repository
}
// ReadConfig loads and parses config.yaml from the given .bros directory.
func ReadConfig(brosDir string) (*Config, error) {
	raw, err := os.ReadFile(filepath.Join(brosDir, "config.yaml"))
	if err != nil {
		return nil, fmt.Errorf("reading config: %w", err)
	}
	cfg := new(Config)
	if err := yaml.Unmarshal(raw, cfg); err != nil {
		return nil, fmt.Errorf("parsing config.yaml: %w", err)
	}
	return cfg, nil
}
// WriteConfig serializes cfg to config.yaml inside brosDir, creating the
// directory first if needed.
func WriteConfig(brosDir string, cfg *Config) error {
	if err := os.MkdirAll(brosDir, 0o755); err != nil {
		return fmt.Errorf("creating .bros directory: %w", err)
	}
	raw, err := yaml.Marshal(cfg)
	if err != nil {
		return fmt.Errorf("marshaling config: %w", err)
	}
	target := filepath.Join(brosDir, "config.yaml")
	if err := os.WriteFile(target, raw, 0o644); err != nil {
		return fmt.Errorf("writing config.yaml: %w", err)
	}
	return nil
}

Some files were not shown because too many files have changed in this diff Show More