From c5c41007a6d672e43dca07b04bc11311067a1410 Mon Sep 17 00:00:00 2001 From: Felarof Date: Tue, 13 Jan 2026 15:32:46 -0800 Subject: [PATCH] docs: update byollm page --- docs/docs.json | 26 +- docs/features/bring-your-own-llm.mdx | 242 ++++++++++++++++++ .../images/byollm--claude-provider-config.png | 3 + .../images/byollm--gemini-provider-config.png | 3 + docs/images/byollm--gemini-settings-card.png | 3 + docs/images/byollm--lmstudio-config.png | 3 + docs/images/byollm--ollama-config.png | 3 + .../images/byollm--openai-provider-config.png | 3 + .../byollm--openrouter-provider-config.png | 3 + docs/images/byollm--switcher.png | 3 + 10 files changed, 268 insertions(+), 24 deletions(-) create mode 100644 docs/features/bring-your-own-llm.mdx create mode 100644 docs/images/byollm--claude-provider-config.png create mode 100644 docs/images/byollm--gemini-provider-config.png create mode 100644 docs/images/byollm--gemini-settings-card.png create mode 100644 docs/images/byollm--lmstudio-config.png create mode 100644 docs/images/byollm--ollama-config.png create mode 100644 docs/images/byollm--openai-provider-config.png create mode 100644 docs/images/byollm--openrouter-provider-config.png create mode 100644 docs/images/byollm--switcher.png diff --git a/docs/docs.json b/docs/docs.json index 0043b5b27..649c2919f 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -18,35 +18,13 @@ "update" ] }, - { - "group": "Bring Your Own LLM", - "pages": [ - "llm-setup-guide", - { - "group": "Cloud Providers", - "pages": [ - "bring-your-own-keys/gemini", - "bring-your-own-keys/claude", - "bring-your-own-keys/openai", - "bring-your-own-keys/openrouter" - ] - }, - { - "group": "Local LLMs", - "pages": [ - "local-LLMs/ollama", - "local-LLMs/lm-studio", - "local-LLMs/gpt-oss" - ] - } - ] - }, { "group": "Core Features", "pages": [ "features/llm-chat-hub", + "features/bring-your-own-llm", "features/ad-blocking", - "browseros-mcp/how-to-guide", + "features/use-with-claude-code", "integrations/n8n" 
] }, diff --git a/docs/features/bring-your-own-llm.mdx b/docs/features/bring-your-own-llm.mdx new file mode 100644 index 000000000..8b57b4a87 --- /dev/null +++ b/docs/features/bring-your-own-llm.mdx @@ -0,0 +1,242 @@ +--- +title: "Bring Your Own LLM" +description: "Connect your own AI models to BrowserOS" +--- + +BrowserOS includes a default AI model you can use right away, but it has strict rate limits. For the best experience, bring your own API keys or run models locally. + +**Why bring your own?** +- Your API keys stay on your machine — requests go directly to the provider +- No rate limits from BrowserOS — use it as much as you want +- Run locally with Ollama for complete privacy + +## Which Model Should I Use? + +This is the most important thing to understand: + +| Mode | What works | Recommendation | +|------|------------|----------------| +| **Chat Mode** | Any model, including local | Ollama or Gemini Flash — fast and cheap | +| **Agent Mode** | Cloud models only | Claude Opus 4.5 for best results | + + +Local LLMs are great for Chat Mode — asking questions about a page, summarizing, etc. But they're not powerful enough for Agent Mode yet. Agent tasks need strong reasoning to click the right elements and handle multi-step workflows. + + +**For Agent Mode, we recommend:** +- **Claude Opus 4.5** — Best quality, slower +- **Claude Sonnet 4.5** — Great quality, faster +- **Claude Haiku 4.5** or **Gemini 3 Flash** — Good and fast + +## Cloud Providers + + + + Gemini 3 Flash is fast and free. Google gives you 20 requests per minute at no cost. + + ### Get your API key + + + + Visit [aistudio.google.com](https://aistudio.google.com) and click **Get API key** in the sidebar. + + ![Google AI Studio](/images/gemini-get-api-key.png) + + + Click **Create API key**, name your project, and click **Create key**. + + ![Create API key](/images/gemini-create-key.png) + + + Click on your key to copy it. 
+ + ![Copy key](/images/gemini-copy-key.png) + + + + ### Add to BrowserOS + + + + Go to `chrome://browseros/settings` and click **USE** on the Gemini card. + + ![Gemini card in BrowserOS](/images/byollm--gemini-settings-card.png) + + + - Set **Model ID** to `gemini-2.5-flash-preview-05-20` + - Paste your API key + - Check **Supports Images** + - Set **Context Window** to `1000000` + - Click **Save** + + ![Gemini config](/images/byollm--gemini-provider-config.png) + + + + + + Claude Opus 4.5 gives the best results for Agent Mode. It's slower but handles complex tasks reliably. + + ### Get your API key + + + + Visit [console.anthropic.com](https://console.anthropic.com/dashboard) and click **API keys** in the sidebar. + + ![Anthropic console](/images/claude-api-keys.png) + + + Click **Create Key**, name it, and copy the key that appears. + + ![Create key](/images/claude-create-key.png) + + + + ### Add to BrowserOS + + Go to `chrome://browseros/settings`, click **USE** on the Claude card, and configure: + + - **Model ID**: `claude-opus-4-5-20251101` (or `claude-sonnet-4-5-20250929` for faster) + - Paste your API key + - Check **Supports Images** + - Set **Context Window** to `200000` + - Click **Save** + + ![Claude config](/images/byollm--claude-provider-config.png) + + + + GPT-4.1 is solid for both chat and agent tasks. + + ### Get your API key + + + + Visit [platform.openai.com](https://platform.openai.com), click the settings icon, then **API keys**. + + ![OpenAI platform](/images/openai-api-keys.png) + + + Click **Create new secret key**, name it, and copy it. You won't see it again. 
+ + ![Create key](/images/openai-create-key.png) + + + + ### Add to BrowserOS + + Go to `chrome://browseros/settings`, click **USE** on the OpenAI card, and configure: + + - **Model ID**: `gpt-4.1` (or `gpt-4.1-mini` for cheaper) + - Paste your API key + - Check **Supports Images** + - Set **Context Window** to `128000` + - Click **Save** + + ![OpenAI config](/images/byollm--openai-provider-config.png) + + + + OpenRouter gives you access to 500+ models through one API. Good if you want to try different models. + + ### Get your API key + + Visit [openrouter.ai](https://openrouter.ai), sign up, and grab your API key from the homepage. + + ![OpenRouter](/images/openrouter-get-api-key.png) + + ### Pick a model + + Go to [openrouter.ai/models](https://openrouter.ai/models) and find a model you want. Copy the model ID (like `anthropic/claude-opus-4.5`). + + ![OpenRouter models](/images/openrouter-models.png) + + ### Add to BrowserOS + + Go to `chrome://browseros/settings`, click **USE** on the OpenRouter card, and configure: + + - **Model ID**: The one you copied (e.g., `anthropic/claude-opus-4.5`) + - Paste your API key + - Set **Context Window** based on the model + - Click **Save** + + ![OpenRouter config](/images/byollm--openrouter-provider-config.png) + + + +## Local Models + +Local models are free and private — your data never leaves your machine. They work great for Chat Mode. + + + + Ollama is the easiest way to run models locally. + + ### Setup + + + + Download from [ollama.com](https://ollama.com) and install it. + + + Open your terminal and run: + ```bash + ollama pull llama3.2 + ``` + + + ```bash + ollama serve + ``` + + ![Ollama running](/images/setting-up-ollama/ollama-step3.png) + + + Go to `chrome://browseros/settings`, click **Add Provider**, select **Ollama**, and set the model ID to `llama3.2`. 
+ + ![Ollama in BrowserOS](/images/byollm--ollama-config.png) + + + + **Recommended models:** `llama3.2`, `qwen3:8b`, `mistral` + + + + LM Studio has a nice GUI if you don't want to use the terminal. + + ### Setup + + + + Get it from [lmstudio.ai](https://lmstudio.ai) and install. + + + Open LM Studio, go to the **Developer** tab, and load a model. + + ![LM Studio](/images/setting-up-lm-studio/lmstudio-step1.png) + + + LM Studio runs a local server at `http://localhost:1234/v1/`. + + ![LM Studio server](/images/setting-up-lm-studio/lmstudio-step2.png) + + + Go to `chrome://browseros/settings`, click **Add Provider**, select **OpenAI Compatible**, and set: + - **Base URL**: `http://localhost:1234/v1/` + - **Model ID**: The model you loaded + - **Context Window**: Match your LM Studio config + + ![LM Studio in BrowserOS](/images/byollm--lmstudio-config.png) + + + + + +## Switching Models + +Use the model switcher in the Assistant to change between providers anytime. + +- Use local models for sensitive data +- Switch to Claude for agent tasks + +![Model switcher](/images/byollm--switcher.png) diff --git a/docs/images/byollm--claude-provider-config.png b/docs/images/byollm--claude-provider-config.png new file mode 100644 index 000000000..172fdc085 --- /dev/null +++ b/docs/images/byollm--claude-provider-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16555bdfa08c5e905e2363073104ad09a02cf8ea071f95453fa153ad79a77242 +size 4763371 diff --git a/docs/images/byollm--gemini-provider-config.png b/docs/images/byollm--gemini-provider-config.png new file mode 100644 index 000000000..6f1410043 --- /dev/null +++ b/docs/images/byollm--gemini-provider-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49dddd13cf0323d3cbe28a2cd8fc14fe305c21ac191dcb676abc93a916afb6ce +size 4537801 diff --git a/docs/images/byollm--gemini-settings-card.png b/docs/images/byollm--gemini-settings-card.png new file mode 100644 index 
000000000..17980d06d --- /dev/null +++ b/docs/images/byollm--gemini-settings-card.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d860591dbd308f5c7c1a3e774d186ab302b5aa93d3c43763e5595412bbb0c1e6 +size 4756097 diff --git a/docs/images/byollm--lmstudio-config.png b/docs/images/byollm--lmstudio-config.png new file mode 100644 index 000000000..ed16f58e9 --- /dev/null +++ b/docs/images/byollm--lmstudio-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef1b96c9439bd5833a1be8dcfd83d2a1f7aee34cf8fddd37ab65c6c6c0324668 +size 4992844 diff --git a/docs/images/byollm--ollama-config.png b/docs/images/byollm--ollama-config.png new file mode 100644 index 000000000..55798a953 --- /dev/null +++ b/docs/images/byollm--ollama-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bec7c30055ac62a14a526a9f1fde47d54fdfa22b1fc1caee42393131ce56e56f +size 4776402 diff --git a/docs/images/byollm--openai-provider-config.png b/docs/images/byollm--openai-provider-config.png new file mode 100644 index 000000000..fa62d33d3 --- /dev/null +++ b/docs/images/byollm--openai-provider-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ced4e6a2bcd107207d03fc8dc874f315e257e4b87cb28d17f6c32cba48c31f88 +size 5050714 diff --git a/docs/images/byollm--openrouter-provider-config.png b/docs/images/byollm--openrouter-provider-config.png new file mode 100644 index 000000000..2c9987424 --- /dev/null +++ b/docs/images/byollm--openrouter-provider-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5b5b536f6b54a38d0e3d3e36a73963a005a7d65a09f1fa13449bde52a86763d +size 4759471 diff --git a/docs/images/byollm--switcher.png b/docs/images/byollm--switcher.png new file mode 100644 index 000000000..e1dccded6 --- /dev/null +++ b/docs/images/byollm--switcher.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c717a2f507797eed79438620aebdaeb3743dc7ae984561756c91fe0f531aa846 +size 6696779