BrowserOS/packages/browseros-agent/apps/agent/entrypoints/app/ai-settings/models.ts
shivammittal274 720baaed3e feat: add GitHub Copilot as OAuth LLM provider (#500)
* feat: add GitHub Copilot as OAuth-based LLM provider

Add GitHub Copilot as a second OAuth provider using the Device Code flow
(RFC 8628). Users authenticate via github.com/login/device, and the server
polls for token completion. Supports 25+ models through a single Copilot
subscription.

Key changes:
- Device Code OAuth flow in token manager (poll with safety margin)
- Custom fetch wrapper injecting Copilot headers + vision detection (sketched below)
- Provider factory using createOpenAICompatible for Chat Completions API
- Extension UI with template card, auto-create on auth, and disconnect
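
A sketch of the fetch-wrapper idea (header names and values here are
assumptions modeled on common Copilot clients, not necessarily what this PR
ships; getCopilotToken is a hypothetical stand-in for the token manager):

  async function getCopilotToken(): Promise<string> {
    // Hypothetical helper: the real token manager exchanges the stored
    // GitHub OAuth token for a short-lived Copilot API token and caches it.
    return process.env.COPILOT_TOKEN ?? ''
  }

  const copilotFetch: typeof fetch = async (input, init) => {
    const headers = new Headers(init?.headers)
    headers.set('Authorization', `Bearer ${await getCopilotToken()}`)
    headers.set('Copilot-Integration-Id', 'vscode-chat') // assumed value
    headers.set('Editor-Version', 'vscode/1.95.0') // assumed value
    // Vision detection would inspect the request body for image parts here
    // and flag the request accordingly before forwarding.
    return fetch(input, { ...init, headers })
  }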

* fix: address PR review comments for GitHub Copilot OAuth

- Validate device code response for error fields (GitHub can return 200
  with error payload)
- Store empty refreshToken instead of access token for GitHub tokens
- Add closeButton to Toaster for dismissing device code toast

* fix: add github-copilot to agent provider factory

The chat route uses a separate provider-factory.ts (agent layer) from the
test-provider route (llm/provider.ts). Added createGitHubCopilotFactory
to the agent factory so chat works with GitHub Copilot.
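
Assuming the AI SDK's @ai-sdk/openai-compatible package (which the first
commit names), the factory amounts to roughly this sketch; the real
createGitHubCopilotFactory differs in shape:

  import { createOpenAICompatible } from '@ai-sdk/openai-compatible'

  function createGitHubCopilotModel(modelId: string, copilotToken: string) {
    const provider = createOpenAICompatible({
      name: 'github-copilot',
      baseURL: 'https://api.githubcopilot.com', // Chat Completions-compatible
      headers: { Authorization: `Bearer ${copilotToken}` },
    })
    return provider(modelId) // language model usable by chat and test routes
  }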

* fix: add github-copilot to provider icons, models, and dialog

- Add Github icon from lucide-react to providerIcons map
- Add 8 Copilot models (GPT-4o, Claude, Gemini, Grok) to models.ts
- Add github-copilot to NewProviderDialog zod enum, validation skip,
  canTest check, and OAuth credential message

* fix: reorder copilot models with free-tier models first

Put models available on Copilot Free at the top (gpt-4o, gpt-4.1,
gpt-5-mini, claude-haiku-4.5, grok-code-fast-1), followed by
premium models that require paid Copilot subscription.

* fix: set correct 64K context window for Copilot models

Copilot API enforces a 64K input token limit regardless of the
underlying model's native context window. Updated all model entries
and the default template to 64000 so compaction triggers correctly.

* fix: use actual per-model prompt limits from Copilot /models API

Queried api.githubcopilot.com/models for real max_prompt_tokens values.
GPT-4o/4.1 have 64K, Claude/gpt-5-mini have 128K, GPT-5.x have 272K.
Also updated model list to match what's actually available on the API
(e.g. claude-sonnet-4.6 instead of 4.5, added gpt-5.4/5.2-codex).
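
The lookup boils down to something like this (the exact response shape,
including where max_prompt_tokens nests, is an assumption based on this
commit's description):

  async function fetchCopilotPromptLimits(token: string) {
    const res = await fetch('https://api.githubcopilot.com/models', {
      headers: { Authorization: `Bearer ${token}`, Accept: 'application/json' },
    })
    const { data } = (await res.json()) as {
      data: Array<{
        id: string
        capabilities?: { limits?: { max_prompt_tokens?: number } }
      }>
    }
    // Map model id -> prompt-token limit, e.g. { 'gpt-4o': 64000, ... }
    return Object.fromEntries(
      data.map((m) => [m.id, m.capabilities?.limits?.max_prompt_tokens]),
    )
  }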

* feat: resize images for Copilot using VS Code's algorithm

Large screenshots cause 413 errors on Copilot's API. Resize images
following VS Code's approach: max 2048px longest side, 768px shortest
side, re-encode as JPEG at 75% quality. Uses sharp for server-side
image processing.
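
A minimal sketch of that resize path with sharp (constant and function names
are illustrative, not the PR's actual identifiers):

  import sharp from 'sharp'

  const MAX_LONG_SIDE = 2048
  const MAX_SHORT_SIDE = 768

  async function resizeForCopilot(input: Buffer): Promise<Buffer> {
    const meta = await sharp(input).metadata()
    const width = meta.width ?? 0
    const height = meta.height ?? 0
    if (!width || !height) return input // metadata unavailable; pass through
    const longSide = Math.max(width, height)
    const shortSide = Math.min(width, height)
    // Single scale factor that satisfies both caps without ever upscaling.
    const scale = Math.min(1, MAX_LONG_SIDE / longSide, MAX_SHORT_SIDE / shortSide)
    return sharp(input)
      .resize(Math.round(width * scale), Math.round(height * scale))
      .jpeg({ quality: 75 })
      .toBuffer()
  }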

* fix: address all Greptile P1 review comments

- Add .catch() on fire-and-forget pollDeviceCode to prevent unhandled
  rejection crashes (fatal since Node 15)
- Add deduplication guard (activeDeviceFlows Set) to prevent concurrent
  device code flows for the same provider
- Add runtime validation of server response in frontend before calling
  window.open() and showing toast
- Remove dead GITHUB_DEVICE_VERIFICATION constant from urls.ts
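
The first two fixes together look roughly like this (activeDeviceFlows is
named in the commit; the wrapper's shape is an assumption):

  const activeDeviceFlows = new Set<string>()

  function startPolling(providerId: string, poll: () => Promise<void>): void {
    if (activeDeviceFlows.has(providerId)) return // dedupe concurrent flows
    activeDeviceFlows.add(providerId)
    // Fire-and-forget, but with .catch() so a rejection cannot become an
    // unhandled rejection (fatal since Node 15).
    poll()
      .catch((err) => console.error('device code polling failed:', err))
      .finally(() => activeDeviceFlows.delete(providerId))
  }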

* fix: upgrade biome to 2.4.8, fix all lint errors, and address review bugs

- Upgrade biome from 2.4.5 to 2.4.8 (matches CI) and migrate configs
- Fix image resize: only re-encode when dimensions actually change
- Fix device code polling: retry on transient network errors instead of aborting (sketch below)
- Allow restarting device code flow (clear old flow instead of throwing 500)
- Fix pre-existing noNonNullAssertion and noExplicitAny lint errors globally
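
The polling retry fix could be a small wrapper along these lines (names and
the retry budget are assumptions):

  async function fetchWithRetries(
    url: string,
    init: RequestInit,
    maxRetries = 3,
  ): Promise<Response> {
    let lastError: unknown
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        return await fetch(url, init)
      } catch (err) {
        // fetch throws (e.g. TypeError) on network-level failures; count it
        // against the retry budget instead of aborting the device flow.
        lastError = err
        await new Promise((resolve) => setTimeout(resolve, 1000 * (attempt + 1)))
      }
    }
    throw lastError
  }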

* fix: address Greptile P2 review — image resize and config guard

- Fix early-return guard: check max/min sides against their respective
  limits (MAX_LONG_SIDE/MAX_SHORT_SIDE) instead of both against MAX_SHORT_SIDE
- Preserve PNG alpha: detect hasAlpha and keep PNG format instead of
  unconditionally converting to lossy JPEG
- Keep browserosId guard in resolveGitHubCopilotConfig consistent with
  ChatGPT Pro pattern (safety check that caller context is valid)
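
Folding both image fixes back into the earlier resize sketch (again
illustrative rather than the shipped code):

  import sharp from 'sharp'

  const MAX_LONG_SIDE = 2048
  const MAX_SHORT_SIDE = 768

  async function resizeForCopilot(input: Buffer): Promise<Buffer> {
    const meta = await sharp(input).metadata()
    const width = meta.width ?? 0
    const height = meta.height ?? 0
    if (!width || !height) return input
    const longSide = Math.max(width, height)
    const shortSide = Math.min(width, height)
    // Early return: each side checked against its own limit, so in-bounds
    // images are never re-encoded.
    if (longSide <= MAX_LONG_SIDE && shortSide <= MAX_SHORT_SIDE) return input
    const scale = Math.min(MAX_LONG_SIDE / longSide, MAX_SHORT_SIDE / shortSide)
    const resized = sharp(input).resize(
      Math.round(width * scale),
      Math.round(height * scale),
    )
    // Preserve transparency: keep PNG when the source has an alpha channel,
    // otherwise re-encode as lossy JPEG as before.
    return meta.hasAlpha
      ? resized.png().toBuffer()
      : resized.jpeg({ quality: 75 }).toBuffer()
  }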

* feat: update Copilot models to full list from pricing page, default to gpt-5-mini

Added all 23 models from GitHub Copilot pricing page. Ordered with
free-tier models first (gpt-5-mini, claude-haiku-4.5), then premium.
Changed default from gpt-4o to gpt-5-mini since it's unlimited on
Pro plan and has 128K context (vs gpt-4o's 64K limit).
2026-03-20 02:33:09 +05:30


import type { ProviderType } from '@/lib/llm-providers/types'

/**
 * Model information with context length
 */
export interface ModelInfo {
  modelId: string
  contextLength: number
}

/**
 * Models data organized by provider type (matches backend AIProvider enum)
 */
export interface ModelsData {
  anthropic: ModelInfo[]
  openai: ModelInfo[]
  'openai-compatible': ModelInfo[]
  google: ModelInfo[]
  openrouter: ModelInfo[]
  azure: ModelInfo[]
  ollama: ModelInfo[]
  lmstudio: ModelInfo[]
  bedrock: ModelInfo[]
  browseros: ModelInfo[]
  moonshot: ModelInfo[]
  'chatgpt-pro': ModelInfo[]
  'github-copilot': ModelInfo[]
}

/**
 * Available models per provider with context lengths
 * Based on: https://github.com/browseros-ai/BrowserOS-agent/blob/main/src/options/data/models.ts
 */
export const MODELS_DATA: ModelsData = {
  moonshot: [{ modelId: 'kimi-k2.5', contextLength: 200000 }],
  anthropic: [
    { modelId: 'claude-opus-4-5-20251101', contextLength: 200000 },
    { modelId: 'claude-haiku-4-5-20251001', contextLength: 200000 },
    { modelId: 'claude-sonnet-4-5-20250929', contextLength: 200000 },
    { modelId: 'claude-sonnet-4-20250514', contextLength: 200000 },
    { modelId: 'claude-opus-4-20250514', contextLength: 200000 },
    { modelId: 'claude-3-7-sonnet-20250219', contextLength: 200000 },
    { modelId: 'claude-3-5-haiku-20241022', contextLength: 200000 },
  ],
  openai: [
    { modelId: 'gpt-5.2', contextLength: 200000 },
    { modelId: 'gpt-5.2-pro', contextLength: 200000 },
    { modelId: 'gpt-5', contextLength: 200000 },
    { modelId: 'gpt-5-mini', contextLength: 200000 },
    { modelId: 'gpt-5-nano', contextLength: 200000 },
    { modelId: 'gpt-4.1', contextLength: 200000 },
    { modelId: 'gpt-4.1-mini', contextLength: 200000 },
    { modelId: 'o4-mini', contextLength: 200000 },
    { modelId: 'o3-mini', contextLength: 200000 },
    { modelId: 'gpt-4o', contextLength: 128000 },
    { modelId: 'gpt-4o-mini', contextLength: 128000 },
  ],
  'openai-compatible': [],
  google: [
    { modelId: 'gemini-3-pro-preview', contextLength: 1048576 },
    { modelId: 'gemini-3-flash-preview', contextLength: 1048576 },
    { modelId: 'gemini-2.5-flash', contextLength: 1048576 },
    { modelId: 'gemini-2.5-pro', contextLength: 1048576 },
  ],
  openrouter: [
    { modelId: 'google/gemini-3-pro-preview', contextLength: 1048576 },
    { modelId: 'google/gemini-3-flash-preview', contextLength: 1048576 },
    { modelId: 'google/gemini-2.5-flash', contextLength: 1048576 },
    { modelId: 'anthropic/claude-opus-4.5', contextLength: 200000 },
    { modelId: 'anthropic/claude-haiku-4.5', contextLength: 200000 },
    { modelId: 'anthropic/claude-sonnet-4.5', contextLength: 200000 },
    { modelId: 'anthropic/claude-sonnet-4', contextLength: 200000 },
    { modelId: 'anthropic/claude-3.7-sonnet', contextLength: 200000 },
    { modelId: 'openai/gpt-4o', contextLength: 128000 },
    { modelId: 'openai/gpt-oss-120b', contextLength: 128000 },
    { modelId: 'openai/gpt-oss-20b', contextLength: 128000 },
    { modelId: 'qwen/qwen3-14b', contextLength: 131072 },
    { modelId: 'qwen/qwen3-8b', contextLength: 131072 },
  ],
  azure: [],
  ollama: [
    { modelId: 'qwen3:4b', contextLength: 262144 },
    { modelId: 'qwen3:8b', contextLength: 40960 },
    { modelId: 'qwen3:14b', contextLength: 40960 },
    { modelId: 'gpt-oss:20b', contextLength: 128000 },
    { modelId: 'gpt-oss:120b', contextLength: 128000 },
  ],
  lmstudio: [
    { modelId: 'openai/gpt-oss-20b', contextLength: 128000 },
    { modelId: 'openai/gpt-oss-120b', contextLength: 128000 },
    { modelId: 'qwen/qwen3-vl-8b', contextLength: 131072 },
  ],
  bedrock: [],
  browseros: [{ modelId: 'browseros-auto', contextLength: 200000 }],
  'chatgpt-pro': [
    { modelId: 'gpt-5.4', contextLength: 400000 },
    { modelId: 'gpt-5.3-codex', contextLength: 400000 },
    { modelId: 'gpt-5.2-codex', contextLength: 400000 },
    { modelId: 'gpt-5.2', contextLength: 200000 },
    { modelId: 'gpt-5.1-codex', contextLength: 400000 },
    { modelId: 'gpt-5.1-codex-max', contextLength: 400000 },
    { modelId: 'gpt-5.1-codex-mini', contextLength: 400000 },
    { modelId: 'gpt-5.1', contextLength: 200000 },
  ],
  'github-copilot': [
    // Free tier (unlimited with Pro)
    { modelId: 'gpt-5-mini', contextLength: 128000 },
    { modelId: 'claude-haiku-4.5', contextLength: 128000 },
    { modelId: 'gpt-4o', contextLength: 64000 },
    { modelId: 'gpt-4.1', contextLength: 64000 },
    // Premium models (Pro: 300/mo, Pro+: 1500/mo)
    { modelId: 'claude-sonnet-4.6', contextLength: 128000 },
    { modelId: 'claude-sonnet-4.5', contextLength: 128000 },
    { modelId: 'claude-sonnet-4', contextLength: 128000 },
    { modelId: 'claude-opus-4.6', contextLength: 128000 },
    { modelId: 'claude-opus-4.5', contextLength: 128000 },
    { modelId: 'gemini-2.5-pro', contextLength: 128000 },
    { modelId: 'gemini-3-pro-preview', contextLength: 128000 },
    { modelId: 'gemini-3-flash-preview', contextLength: 128000 },
    { modelId: 'gemini-3.1-pro-preview', contextLength: 128000 },
    { modelId: 'gpt-5.4', contextLength: 272000 },
    { modelId: 'gpt-5.4-mini', contextLength: 128000 },
    { modelId: 'gpt-5.3-codex', contextLength: 272000 },
    { modelId: 'gpt-5.2-codex', contextLength: 272000 },
    { modelId: 'gpt-5.2', contextLength: 128000 },
    { modelId: 'gpt-5.1-codex', contextLength: 128000 },
    { modelId: 'gpt-5.1-codex-max', contextLength: 128000 },
    { modelId: 'gpt-5.1', contextLength: 128000 },
    { modelId: 'grok-code-fast-1', contextLength: 128000 },
  ],
}

/**
 * Get models for a specific provider type
 */
export function getModelsForProvider(providerType: ProviderType): ModelInfo[] {
  return MODELS_DATA[providerType] || []
}

/**
 * Get model options for select dropdown (model IDs + custom option)
 */
export function getModelOptions(providerType: ProviderType): string[] {
  const models = getModelsForProvider(providerType)
  const modelIds = models.map((m) => m.modelId)
  return modelIds.length > 0 ? [...modelIds, 'custom'] : ['custom']
}

/**
 * Get context length for a specific model
 */
export function getModelContextLength(
  providerType: ProviderType,
  modelId: string,
): number | undefined {
  const models = getModelsForProvider(providerType)
  const model = models.find((m) => m.modelId === modelId)
  return model?.contextLength
}

/**
 * Check if model ID is a custom (user-entered) value
 */
export function isCustomModel(
  providerType: ProviderType,
  modelId: string,
): boolean {
  const models = getModelsForProvider(providerType)
  return !models.some((m) => m.modelId === modelId)
}