Compare commits

...

2 Commits

Author SHA1 Message Date
Nikhil Sonti
c3f725be9c fix: disable mock browseros llm in production 2026-04-15 18:07:09 -07:00
Nikhil Sonti
b64124f151 test: mock browseros llm in chat integration 2026-04-15 17:57:41 -07:00
7 changed files with 167 additions and 2 deletions

View File

@@ -9,6 +9,10 @@ import { LLM_PROVIDERS } from '@browseros/shared/schemas/llm'
import { createOpenRouter } from '@openrouter/ai-sdk-provider'
import type { LanguageModel } from 'ai'
import { createBrowserOSFetch } from '../lib/browseros-fetch'
import {
createMockBrowserOSLanguageModel,
shouldUseMockBrowserOSLLM,
} from '../lib/clients/llm/mock-language-model'
import { createCodexFetch } from '../lib/clients/oauth/codex-fetch'
import { createCopilotFetch } from '../lib/clients/oauth/copilot-fetch'
import { logger } from '../lib/logger'
@@ -219,6 +223,9 @@ const PROVIDER_FACTORIES: Record<string, ProviderFactory> = {
export function createLanguageModel(
config: ResolvedAgentConfig,
): LanguageModel {
if (shouldUseMockBrowserOSLLM(config)) {
return createMockBrowserOSLanguageModel()
}
const provider = config.provider as string
const factory = PROVIDER_FACTORIES[provider]
if (!factory) throw new Error(`Unknown provider: ${provider}`)

View File

@@ -11,6 +11,10 @@ import { INLINED_ENV } from '../../../env'
import { logger } from '../../logger'
import { fetchBrowserOSConfig, getLLMConfigFromProvider } from '../gateway'
import { getOAuthTokenManager } from '../oauth'
import {
resolveMockBrowserOSConfig,
shouldUseMockBrowserOSLLM,
} from './mock-language-model'
import type { ResolvedLLMConfig } from './types'
export async function resolveLLMConfig(
@@ -49,6 +53,9 @@ export async function resolveLLMConfig(
// BrowserOS gateway: fetch config from remote service
if (config.provider === LLM_PROVIDERS.BROWSEROS) {
if (shouldUseMockBrowserOSLLM(config)) {
return resolveMockBrowserOSConfig(config, browserosId)
}
return resolveBrowserOSConfig(config, browserosId)
}

View File

@@ -0,0 +1,86 @@
import type {
LanguageModelV3GenerateResult,
LanguageModelV3StreamPart,
LanguageModelV3Usage,
} from '@ai-sdk/provider'
import { LLM_PROVIDERS, type LLMConfig } from '@browseros/shared/schemas/llm'
import { type LanguageModel, simulateReadableStream } from 'ai'
import { MockLanguageModelV3 } from 'ai/test'
import type { ResolvedLLMConfig } from './types'
// Model id the mock reports; also used as the default resolved model name.
export const MOCK_BROWSEROS_MODEL_ID = 'browseros-test-mock'
// Fixed text emitted by both the generate and stream paths; the integration
// test asserts on this exact string appearing in the SSE response.
export const MOCK_BROWSEROS_RESPONSE_TEXT = 'Mock BrowserOS test response.'
// Deterministic token accounting attached to every mock result and to the
// stream's terminal 'finish' part.
const MOCK_USAGE: LanguageModelV3Usage = {
  inputTokens: {
    total: 1,
    noCache: 1,
    cacheRead: undefined,
    cacheWrite: undefined,
  },
  outputTokens: {
    total: 4,
    text: 4,
    reasoning: undefined,
  },
}
// Canned non-streaming generate() result: the fixed mock text, a 'stop'
// finish reason, and the shared MOCK_USAGE token counts.
function createMockResult(): LanguageModelV3GenerateResult {
  const textPart = {
    type: 'text' as const,
    text: MOCK_BROWSEROS_RESPONSE_TEXT,
  }
  return {
    content: [textPart],
    finishReason: { unified: 'stop', raw: 'stop' },
    usage: MOCK_USAGE,
    warnings: [],
  }
}
/**
 * Whether the mock BrowserOS LLM is opted into via the environment.
 *
 * Requires BOTH conditions: the process is not running in production
 * (NODE_ENV !== 'production') and BROWSEROS_USE_MOCK_LLM is set to the
 * literal string 'true'. Any other flag value (or unset) disables the mock.
 */
export function isMockBrowserOSLLMEnabled(): boolean {
  if (process.env.NODE_ENV === 'production') return false
  return process.env.BROWSEROS_USE_MOCK_LLM === 'true'
}
/**
 * True only when the config targets the BrowserOS provider AND the
 * environment opts into the mock (see isMockBrowserOSLLMEnabled).
 * Other providers are never mocked, regardless of the env flag.
 */
export function shouldUseMockBrowserOSLLM(
  config: Pick<LLMConfig, 'provider'>,
): boolean {
  if (config.provider !== LLM_PROVIDERS.BROWSEROS) return false
  return isMockBrowserOSLLMEnabled()
}
/**
 * Build a ResolvedLLMConfig for the mock path without calling the BrowserOS
 * gateway. Falls back to MOCK_BROWSEROS_MODEL_ID when no model is set, and
 * reports OPENAI as the upstream provider so downstream factory selection
 * still resolves.
 */
export function resolveMockBrowserOSConfig(
  config: LLMConfig,
  browserosId?: string,
): ResolvedLLMConfig {
  const model = config.model ?? MOCK_BROWSEROS_MODEL_ID
  const resolved: ResolvedLLMConfig = {
    ...config,
    model,
    browserosId,
    upstreamProvider: LLM_PROVIDERS.OPENAI,
  }
  return resolved
}
/**
 * Deterministic in-process LanguageModel used by tests in place of the real
 * BrowserOS gateway. Both the generate and stream paths emit
 * MOCK_BROWSEROS_RESPONSE_TEXT with MOCK_USAGE token accounting, so
 * integration tests can assert on an exact response.
 */
export function createMockBrowserOSLanguageModel(): LanguageModel {
  const textId = 'text-1'
  // Stream parts: open a text block, deliver the full text as one delta,
  // close the block, then finish with usage/finish-reason metadata.
  const streamParts: LanguageModelV3StreamPart[] = [
    { type: 'text-start', id: textId },
    { type: 'text-delta', id: textId, delta: MOCK_BROWSEROS_RESPONSE_TEXT },
    { type: 'text-end', id: textId },
    {
      type: 'finish',
      finishReason: { unified: 'stop', raw: 'stop' },
      usage: MOCK_USAGE,
    },
  ]
  const model = new MockLanguageModelV3({
    doGenerate: async () => createMockResult(),
    doStream: async () => ({
      stream: simulateReadableStream({ chunks: streamParts }),
    }),
  })
  return model as LanguageModel
}

View File

@@ -21,6 +21,10 @@ import { logger } from '../../logger'
import { createOpenRouterCompatibleFetch } from '../../openrouter-fetch'
import { createCodexFetch } from '../oauth/codex-fetch'
import { createCopilotFetch } from '../oauth/copilot-fetch'
import {
createMockBrowserOSLanguageModel,
shouldUseMockBrowserOSLLM,
} from './mock-language-model'
import type { ResolvedLLMConfig } from './types'
type ProviderFactory = (config: ResolvedLLMConfig) => LanguageModel
@@ -195,6 +199,9 @@ const PROVIDER_FACTORIES: Record<string, ProviderFactory> = {
}
export function createLLMProvider(config: ResolvedLLMConfig): LanguageModel {
if (shouldUseMockBrowserOSLLM(config)) {
return createMockBrowserOSLanguageModel()
}
const factory = PROVIDER_FACTORIES[config.provider]
if (!factory) throw new Error(`Unknown provider: ${config.provider}`)
return factory(config)

View File

@@ -79,7 +79,11 @@ export async function spawnServer(config: ServerConfig): Promise<ServerState> {
],
{
stdio: ['ignore', 'pipe', 'pipe'],
env: { ...globalThis.process.env, NODE_ENV: 'test' },
env: {
...globalThis.process.env,
NODE_ENV: 'test',
BROWSEROS_USE_MOCK_LLM: 'true',
},
},
)

View File

@@ -0,0 +1,49 @@
import { afterEach, describe, expect, it } from 'bun:test'
import { LLM_PROVIDERS } from '@browseros/shared/schemas/llm'
import { shouldUseMockBrowserOSLLM } from '../../../../src/lib/clients/llm/mock-language-model'
// Snapshot, at module load, the env vars this suite mutates so every test
// starts from the same baseline.
const ENV_SNAPSHOT: ReadonlyArray<readonly [string, string | undefined]> = [
  ['NODE_ENV', process.env.NODE_ENV],
  ['BROWSEROS_USE_MOCK_LLM', process.env.BROWSEROS_USE_MOCK_LLM],
]
afterEach(() => {
  // Restore each mutated variable; delete those that were originally unset.
  for (const [name, value] of ENV_SNAPSHOT) {
    if (value === undefined) {
      delete process.env[name]
    } else {
      process.env[name] = value
    }
  }
})
describe('shouldUseMockBrowserOSLLM', () => {
  // Helper: set the NODE_ENV / mock-flag combination under test.
  const applyEnv = (nodeEnv: string, mockFlag: string): void => {
    process.env.NODE_ENV = nodeEnv
    process.env.BROWSEROS_USE_MOCK_LLM = mockFlag
  }
  it('enables the mock for BrowserOS in non-production when the flag is set', () => {
    applyEnv('test', 'true')
    expect(
      shouldUseMockBrowserOSLLM({ provider: LLM_PROVIDERS.BROWSEROS }),
    ).toBe(true)
  })
  it('disables the mock in production even when the flag is set', () => {
    applyEnv('production', 'true')
    expect(
      shouldUseMockBrowserOSLLM({ provider: LLM_PROVIDERS.BROWSEROS }),
    ).toBe(false)
  })
  it('disables the mock for non-BrowserOS providers', () => {
    applyEnv('test', 'true')
    expect(shouldUseMockBrowserOSLLM({ provider: LLM_PROVIDERS.OPENAI })).toBe(
      false,
    )
  })
})

View File

@@ -12,6 +12,7 @@ import { URL } from 'node:url'
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'
import { MOCK_BROWSEROS_RESPONSE_TEXT } from '../src/lib/clients/llm/mock-language-model'
import {
cleanupBrowserOS,
ensureBrowserOS,
@@ -155,7 +156,7 @@ describe('HTTP Server Integration Tests', () => {
describe('Chat endpoint', () => {
it(
'streams a chat response with BrowserOS provider',
'streams a mocked chat response for BrowserOS provider requests in test mode',
async () => {
const conversationId = crypto.randomUUID()
@@ -206,6 +207,10 @@ describe('HTTP Server Integration Tests', () => {
fullResponse.includes('data:'),
'Should contain SSE data events',
)
assert.ok(
fullResponse.includes(MOCK_BROWSEROS_RESPONSE_TEXT),
'Should include the mocked BrowserOS chat response',
)
const deleteResponse = await fetch(
`${getBaseUrl()}/chat/${conversationId}`,