Improve chat stream failure visibility and fallback response

@@ -5,6 +5,16 @@ import { ensureCronSchedulerStarted } from "@/lib/cron/runtime";
 
 export const maxDuration = 300; // 5 min max for long agent runs
 
+function formatChatStreamError(error: unknown): string {
+  const raw = error instanceof Error ? error.message : String(error);
+  const compact = raw.replace(/\s+/g, " ").trim();
+  if (!compact) {
+    return "Generation failed after tool execution. Please retry.";
+  }
+  const short = compact.length > 220 ? `${compact.slice(0, 220)}...` : compact;
+  return `Generation failed after tool execution: ${short}`;
+}
+
 export async function POST(req: NextRequest) {
   try {
     await ensureCronSchedulerStarted();
@@ -60,6 +70,10 @@ export async function POST(req: NextRequest) {
       headers: {
         "X-Chat-Id": resolvedChatId,
       },
+      onError: (error) => {
+        console.error("Chat stream response error:", error);
+        return formatChatStreamError(error);
+      },
     });
   } catch (error) {
     console.error("Chat API error:", error);
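
By default the AI SDK masks stream error details before they reach the client, so a failure after tool execution used to surface as a silently dead stream; the string returned from `onError` above replaces that masked text. A quick sketch of how `formatChatStreamError` shapes a raw error, assuming the helper from the first hunk is in scope (the sample provider error is invented):

const providerError = new Error(
  "fetch failed:\n    connect ECONNREFUSED    127.0.0.1:11434"
);

// Whitespace runs collapse to single spaces and anything past 220 chars is
// elided, so the client gets a single readable line.
console.log(formatChatStreamError(providerError));
// -> "Generation failed after tool execution: fetch failed: connect ECONNREFUSED 127.0.0.1:11434"

console.log(formatChatStreamError(new Error("   ")));
// -> "Generation failed after tool execution. Please retry."
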
@@ -8,9 +8,10 @@ import type { UIMessage } from "ai";
 interface ChatMessagesProps {
   messages: UIMessage[];
   isLoading: boolean;
+  errorMessage?: string | null;
 }
 
-export function ChatMessages({ messages, isLoading }: ChatMessagesProps) {
+export function ChatMessages({ messages, isLoading, errorMessage }: ChatMessagesProps) {
   const endRef = useRef<HTMLDivElement>(null);
 
   // Auto-scroll on new messages
@@ -69,6 +70,12 @@ export function ChatMessages({ messages, isLoading }: ChatMessagesProps) {
         </div>
       )}
 
+      {errorMessage ? (
+        <div className="rounded-md border border-destructive/30 bg-destructive/10 px-3 py-2 text-sm text-destructive">
+          {errorMessage}
+        </div>
+      ) : null}
+
       <div ref={endRef} />
     </div>
   </div>

@@ -208,6 +208,15 @@ function areUIMessagesEquivalentById(
   return true;
 }
 
+function formatChatErrorMessage(error: unknown): string {
+  const raw = error instanceof Error ? error.message : String(error);
+  const compact = raw.replace(/\s+/g, " ").trim();
+  if (!compact) {
+    return "The model stopped before producing a final response. Please retry.";
+  }
+  return compact.length > 280 ? `${compact.slice(0, 280)}...` : compact;
+}
+
 export function ChatPanel() {
   const {
     activeChatId,
@@ -220,6 +229,7 @@ export function ChatPanel() {
     addChat,
   } = useAppStore();
   const [input, setInput] = useState("");
+  const [chatError, setChatError] = useState<string | null>(null);
 
   // Internal chatId that stays stable during a message send.
   // Pre-generate a UUID so useChat always has a consistent id.
@@ -247,6 +257,7 @@ export function ChatPanel() {
   useEffect(() => {
     if (activeChatId !== prevActiveChatId.current) {
       prevActiveChatId.current = activeChatId;
+      setChatError(null);
       if (activeChatId !== null) {
         setInternalChatId(activeChatId);
       } else {
@@ -275,6 +286,7 @@ export function ChatPanel() {
     transport,
     onError: (error) => {
       console.error("Chat error:", error);
+      setChatError(formatChatErrorMessage(error));
     },
   });
 
@@ -435,6 +447,7 @@ export function ChatPanel() {
 
   const onSubmit = useCallback(() => {
     if (!input.trim() || isLoading) return;
+    setChatError(null);
 
     pendingProjectSwitchRef.current = true;
     submissionStartCountRef.current = messagesRef.current.length;
@@ -473,7 +486,7 @@ export function ChatPanel() {
 
   return (
     <div className="flex flex-col h-full">
-      <ChatMessages messages={messages} isLoading={isLoading} />
+      <ChatMessages messages={messages} isLoading={isLoading} errorMessage={chatError} />
       <ChatInput
         input={input}
         setInput={setInput}
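
Taken together, the panel changes give the error banner a full lifecycle: `useChat`'s `onError` sets it, submitting a new message or switching chats clears it, and `ChatMessages` renders it. A minimal sketch of that lifecycle pulled into a hypothetical hook (`useChatErrorBanner` does not exist in the codebase; `formatChatErrorMessage` is the helper added above):

import { useCallback, useState } from "react";

// Hypothetical isolation of ChatPanel's error plumbing: set on stream
// failure, cleared on the next submit or on a chat switch.
function useChatErrorBanner() {
  const [chatError, setChatError] = useState<string | null>(null);
  const onError = useCallback(
    (error: unknown) => setChatError(formatChatErrorMessage(error)),
    []
  );
  const clear = useCallback(() => setChatError(null), []);
  return { chatError, onError, clear };
}
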

@@ -679,6 +679,53 @@ function getLastResponseToolText(messages: ModelMessage[]): string {
   return "";
 }
 
+function getLastNonResponseToolResult(messages: ModelMessage[]): {
+  toolName: string;
+  text: string;
+} | null {
+  for (let i = messages.length - 1; i >= 0; i -= 1) {
+    const msg = messages[i];
+    if (msg.role !== "tool" || !Array.isArray(msg.content)) {
+      continue;
+    }
+
+    for (let j = msg.content.length - 1; j >= 0; j -= 1) {
+      const part = msg.content[j];
+      if (!(typeof part === "object" && part !== null)) continue;
+      if (!("type" in part) || part.type !== "tool-result") continue;
+
+      const toolName =
+        "toolName" in part && typeof (part as { toolName?: unknown }).toolName === "string"
+          ? (part as { toolName: string }).toolName
+          : "";
+      if (!toolName || toolName === "response") continue;
+
+      const output =
+        "output" in part ? (part as { output?: unknown }).output : (part as { result?: unknown }).result;
+      const text = extractToolResultOutputText(output).trim();
+      return {
+        toolName,
+        text,
+      };
+    }
+  }
+
+  return null;
+}
+
+function truncateForFallback(value: string, maxChars: number): string {
+  if (value.length <= maxChars) {
+    return value;
+  }
+  return `${value.slice(0, maxChars)}\n...[truncated]`;
+}
+
+function formatStreamErrorForUser(errorMessage: string): string {
+  const compact = errorMessage.replace(/\s+/g, " ").trim();
+  if (!compact) return "";
+  return compact.length > 220 ? `${compact.slice(0, 220)}...` : compact;
+}
+
 function shouldAutoContinueAssistant(
   text: string,
   finishReason?: string
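
A sketch of what `getLastNonResponseToolResult` picks out, assuming `extractToolResultOutputText` (defined elsewhere in this file) unwraps a text-style tool output; the message shapes and tool name are invented for illustration:

import type { ModelMessage } from "ai";

const sample = [
  { role: "assistant", content: "Let me check the service." },
  {
    role: "tool",
    content: [
      {
        type: "tool-result",
        toolCallId: "call_1",
        toolName: "shell",
        output: { type: "text", value: "curl: (7) Failed to connect" },
      },
    ],
  },
] as ModelMessage[];

// Walks messages from the end, skips the synthetic "response" tool, and
// returns the newest real tool result:
// -> { toolName: "shell", text: "curl: (7) Failed to connect" }
console.log(getLastNonResponseToolResult(sample));
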
@@ -783,6 +830,8 @@ export async function runAgent(options: {
     label: "LLM Request (stream)",
   });
 
+  let streamErrorMessage = "";
+
   // Run the agent with streaming
   const result = streamText({
     model,
@@ -793,6 +842,10 @@ export async function runAgent(options: {
     stopWhen: [stepCountIs(MAX_TOOL_STEPS_PER_TURN), hasToolCall("response")],
     temperature: settings.chatModel.temperature ?? 0.7,
     maxOutputTokens: settings.chatModel.maxTokens ?? 4096,
+    onError: async ({ error }) => {
+      streamErrorMessage = error instanceof Error ? error.message : String(error);
+      console.error("Agent stream error:", error);
+    },
     onFinish: async (event) => {
       const finishReason =
         typeof (event as unknown as { finishReason?: unknown }).finishReason === "string"
@@ -801,7 +854,9 @@ export async function runAgent(options: {
 
       const responseMessages = event.response.messages;
       const lastAssistantText = getLastAssistantText(responseMessages);
+      const responseToolText = getLastResponseToolText(responseMessages).trim();
       let continuationText = "";
+      let fallbackText = "";
 
       if (shouldAutoContinueAssistant(lastAssistantText, finishReason)) {
         try {
@@ -828,6 +883,40 @@ export async function runAgent(options: {
         }
       }
 
+      if (
+        !lastAssistantText.trim() &&
+        !responseToolText &&
+        !continuationText.trim()
+      ) {
+        const lastToolResult = getLastNonResponseToolResult(responseMessages);
+        const streamErrorText = formatStreamErrorForUser(streamErrorMessage);
+        const fallbackLines: string[] = [
+          "Tool execution finished, but I could not produce a final response for this turn.",
+        ];
+
+        if (streamErrorText) {
+          fallbackLines.push(`Reason: ${streamErrorText}`);
+        }
+
+        if (lastToolResult?.toolName) {
+          fallbackLines.push(`Last tool: \`${lastToolResult.toolName}\``);
+        }
+
+        if (lastToolResult?.text) {
+          fallbackLines.push(
+            [
+              "Last tool output (truncated):",
+              "```text",
+              truncateForFallback(lastToolResult.text, 1200),
+              "```",
+            ].join("\n")
+          );
+        }
+
+        fallbackLines.push("Send `continue` and I will finish the answer.");
+        fallbackText = fallbackLines.join("\n\n");
+      }
+
       if (mcpCleanup) {
         try {
           await mcpCleanup();
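
With those pieces in place, a turn that ran tools but produced no assistant text, no `response` tool call, and no auto-continuation now yields a visible assistant message instead of an empty turn. Roughly what the assembled `fallbackText` looks like (all values invented; the backtick fencing assumes the chat UI renders assistant markdown):

Tool execution finished, but I could not produce a final response for this turn.

Reason: fetch failed: connect ECONNREFUSED 127.0.0.1:11434

Last tool: `shell`

Last tool output (truncated):
```text
curl: (7) Failed to connect to localhost port 8080
```

Send `continue` and I will finish the answer.
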
@@ -853,11 +942,11 @@ export async function runAgent(options: {
       for (const msg of responseMessages) {
         chat.messages.push(...convertModelMessageToChatMessages(msg, now));
       }
-      if (continuationText) {
+      if (continuationText || fallbackText) {
         chat.messages.push({
           id: crypto.randomUUID(),
           role: "assistant",
-          content: continuationText,
+          content: continuationText || fallbackText,
           createdAt: now,
         });
       }
@@ -880,7 +969,11 @@ export async function runAgent(options: {
       topic: "chat",
       projectId: options.projectId ?? null,
       chatId: options.chatId,
-      reason: continuationText ? "agent_turn_auto_continued" : "agent_turn_finished",
+      reason: continuationText
+        ? "agent_turn_auto_continued"
+        : fallbackText
+          ? "agent_turn_fallback_response"
+          : "agent_turn_finished",
     });
     publishUiSyncEvent({
       topic: "files",