Skip to content

Commit 1f9b4a8

Browse files
authored
fix(wand): remove unstable_noStore, add additional logs for wand generation (#1133)
* feat(wand): added additional logs for wand generation * remove unstable_noStore
1 parent 3372829 commit 1f9b4a8

File tree

1 file changed

+82
-15
lines changed

1 file changed

+82
-15
lines changed

apps/sim/app/api/wand-generate/route.ts

Lines changed: 82 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
import { unstable_noStore as noStore } from 'next/cache'
21
import { type NextRequest, NextResponse } from 'next/server'
32
import OpenAI, { AzureOpenAI } from 'openai'
43
import { env } from '@/lib/env'
54
import { createLogger } from '@/lib/logs/console/logger'
65

76
export const dynamic = 'force-dynamic'
7+
export const runtime = 'edge'
88
export const maxDuration = 60
99

1010
const logger = createLogger('WandGenerateAPI')
@@ -62,7 +62,6 @@ export async function POST(req: NextRequest) {
6262
}
6363

6464
try {
65-
noStore()
6665
const body = (await req.json()) as RequestBody
6766

6867
const { prompt, systemPrompt, stream = false, history = [] } = body
@@ -107,14 +106,38 @@ export async function POST(req: NextRequest) {
107106
`[${requestId}] Starting streaming request to ${useWandAzure ? 'Azure OpenAI' : 'OpenAI'}`
108107
)
109108

110-
const streamCompletion = await client.chat.completions.create({
111-
model: useWandAzure ? wandModelName : 'gpt-4o',
112-
messages: messages,
113-
temperature: 0.3,
114-
max_tokens: 10000,
115-
stream: true,
109+
logger.info(
110+
`[${requestId}] About to create stream with model: ${useWandAzure ? wandModelName : 'gpt-4o'}`
111+
)
112+
113+
// Add AbortController with timeout
114+
const abortController = new AbortController()
115+
const timeoutId = setTimeout(() => {
116+
abortController.abort('Stream timeout after 30 seconds')
117+
}, 30000)
118+
119+
// Forward request abort signal if available
120+
req.signal?.addEventListener('abort', () => {
121+
abortController.abort('Request cancelled by client')
116122
})
117123

124+
const streamCompletion = await client.chat.completions.create(
125+
{
126+
model: useWandAzure ? wandModelName : 'gpt-4o',
127+
messages: messages,
128+
temperature: 0.3,
129+
max_tokens: 10000,
130+
stream: true,
131+
stream_options: { include_usage: true },
132+
},
133+
{
134+
signal: abortController.signal, // Add AbortSignal
135+
}
136+
)
137+
138+
clearTimeout(timeoutId) // Clear timeout after successful creation
139+
logger.info(`[${requestId}] Stream created successfully, starting reader pattern`)
140+
118141
logger.debug(`[${requestId}] Stream connection established successfully`)
119142

120143
return new Response(
@@ -123,27 +146,71 @@ export async function POST(req: NextRequest) {
123146
const encoder = new TextEncoder()
124147

125148
try {
149+
logger.info(`[${requestId}] Starting streaming with timeout protection`)
150+
let chunkCount = 0
151+
let hasUsageData = false
152+
153+
// Use for await with AbortController timeout protection
126154
for await (const chunk of streamCompletion) {
127-
const content = chunk.choices[0]?.delta?.content || ''
155+
chunkCount++
156+
157+
if (chunkCount === 1) {
158+
logger.info(`[${requestId}] Received first chunk via for await`)
159+
}
160+
161+
// Process the chunk
162+
const content = chunk.choices?.[0]?.delta?.content || ''
128163
if (content) {
129164
// Use SSE format identical to chat streaming
130165
controller.enqueue(
131166
encoder.encode(`data: ${JSON.stringify({ chunk: content })}\n\n`)
132167
)
133168
}
169+
170+
// Check for usage data
171+
if (chunk.usage) {
172+
hasUsageData = true
173+
logger.info(
174+
`[${requestId}] Received usage data: ${JSON.stringify(chunk.usage)}`
175+
)
176+
}
177+
178+
// Log every 5th chunk to avoid spam
179+
if (chunkCount % 5 === 0) {
180+
logger.debug(`[${requestId}] Processed ${chunkCount} chunks so far`)
181+
}
134182
}
135183

184+
logger.info(
185+
`[${requestId}] Reader pattern completed. Total chunks: ${chunkCount}, Usage data received: ${hasUsageData}`
186+
)
187+
136188
// Send completion signal in SSE format
189+
logger.info(`[${requestId}] Sending completion signal`)
137190
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
191+
192+
logger.info(`[${requestId}] Closing controller`)
138193
controller.close()
139-
logger.info(`[${requestId}] Wand generation streaming completed`)
194+
195+
logger.info(`[${requestId}] Wand generation streaming completed successfully`)
140196
} catch (streamError: any) {
141-
logger.error(`[${requestId}] Streaming error`, { error: streamError.message })
142-
controller.enqueue(
143-
encoder.encode(
144-
`data: ${JSON.stringify({ error: 'Streaming failed', done: true })}\n\n`
197+
if (streamError.name === 'AbortError') {
198+
logger.info(
199+
`[${requestId}] Stream was aborted (timeout or cancel): ${streamError.message}`
145200
)
146-
)
201+
controller.enqueue(
202+
encoder.encode(
203+
`data: ${JSON.stringify({ error: 'Stream cancelled', done: true })}\n\n`
204+
)
205+
)
206+
} else {
207+
logger.error(`[${requestId}] Streaming error`, { error: streamError.message })
208+
controller.enqueue(
209+
encoder.encode(
210+
`data: ${JSON.stringify({ error: 'Streaming failed', done: true })}\n\n`
211+
)
212+
)
213+
}
147214
controller.close()
148215
}
149216
},

0 commit comments

Comments
 (0)