@@ -49,7 +49,6 @@ interface RequestBody {
4949 history ?: ChatMessage [ ]
5050}
5151
52- // Helper: safe stringify for error payloads that may include circular structures
5352function safeStringify ( value : unknown ) : string {
5453 try {
5554 return JSON . stringify ( value )
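The hunk above cuts off before safeStringify's catch branch. As a rough sketch of the pattern the removed comment described, assuming a plain String() fallback that this diff does not show:

// Sketch only: the catch branch is an assumption, since the hunk ends at the
// happy path. JSON.stringify throws a TypeError on circular structures.
function safeStringifySketch(value: unknown): string {
  try {
    return JSON.stringify(value)
  } catch {
    return String(value) // e.g. '[object Object]' for a circular object
  }
}

const circular: { self?: unknown } = {}
circular.self = circular
console.log(safeStringifySketch({ ok: true })) // {"ok":true}
console.log(safeStringifySketch(circular)) // [object Object]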
@@ -83,18 +82,14 @@ export async function POST(req: NextRequest) {
8382 )
8483 }
8584
86- // Use provided system prompt or default
8785 const finalSystemPrompt =
8886 systemPrompt ||
8987 'You are a helpful AI assistant. Generate content exactly as requested by the user.'
9088
91- // Prepare messages for OpenAI API
9289 const messages : ChatMessage [ ] = [ { role : 'system' , content : finalSystemPrompt } ]
9390
94- // Add previous messages from history
9591 messages . push ( ...history . filter ( ( msg ) => msg . role !== 'system' ) )
9692
97- // Add the current user prompt
9893 messages . push ( { role : 'user' , content : prompt } )
9994
10095 logger . debug (
@@ -108,7 +103,6 @@ export async function POST(req: NextRequest) {
108103 }
109104 )
110105
111- // For streaming responses
112106 if ( stream ) {
113107 try {
114108 logger . debug (
@@ -119,7 +113,6 @@ export async function POST(req: NextRequest) {
119113 `[${ requestId } ] About to create stream with model: ${ useWandAzure ? wandModelName : 'gpt-4o' } `
120114 )
121115
122- // Use native fetch for streaming to avoid OpenAI SDK issues with Node.js runtime
123116 const apiUrl = useWandAzure
124117 ? `${ azureEndpoint } /openai/deployments/${ wandModelName } /chat/completions?api-version=${ azureApiVersion } `
125118 : 'https://api.openai.com/v1/chat/completions'
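For context on the "native fetch" approach this hunk switches to, a minimal standalone sketch follows. The request body shape (model, messages, stream: true) matches the diff; the Authorization header is an assumption here, since the Azure branch would authenticate differently (typically an api-key header):

// Minimal sketch of streaming Chat Completions over native fetch; not the
// commit's exact code. Assumes the OpenAI-hosted endpoint and a Bearer token.
const url = 'https://api.openai.com/v1/chat/completions'
const res = await fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
  },
  body: JSON.stringify({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Say hello' }],
    stream: true, // ask the API for SSE chunks instead of one JSON body
  }),
})
if (!res.ok || !res.body) {
  throw new Error(`Upstream request failed: ${res.status}`)
}
const reader = res.body.getReader() // consumed chunk by chunk, as in the hunks below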
@@ -161,7 +154,6 @@ export async function POST(req: NextRequest) {
161154
162155 logger . info ( `[${ requestId } ] Stream response received, starting processing` )
163156
164- // Create a TransformStream to process the SSE data
165157 const encoder = new TextEncoder ( )
166158 const decoder = new TextDecoder ( )
167159
@@ -187,12 +179,10 @@ export async function POST(req: NextRequest) {
187179 break
188180 }
189181
190- // Decode the chunk
191182 buffer += decoder . decode ( value , { stream : true } )
192183
193- // Process complete SSE messages
194184 const lines = buffer . split ( '\n' )
195- buffer = lines . pop ( ) || '' // Keep incomplete line in buffer
185+ buffer = lines . pop ( ) || ''
196186
197187 for ( const line of lines ) {
198188 if ( line . startsWith ( 'data: ' ) ) {
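The split/pop step above is the core of SSE line buffering: a network chunk can end mid-line, so the incomplete tail must be carried into the next read. A self-contained sketch of just that step (the helper name is illustrative, not from the commit):

// Accumulate decoded text, emit only complete lines, keep the partial tail.
function drainLines(buffer: string, chunk: string): { lines: string[]; rest: string } {
  const lines = (buffer + chunk).split('\n')
  const rest = lines.pop() || '' // incomplete line stays buffered
  return { lines, rest }
}

// A data line split across two chunks still comes out whole:
let buf = ''
for (const chunk of ['data: {"a":1}\ndata: {"b', '":2}\n']) {
  const { lines, rest } = drainLines(buf, chunk)
  buf = rest
  for (const line of lines) {
    if (line.startsWith('data: ')) console.log(line.slice(6)) // {"a":1}, then {"b":2}
  }
}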
@@ -217,25 +207,21 @@ export async function POST(req: NextRequest) {
217207 logger . info ( `[${ requestId } ] Received first content chunk` )
218208 }
219209
220- // Forward the content
221210 controller . enqueue (
222211 encoder . encode ( `data: ${ JSON . stringify ( { chunk : content } ) } \n\n` )
223212 )
224213 }
225214
226- // Log usage if present
227215 if ( parsed . usage ) {
228216 logger . info (
229217 `[${ requestId } ] Received usage data: ${ JSON . stringify ( parsed . usage ) } `
230218 )
231219 }
232220
233- // Log progress periodically
234221 if ( chunkCount % 10 === 0 ) {
235222 logger . debug ( `[${ requestId } ] Processed ${ chunkCount } chunks` )
236223 }
237224 } catch ( parseError ) {
238- // Skip invalid JSON lines
239225 logger . debug (
240226 `[${ requestId } ] Skipped non-JSON line: ${ data . substring ( 0 , 100 ) } `
241227 )
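The catch (parseError) branch above treats unparseable payloads as skippable rather than fatal, which is what lets the stream survive OpenAI's literal [DONE] sentinel. A condensed sketch of that parse step; the choices[0].delta.content path is the standard Chat Completions chunk shape, assumed rather than shown in this hunk:

// Returns the text delta from one SSE data payload, or null for anything
// that is not a JSON chat chunk (e.g. the "[DONE]" sentinel).
function extractDelta(data: string): string | null {
  try {
    const parsed = JSON.parse(data)
    return parsed.choices?.[0]?.delta?.content ?? null
  } catch {
    return null // non-JSON payload: skip, do not abort the stream
  }
}

console.log(extractDelta('{"choices":[{"delta":{"content":"Hi"}}]}')) // Hi
console.log(extractDelta('[DONE]')) // null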
@@ -252,7 +238,6 @@ export async function POST(req: NextRequest) {
252238 stack : streamError ?. stack ,
253239 } )
254240
255- // Send error to client
256241 const errorData = `data: ${ JSON . stringify ( { error : 'Streaming failed' , done : true } ) } \n\n`
257242 controller . enqueue ( encoder . encode ( errorData ) )
258243 controller . close ( )
@@ -262,14 +247,12 @@ export async function POST(req: NextRequest) {
262247 } ,
263248 } )
264249
265- // Return Response with proper headers for Node.js runtime
266250 return new Response ( readable , {
267251 headers : {
268252 'Content-Type' : 'text/event-stream' ,
269253 'Cache-Control' : 'no-cache, no-transform' ,
270254 Connection : 'keep-alive' ,
271- 'X-Accel-Buffering' : 'no' , // Disable Nginx buffering
272- 'Transfer-Encoding' : 'chunked' , // Important for Node.js runtime
255+ 'X-Accel-Buffering' : 'no' ,
273256 } ,
274257 } )
275258 } catch ( error : any ) {
@@ -294,7 +277,6 @@ export async function POST(req: NextRequest) {
294277 }
295278 }
296279
297- // For non-streaming responses
298280 const completion = await client . chat . completions . create ( {
299281 model : useWandAzure ? wandModelName : 'gpt-4o' ,
300282 messages : messages ,
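For the non-streaming path, the hunk ends before the response is consumed. A sketch of the usual end-to-end shape with the OpenAI SDK; reading choices[0].message.content is an assumption about code outside this diff:

import OpenAI from 'openai'

// Sketch: non-streaming completion and the typical way its text is read.
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
const completion = await client.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Say hello' }],
})
console.log(completion.choices[0]?.message?.content ?? '')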