Fix streaming response handling for OpenAI async iterators

parent 0e5fab02
This diff is collapsed.
...@@ -293,15 +293,15 @@ async def rotation_chat_completions(request: Request, body: ChatCompletionReques ...@@ -293,15 +293,15 @@ async def rotation_chat_completions(request: Request, body: ChatCompletionReques
logger.error(f"Error processing Google chunk: {str(chunk_error)}") logger.error(f"Error processing Google chunk: {str(chunk_error)}")
continue continue
else: else:
# Handle OpenAI/Anthropic streaming responses (async iterators) # Handle OpenAI/Anthropic streaming responses (async iterators)
for chunk in response: async for chunk in response:
try: try:
chunk_dict = chunk.model_dump() if hasattr(chunk, 'model_dump') else chunk chunk_dict = chunk.model_dump() if hasattr(chunk, 'model_dump') else chunk
import json import json
yield f"data: {json.dumps(chunk_dict)}\n\n".encode('utf-8') yield f"data: {json.dumps(chunk_dict)}\n\n".encode('utf-8')
except Exception as chunk_error: except Exception as chunk_error:
logger.warning(f"Error serializing chunk: {str(chunk_error)}") logger.warning(f"Error serializing chunk: {str(chunk_error)}")
continue continue
except Exception as e: except Exception as e:
logger.error(f"Error in streaming response: {str(e)}") logger.error(f"Error in streaming response: {str(e)}")
import json import json
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment