Fix Google GenAI streaming handler to use async generator

- Keep stream_generator as an async function (not sync)
- Wrap Google's synchronous iterator in an async generator
- Properly structure if/else for streaming vs non-streaming paths
- Fix 'client has been closed' error in streaming responses

This fixes the issue where streaming requests through autoselect
were failing with a 'Cannot send a request, as a client has been closed'
error.
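
For reference, the core of the change is the standard pattern of re-yielding a
synchronous iterator from inside an `async def` generator so callers can consume
it with `async for`, instead of handing them the raw sync iterator. A minimal,
self-contained sketch of that pattern, with a hypothetical `fake_sync_stream()`
standing in for `client.models.generate_content_stream()`:

```python
import asyncio
from typing import AsyncIterator, Iterator


def fake_sync_stream() -> Iterator[str]:
    # Stand-in for the provider's synchronous streaming call
    # (e.g. client.models.generate_content_stream(...) in the real handler).
    yield from ["Hel", "lo", " world"]


async def stream_generator() -> AsyncIterator[str]:
    # Wrap the synchronous iterator in an async generator so an async
    # caller (e.g. a streaming HTTP response) can drive it with `async for`.
    for chunk in fake_sync_stream():
        yield chunk


async def main() -> None:
    async for chunk in stream_generator():
        print(chunk, end="", flush=True)
    print()


asyncio.run(main())
```

Note that each step of the inner `for` loop still runs synchronously on the event
loop; the sketch only shows the wrapping shape, not the OpenAI-compatible chunk
conversion done in the handler.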
parent 63268f97
@@ -160,23 +160,14 @@ class GoogleProviderHandler(BaseProviderHandler):
                 )
                 logging.info(f"GoogleProviderHandler: Streaming response received")
                 self.record_success()
-                return response
-            else:
-                # Generate content using the google-genai client
-                response = self.client.models.generate_content(
-                    model=model,
-                    contents=content,
-                    config=config
-                )
-            # Handle streaming response
-            if stream:
-                logging.info(f"GoogleProviderHandler: Processing streaming response")
-                # Create a generator that yields OpenAI-compatible chunks
+                # Create an async generator that yields OpenAI-compatible chunks
+                # Google's generate_content_stream() returns a synchronous iterator
+                # We need to wrap it in an async generator
                 async def stream_generator():
                     try:
                         chunk_id = 0
+                        # Iterate over the sync iterator
                         for chunk in response:
                             logging.info(f"GoogleProviderHandler: Processing stream chunk")
@@ -210,7 +201,7 @@ class GoogleProviderHandler(BaseProviderHandler):
                             chunk_id += 1
                             logging.info(f"Yielding OpenAI chunk: {openai_chunk}")
-                            # Yield the complete chunk object as a single line
+                            # Yield to complete chunk object as a single line
                             yield openai_chunk
                     except Exception as e:
@@ -218,8 +209,15 @@ class GoogleProviderHandler(BaseProviderHandler):
                         raise
                 return stream_generator()
+            else:
+                # Non-streaming request
+                # Generate content using the google-genai client
+                response = self.client.models.generate_content(
+                    model=model,
+                    contents=content,
+                    config=config
+                )
-            # Non-streaming response
             logging.info(f"GoogleProviderHandler: Response received: {response}")
             self.record_success()
...