Add AISBF_DEBUG environment variable for conditional message logging

- Added an AISBF_DEBUG check to control verbose logging of message content
- Full messages are dumped only when AISBF_DEBUG is set to true/1/yes (see the sketch below)
- Otherwise only the message count is logged
- Applies to the Google, OpenAI, and Anthropic provider handlers
- Reduces log verbosity in production while keeping debug capability
parent 631d4c4d
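
In practice the flag just switches between a full message dump and a count-only line. A minimal, standalone sketch of that behavior (the AISBF_DEBUG parsing matches the diff below; the sample messages and handler name are illustrative):

import logging
import os

logging.basicConfig(level=logging.INFO)

# Same parsing as in the diff below: case-insensitive match on true/1/yes
AISBF_DEBUG = os.environ.get('AISBF_DEBUG', '').lower() in ('true', '1', 'yes')

messages = [{"role": "user", "content": "hello"}]
if AISBF_DEBUG:
    # Debug mode: dump full message content
    logging.info(f"Handler: Messages: {messages}")
else:
    # Production default: log only the message count
    logging.info(f"Handler: Messages count: {len(messages)}")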
@@ -25,6 +25,7 @@ Provider handlers for AISBF.
 import httpx
 import asyncio
 import time
+import os
 from typing import Dict, List, Optional, Union
 from google import genai
 from openai import OpenAI
@@ -33,6 +34,9 @@ from pydantic import BaseModel
 from .models import Provider, Model, ErrorTracking
 from .config import config
 
+# Check if debug mode is enabled
+AISBF_DEBUG = os.environ.get('AISBF_DEBUG', '').lower() in ('true', '1', 'yes')
+
 class BaseProviderHandler:
     def __init__(self, provider_id: str, api_key: Optional[str] = None):
         self.provider_id = provider_id
@@ -129,7 +133,10 @@ class GoogleProviderHandler(BaseProviderHandler):
         try:
             import logging
             logging.info(f"GoogleProviderHandler: Handling request for model {model}")
-            logging.info(f"GoogleProviderHandler: Messages: {messages}")
+            if AISBF_DEBUG:
+                logging.info(f"GoogleProviderHandler: Messages: {messages}")
+            else:
+                logging.info(f"GoogleProviderHandler: Messages count: {len(messages)}")
 
             # Apply rate limiting
             await self.apply_rate_limit()
@@ -232,7 +239,11 @@ class OpenAIProviderHandler(BaseProviderHandler):
         try:
             import logging
             logging.info(f"OpenAIProviderHandler: Handling request for model {model}")
-            logging.info(f"OpenAIProviderHandler: Messages: {messages}")
-            logging.info(f"OpenAIProviderHandler: Tools: {tools}")
-            logging.info(f"OpenAIProviderHandler: Tool choice: {tool_choice}")
+            if AISBF_DEBUG:
+                logging.info(f"OpenAIProviderHandler: Messages: {messages}")
+            else:
+                logging.info(f"OpenAIProviderHandler: Messages count: {len(messages)}")
+            if AISBF_DEBUG:
+                logging.info(f"OpenAIProviderHandler: Tools: {tools}")
+                logging.info(f"OpenAIProviderHandler: Tool choice: {tool_choice}")
 
@@ -323,7 +334,10 @@ class AnthropicProviderHandler(BaseProviderHandler):
         try:
             import logging
             logging.info(f"AnthropicProviderHandler: Handling request for model {model}")
-            logging.info(f"AnthropicProviderHandler: Messages: {messages}")
+            if AISBF_DEBUG:
+                logging.info(f"AnthropicProviderHandler: Messages: {messages}")
+            else:
+                logging.info(f"AnthropicProviderHandler: Messages count: {len(messages)}")
 
             # Apply rate limiting
             await self.apply_rate_limit()
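
Note on the parsing: the value is lowercased before the comparison, so AISBF_DEBUG=TRUE or AISBF_DEBUG=Yes also enable the full dump, while anything outside true/1/yes (including on, an empty string, or an unset variable) leaves debug off. A quick illustrative check, not part of the change:

for v in ('true', 'TRUE', '1', 'Yes', 'on', 'false', ''):
    print(repr(v), v.lower() in ('true', '1', 'yes'))
# 'true', 'TRUE', '1', 'Yes' -> True; 'on', 'false', '' -> False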