Commit 142522c1 authored by nextime

Add Python client for OLProxy API

- Created olproxy_client.py with full API support
- Supports both Ollama and OpenAI API formats
- Includes command-line interface for easy testing
- Provides methods for all major API endpoints
- Added proper error handling and JSON formatting
parent 62fc2340
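
For context, a minimal programmatic use of the committed client might look like this. This is an illustrative sketch only: it assumes an OLProxy server is listening on the default port and that a model named "grok:latest" (taken from the CLI default below) is actually configured on the proxy.

    from olproxy_client import OLProxyClient

    client = OLProxyClient("http://localhost:11434")

    # Ollama-style chat
    reply = client.chat("grok:latest", [{"role": "user", "content": "Hello"}])

    # OpenAI-style chat against the same server
    reply = client.openai_chat_completion("grok:latest", [{"role": "user", "content": "Hello"}])
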
#!/usr/bin/env python3
"""
OLProxy Python Client

Simple Python client for interacting with the OLProxy API server.
Supports both Ollama and OpenAI API formats.
"""

import argparse
import json
import sys
from typing import Dict, List

import requests


class OLProxyClient:
    """Client for interacting with OLProxy API server."""

    def __init__(self, base_url: str = "http://localhost:11434"):
        """
        Initialize the OLProxy client.

        Args:
            base_url: Base URL of the OLProxy server (default: http://localhost:11434)
        """
        self.base_url = base_url.rstrip('/')

    def list_models(self) -> Dict:
        """
        List available models using Ollama API format.

        Returns:
            Dict containing models information
        """
        response = requests.get(f"{self.base_url}/api/tags")
        response.raise_for_status()
        return response.json()

    def generate(self, model: str, prompt: str, **kwargs) -> Dict:
        """
        Generate text using Ollama API format.

        Args:
            model: Model name (e.g., "grok:latest", "gemini:latest")
            prompt: Input prompt
            **kwargs: Additional parameters

        Returns:
            Dict containing generated response
        """
        data = {
            "model": model,
            "prompt": prompt,
            **kwargs,
        }
        # Ollama-style endpoints stream NDJSON by default, which would break
        # response.json(); request a single JSON response unless the caller
        # explicitly asks for streaming.
        data.setdefault("stream", False)
        response = requests.post(f"{self.base_url}/api/generate", json=data)
        response.raise_for_status()
        return response.json()

    def chat(self, model: str, messages: List[Dict[str, str]], **kwargs) -> Dict:
        """
        Chat completion using Ollama API format.

        Args:
            model: Model name
            messages: List of message dictionaries with 'role' and 'content'
            **kwargs: Additional parameters

        Returns:
            Dict containing chat response
        """
        data = {
            "model": model,
            "messages": messages,
            **kwargs,
        }
        # As with generate(), default to a single non-streamed JSON response.
        data.setdefault("stream", False)
        response = requests.post(f"{self.base_url}/api/chat", json=data)
        response.raise_for_status()
        return response.json()

    def openai_list_models(self) -> Dict:
        """
        List available models using OpenAI API format.

        Returns:
            Dict containing models information in OpenAI format
        """
        response = requests.get(f"{self.base_url}/v1/models")
        response.raise_for_status()
        return response.json()

    def openai_chat_completion(self, model: str, messages: List[Dict[str, str]], **kwargs) -> Dict:
        """
        Chat completion using OpenAI API format.

        Args:
            model: Model name
            messages: List of message dictionaries with 'role' and 'content'
            **kwargs: Additional parameters

        Returns:
            Dict containing chat completion response in OpenAI format
        """
        data = {
            "model": model,
            "messages": messages,
            **kwargs,
        }
        response = requests.post(f"{self.base_url}/v1/chat/completions", json=data)
        response.raise_for_status()
        return response.json()

    def health_check(self) -> Dict:
        """
        Check server health.

        Returns:
            Dict containing health status
        """
        response = requests.get(f"{self.base_url}/")
        response.raise_for_status()
        return response.json()
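

# Note: every method above calls response.raise_for_status(), so HTTP
# failures surface as requests exceptions that callers can handle in one
# place. An illustrative sketch (hypothetical model name; server assumed
# reachable):
#
#   try:
#       OLProxyClient().generate("grok:latest", "ping")
#   except requests.exceptions.RequestException as err:
#       print(f"request failed: {err}")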


def main():
    """Command line interface for the OLProxy client."""
    parser = argparse.ArgumentParser(description="OLProxy Python Client")
    parser.add_argument("--url", default="http://localhost:11434", help="OLProxy server URL")
    parser.add_argument("--model", default="grok:latest", help="Model to use")
    parser.add_argument("--format", choices=["ollama", "openai"], default="ollama", help="API format to use")
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # List models command
    subparsers.add_parser("list", help="List available models")

    # Generate command
    generate_parser = subparsers.add_parser("generate", help="Generate text")
    generate_parser.add_argument("prompt", help="Input prompt")

    # Chat command
    chat_parser = subparsers.add_parser("chat", help="Chat completion")
    chat_parser.add_argument("message", help="User message")

    # Health check command
    subparsers.add_parser("health", help="Check server health")

    args = parser.parse_args()
    if not args.command:
        parser.print_help()
        return

    client = OLProxyClient(args.url)
    try:
        if args.command == "list":
            if args.format == "openai":
                result = client.openai_list_models()
            else:
                result = client.list_models()
            print(json.dumps(result, indent=2))
        elif args.command == "generate":
            result = client.generate(args.model, args.prompt)
            print(json.dumps(result, indent=2))
        elif args.command == "chat":
            messages = [{"role": "user", "content": args.message}]
            if args.format == "openai":
                result = client.openai_chat_completion(args.model, messages)
            else:
                result = client.chat(args.model, messages)
            print(json.dumps(result, indent=2))
        elif args.command == "health":
            result = client.health_check()
            print(json.dumps(result, indent=2))
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
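
Example command-line invocations, matching the subcommands defined above (illustrative; they assume the proxy is running locally and has at least one backend model configured):

    python3 olproxy_client.py list
    python3 olproxy_client.py --format openai list
    python3 olproxy_client.py --model grok:latest generate "Hello there"
    python3 olproxy_client.py --format openai chat "What is OLProxy?"
    python3 olproxy_client.py health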