# FastAPI and server dependencies
fastapi>=0.104.0
uvicorn[standard]>=0.24.0
pydantic>=2.5.0
python-multipart>=0.0.6  # for multipart form data parsing
requests>=2.31.0  # for HTTP requests

# llama-cpp-python is installed by build.sh with Vulkan support
# CMAKE_ARGS="-DGGML_VULKAN=ON" pip install llama-cpp-python --no-cache-dir

# System resource detection
psutil>=5.9.0
# procname>=0.3.0  # optional - uncomment to set process name (requires libproc2-dev)

# HuggingFace Hub for downloading GGUF models
huggingface-hub>=0.19.0

# Audio transcription without PyTorch (whispercpp) — installed by default; remove if transcription is not needed
# Note: faster-whisper requires PyTorch, but whispercpp works without it
whispercpp>=1.0.0  # For GGUF-based Whisper transcription without PyTorch
