Implement multi-process Video AI architecture

- Add socket-based inter-process communication
- Implement backend process for request routing
- Create separate web interface process
- Add CUDA/ROCm worker processes for analysis and training
- Add configuration system for backend selection
- Update build scripts for multi-component executables
- Add startup scripts for process orchestration
- Include GPLv3 license and copyright notices
- Add comprehensive documentation and README
- Create CHANGELOG for version tracking
parent 552f2d10
Pipeline #193 canceled with stages
#!/usr/bin/env python3
# Simple test for socket communication
import time
import threading
from vidai.comm import SocketServer, SocketCommunicator, Message
def test_handler(message: Message) -> Message:
    """Server-side handler: log the incoming message and acknowledge it.

    Echoes the message type/payload to stdout, then returns a 'response'
    Message carrying the same msg_id so the client can correlate the reply.
    """
    print(f"Server received: {message.msg_type} - {message.data}")
    reply = Message('response', message.msg_id, {'result': 'success'})
    return reply
def test_server():
    """Run a TCP SocketServer on localhost:5001 for five seconds, then stop it.

    Uses test_handler to service incoming messages; intended to run in a
    background thread while test_client exercises the connection.
    """
    print("Starting test server...")
    srv = SocketServer(host='localhost', port=5001, comm_type='tcp')
    srv.start(test_handler)
    # Keep the server alive long enough for the client to connect and exchange
    # one round-trip message.
    time.sleep(5)
    srv.stop()
    print("Server stopped")
def test_client():
    """Connect to the test server, send one message, and print the reply.

    Sleeps briefly first so the background server thread has time to bind
    its listening socket before we attempt to connect.
    """
    time.sleep(1)
    print("Starting test client...")
    client = SocketCommunicator(host='localhost', port=5001, comm_type='tcp')
    client.connect()
    request = Message('test', '123', {'data': 'hello'})
    client.send_message(request)
    reply = client.receive_message()
    print(f"Client received: {reply}")
    client.close()
if __name__ == "__main__":
    # Run the server in a background daemon thread so the client can reach it
    # from the main thread.
    background = threading.Thread(target=test_server, daemon=True)
    background.start()
    # Drive the client from the main thread; it waits 1s internally for the
    # server to come up.
    test_client()
    # Block until the server's five-second run window elapses and it stops.
    background.join()
    print("Test completed")
\ No newline at end of file
......@@ -22,7 +22,7 @@ Manages request routing between web interface and worker processes.
import time
import threading
from .comm import SocketServer, Message
from .config import get_analysis_backend, get_training_backend, set_analysis_backend, set_training_backend, get_comm_type, get_config_value
from .config import get_analysis_backend, get_training_backend, set_analysis_backend, set_training_backend, get_comm_type, get_use_runpod_pods, set_use_runpod_pods
from .compat import get_socket_path, get_default_comm_type
from .queue import queue_manager
from .runpod import runpod_manager, is_runpod_enabled, create_analysis_pod, create_training_pod, RunPodPod
......@@ -85,7 +85,7 @@ def handle_web_message(message: Message) -> Message:
"""Handle messages from web interface."""
if message.msg_type == 'analyze_request':
# Check if we should use RunPod pods
if get_config_value('use_runpod_pods', False) and is_runpod_enabled():
if get_use_runpod_pods() and is_runpod_enabled():
pod = get_available_pod('analysis')
if pod:
# Send job to pod
......@@ -97,7 +97,7 @@ def handle_web_message(message: Message) -> Message:
return Message('ack', message.msg_id, {'status': 'queued'})
elif message.msg_type == 'train_request':
if get_config_value('use_runpod_pods', False) and is_runpod_enabled():
if get_use_runpod_pods() and is_runpod_enabled():
pod = get_available_pod('training')
if pod:
return Message('ack', message.msg_id, {'status': 'pod_assigned', 'pod_id': pod.pod_id})
......@@ -113,15 +113,14 @@ def handle_web_message(message: Message) -> Message:
if 'training_backend' in data:
set_training_backend(data['training_backend'])
if 'use_runpod_pods' in data:
from .config import set_config_value
set_config_value('use_runpod_pods', data['use_runpod_pods'])
set_use_runpod_pods(data['use_runpod_pods'])
return Message('config_response', message.msg_id, {'status': 'updated'})
elif message.msg_type == 'get_config':
return Message('config_response', message.msg_id, {
'analysis_backend': get_analysis_backend(),
'training_backend': get_training_backend(),
'use_runpod_pods': get_config_value('use_runpod_pods', False),
'use_runpod_pods': get_use_runpod_pods(),
'runpod_enabled': is_runpod_enabled()
})
......
......@@ -65,7 +65,12 @@ def set_runpod_gpu_type(gpu_type: str) -> None:
def set_use_runpod_pods(use_pods: bool) -> None:
"""Enable or disable automatic pod creation for jobs."""
set_config('use_runpod_pods', use_pods)
set_config('use_runpod_pods', 'true' if use_pods else 'false')
def get_use_runpod_pods() -> bool:
"""Check if RunPod pods should be used for jobs."""
return get_config('use_runpod_pods', 'false').lower() == 'true'
def get_default_model() -> str:
......
......@@ -26,7 +26,7 @@ from .database import (
get_queue_status, get_user_queue_items, get_queue_position
)
from .config import get_max_concurrent_jobs
from .comm import Message, send_message
from .comm import Message
class QueueManager:
......
......@@ -26,7 +26,7 @@ import requests
import threading
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
from .config import get_config_value, set_config_value
from .config import get_runpod_api_key, set_runpod_api_key, get_runpod_template_id, set_runpod_template_id, get_runpod_gpu_type, set_runpod_gpu_type, get_use_runpod_pods, set_use_runpod_pods
from .compat import get_user_config_dir, ensure_dir
......@@ -46,7 +46,7 @@ class RunPodManager:
"""Manages RunPod pods for dynamic scaling."""
def __init__(self):
self.api_key = get_config_value('runpod_api_key')
self.api_key = get_runpod_api_key()
self.base_url = "https://api.runpod.io/v1"
self.headers = {
'Authorization': f'Bearer {self.api_key}',
......@@ -122,7 +122,7 @@ class RunPodManager:
if not self.is_configured():
return None
template_id = get_config_value('runpod_template_id', 'vidai-analysis-latest')
template_id = get_runpod_template_id()
pod_config = {
"templateId": template_id,
......@@ -250,14 +250,14 @@ runpod_manager = RunPodManager()
def configure_runpod(api_key: str, template_id: str = "vidai-analysis-latest"):
"""Configure RunPod integration."""
set_config_value('runpod_api_key', api_key)
set_config_value('runpod_template_id', template_id)
set_runpod_api_key(api_key)
set_runpod_template_id(template_id)
runpod_manager.__init__() # Reinitialize with new config
def is_runpod_enabled() -> bool:
"""Check if RunPod integration is enabled and configured."""
return get_config_value('runpod_enabled', False) and runpod_manager.is_configured()
return get_runpod_api_key() and runpod_manager.is_configured()
def create_analysis_pod() -> Optional[RunPodPod]:
......@@ -265,7 +265,7 @@ def create_analysis_pod() -> Optional[RunPodPod]:
if not is_runpod_enabled():
return None
gpu_type = get_config_value('runpod_gpu_type', 'NVIDIA RTX A4000')
gpu_type = get_runpod_gpu_type()
return runpod_manager.create_pod("analysis", gpu_type)
......@@ -274,5 +274,5 @@ def create_training_pod() -> Optional[RunPodPod]:
if not is_runpod_enabled():
return None
gpu_type = get_config_value('runpod_gpu_type', 'NVIDIA RTX A5000')
gpu_type = get_runpod_gpu_type()
return runpod_manager.create_pod("training", gpu_type)
\ No newline at end of file
......@@ -1365,33 +1365,33 @@ def tokens():
else:
error = msg
html = f'''
html = '''
<!DOCTYPE html>
<html>
<head>
<title>Token Management - Video AI</title>
<style>
body {{ font-family: Arial, sans-serif; background: #f4f4f4; margin: 0; padding: 20px; }}
.container {{ max-width: 1000px; margin: auto; }}
.header {{ background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); margin-bottom: 20px; display: flex; justify-content: space-between; align-items: center; }}
.nav {{ display: flex; gap: 20px; }}
.nav a {{ text-decoration: none; color: #007bff; }}
.token-balance {{ background: #28a745; color: white; padding: 10px 20px; border-radius: 20px; font-weight: bold; }}
.content {{ display: grid; grid-template-columns: 1fr 300px; gap: 20px; }}
.main {{ background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); }}
.sidebar {{ background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); }}
.packages {{ display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 15px; margin: 20px 0; }}
.package {{ border: 2px solid #e0e0e0; padding: 20px; border-radius: 8px; text-align: center; transition: all 0.3s ease; }}
.package:hover {{ border-color: #007bff; transform: translateY(-2px); }}
.package.popular {{ border-color: #28a745; position: relative; }}
.package.popular::before {{ content: 'Most Popular'; position: absolute; top: -10px; left: 50%; transform: translateX(-50%); background: #28a745; color: white; padding: 3px 10px; border-radius: 10px; font-size: 0.8rem; }}
.package h3 {{ margin: 10px 0; }}
.price {{ font-size: 1.5rem; font-weight: bold; color: #007bff; margin: 10px 0; }}
.btn {{ display: inline-block; padding: 8px 16px; background: #007bff; color: white; text-decoration: none; border-radius: 4px; margin: 5px; }}
.btn:hover {{ background: #0056b3; }}
.message {{ padding: 10px; border-radius: 4px; margin: 10px 0; }}
.success {{ background: #d4edda; color: #155724; }}
.error {{ background: #f8d7da; color: #721c24; }}
body { font-family: Arial, sans-serif; background: #f4f4f4; margin: 0; padding: 20px; }
.container { max-width: 1000px; margin: auto; }
.header { background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); margin-bottom: 20px; display: flex; justify-content: space-between; align-items: center; }
.nav { display: flex; gap: 20px; }
.nav a { text-decoration: none; color: #007bff; }
.token-balance { background: #28a745; color: white; padding: 10px 20px; border-radius: 20px; font-weight: bold; }
.content { display: grid; grid-template-columns: 1fr 300px; gap: 20px; }
.main { background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); }
.sidebar { background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); }
.packages { display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 15px; margin: 20px 0; }
.package { border: 2px solid #e0e0e0; padding: 20px; border-radius: 8px; text-align: center; transition: all 0.3s ease; }
.package:hover { border-color: #007bff; transform: translateY(-2px); }
.package.popular { border-color: #28a745; position: relative; }
.package.popular::before { content: 'Most Popular'; position: absolute; top: -10px; left: 50%; transform: translateX(-50%); background: #28a745; color: white; padding: 3px 10px; border-radius: 10px; font-size: 0.8rem; }
.package h3 { margin: 10px 0; }
.price { font-size: 1.5rem; font-weight: bold; color: #007bff; margin: 10px 0; }
.btn { display: inline-block; padding: 8px 16px; background: #007bff; color: white; text-decoration: none; border-radius: 4px; margin: 5px; }
.btn:hover { background: #0056b3; }
.message { padding: 10px; border-radius: 4px; margin: 10px 0; }
.success { background: #d4edda; color: #155724; }
.error { background: #f8d7da; color: #721c24; }
</style>
</head>
<body>
......@@ -1406,14 +1406,14 @@ def tokens():
</div>
<div class="token-balance">
Current Balance: {user_tokens} tokens
Current Balance: ''' + str(user_tokens) + ''' tokens
</div>
{% if message %}
<div class="message success">{message}</div>
<div class="message success">''' + str(message) + '''</div>
{% endif %}
{% if error %}
<div class="message error">{error}</div>
<div class="message error">''' + str(error) + '''</div>
{% endif %}
<div class="content">
......@@ -1422,15 +1422,15 @@ def tokens():
<div class="packages">
{% for package in token_packages %}
<div class="package {% if package.popular %}popular{% endif %}">
<h3>{package.tokens} Tokens</h3>
<div class="price">${package.price}</div>
<h3>{% raw %}{{ package.tokens }}{% endraw %} Tokens</h3>
<div class="price">${% raw %}{{ package.price }}{% endraw %}</div>
<form method="post" style="display: inline;">
<input type="hidden" name="action" value="purchase">
<input type="hidden" name="tokens" value="{package.tokens}">
<input type="hidden" name="tokens" value="{% raw %}{{ package.tokens }}{% endraw %}">
<select name="processor" style="margin: 5px 0;">
{% for name, proc in processors.items() %}
{% if proc.enabled %}
<option value="{name}">{proc.name}</option>
<option value="{% raw %}{{ name }}{% endraw %}">{% raw %}{{ proc.name }}{% endraw %}</option>
{% endif %}
{% endfor %}
</select><br>
......@@ -1447,7 +1447,7 @@ def tokens():
<select name="processor">
{% for name, proc in processors.items() %}
{% if proc.enabled %}
<option value="{name}">{proc.name}</option>
<option value="{% raw %}{{ name }}{% endraw %}">{% raw %}{{ proc.name }}{% endraw %}</option>
{% endif %}
{% endfor %}
</select>
......@@ -1640,19 +1640,19 @@ def config():
# Get current config
current_config = get_all_settings()
html = f'''
html = '''
<!DOCTYPE html>
<html>
<head>
<title>Configuration</title>
<style>
body {{ font-family: Arial, sans-serif; background-color: #f4f4f4; margin: 0; padding: 20px; }}
.container {{ max-width: 800px; margin: auto; background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); }}
h1 {{ color: #333; text-align: center; }}
form {{ margin-bottom: 20px; }}
label {{ display: block; margin-bottom: 5px; }}
select, input[type="text"], input[type="number"] {{ width: 100%; padding: 8px; margin-bottom: 10px; border: 1px solid #ccc; border-radius: 4px; }}
input[type="submit"] {{ background: #007bff; color: white; padding: 10px; border: none; border-radius: 4px; cursor: pointer; }}
body { font-family: Arial, sans-serif; background-color: #f4f4f4; margin: 0; padding: 20px; }
.container { max-width: 800px; margin: auto; background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0,0,0,0.1); }
h1 { color: #333; text-align: center; }
form { margin-bottom: 20px; }
label { display: block; margin-bottom: 5px; }
select, input[type="text"], input[type="number"] { width: 100%; padding: 8px; margin-bottom: 10px; border: 1px solid #ccc; border-radius: 4px; }
input[type="submit"] { background: #007bff; color: white; padding: 10px; border: none; border-radius: 4px; cursor: pointer; }
</style>
</head>
<body>
......@@ -1662,24 +1662,24 @@ def config():
<form method="post">
<label>Analysis Backend:
<select name="analysis_backend">
<option value="cuda" {"selected" if current_config['analysis_backend'] == 'cuda' else ""}>CUDA</option>
<option value="rocm" {"selected" if current_config['analysis_backend'] == 'rocm' else ""}>ROCm</option>
<option value="cuda" ''' + ('selected' if current_config['analysis_backend'] == 'cuda' else '') + '''>CUDA</option>
<option value="rocm" ''' + ('selected' if current_config['analysis_backend'] == 'rocm' else '') + '''>ROCm</option>
</select>
</label>
<label>Training Backend:
<select name="training_backend">
<option value="cuda" {"selected" if current_config['training_backend'] == 'cuda' else ""}>CUDA</option>
<option value="rocm" {"selected" if current_config['training_backend'] == 'rocm' else ""}>ROCm</option>
<option value="cuda" ''' + ('selected' if current_config['training_backend'] == 'cuda' else '') + '''>CUDA</option>
<option value="rocm" ''' + ('selected' if current_config['training_backend'] == 'rocm' else '') + '''>ROCm</option>
</select>
</label>
<label>Communication Type:
<select name="comm_type">
<option value="unix" {"selected" if current_config['comm_type'] == 'unix' else ""}>Unix Socket</option>
<option value="tcp" {"selected" if current_config['comm_type'] == 'tcp' else ""}>TCP Socket</option>
<option value="unix" ''' + ('selected' if current_config['comm_type'] == 'unix' else '') + '''>Unix Socket</option>
<option value="tcp" ''' + ('selected' if current_config['comm_type'] == 'tcp' else '') + '''>TCP Socket</option>
</select>
</label>
<label>Default Model: <input type="text" name="default_model" value="{current_config['default_model']}"></label>
<label>Frame Interval (seconds): <input type="number" name="frame_interval" value="{current_config['frame_interval']}" min="1"></label>
<label>Default Model: <input type="text" name="default_model" value="''' + str(current_config['default_model']) + '''"></label>
<label>Frame Interval (seconds): <input type="number" name="frame_interval" value="''' + str(current_config['frame_interval']) + '''" min="1"></label>
<input type="submit" value="Save Configuration">
</form>
</div>
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment