Implement prompts management system and fix analyze page stats blinking

- Add new prompts database table for system prompt management
- Create admin prompts page with analyze and training sections
- Add prompts management UI with separate areas for different functionalities
- Update analyze page with audio analysis and video splitting checkboxes
- Modify analyze API to handle new prompt composition logic
- Update worker_analyze to handle multi-step prompts (video + audio)
- Fix stats reload blinking in analyze page right panel by updating only changing sections
- Add prompts link to admin menu navigation

All changes follow the AI.PROMPT rules for database migrations and API updates.
parent c2855737
{% extends "base.html" %}
{% block title %}Prompts Management - VidAI{% endblock %}
{% block head %}
<style>
.container { max-width: 1200px; margin: 2rem auto; padding: 0 2rem; }
.prompts-section { background: white; padding: 2rem; border-radius: 12px; box-shadow: 0 2px 10px rgba(0,0,0,0.05); margin-bottom: 2rem; }
.prompts-section h3 { color: #1e293b; margin-bottom: 1.5rem; border-bottom: 2px solid #e5e7eb; padding-bottom: 0.5rem; }
.prompt-group { margin-bottom: 2rem; }
.prompt-group h4 { color: #374151; margin-bottom: 1rem; }
.form-group { margin-bottom: 1.5rem; }
.form-group label { display: block; margin-bottom: 0.5rem; color: #374151; font-weight: 500; }
.form-group textarea { width: 100%; min-height: 120px; padding: 0.75rem; border: 1px solid #d1d5db; border-radius: 8px; font-family: monospace; font-size: 0.9rem; resize: vertical; }
.btn { padding: 0.75rem 2rem; background: #667eea; color: white; border: none; border-radius: 8px; font-size: 1rem; font-weight: 600; cursor: pointer; }
.btn:hover { background: #5a67d8; }
.btn-success { background: #10b981; }
.btn-success:hover { background: #059669; }
.alert { padding: 0.75rem; border-radius: 8px; margin-bottom: 1rem; }
.alert-success { background: #d1fae5; color: #065f46; border: 1px solid #a7f3d0; }
.alert-error { background: #fee2e2; color: #dc2626; border: 1px solid #fecaca; }
</style>
{% endblock %}
{% block content %}
<div class="container">
<h1>Prompts Management</h1>
<p style="color: #64748b; margin-bottom: 2rem;">Configure system prompts for analysis and training operations.</p>
{% with messages = get_flashed_messages(with_categories=true) %}
{% if messages %}
{% for category, message in messages %}
<div class="alert alert-{{ 'error' if category == 'error' else 'success' }}">{{ message }}</div>
{% endfor %}
{% endif %}
{% endwith %}
<!-- Analyze Video Jobs Section -->
<div class="prompts-section">
<h3>📹 Analyze Video Jobs</h3>
<form method="post" action="/admin/prompts/save">
<input type="hidden" name="functionality" value="analyze">
<div class="prompt-group">
<h4>System Prompt</h4>
<div class="form-group">
<label for="analyze_system_prompt">Base system prompt for all analysis operations:</label>
<textarea name="system_prompt" id="analyze_system_prompt" placeholder="Enter system prompt...">{{ analyze_prompts.system_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Video Analysis Prompt</h4>
<div class="form-group">
<label for="analyze_video_prompt">Prompt for video content analysis:</label>
<textarea name="video_prompt" id="analyze_video_prompt" placeholder="Enter video analysis prompt...">{{ analyze_prompts.video_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Audio Analysis Prompt</h4>
<div class="form-group">
<label for="analyze_audio_prompt">Prompt for audio content analysis:</label>
<textarea name="audio_prompt" id="analyze_audio_prompt" placeholder="Enter audio analysis prompt...">{{ analyze_prompts.audio_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Image Analysis Prompt</h4>
<div class="form-group">
<label for="analyze_image_prompt">Prompt for image content analysis:</label>
<textarea name="image_prompt" id="analyze_image_prompt" placeholder="Enter image analysis prompt...">{{ analyze_prompts.image_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Video Splitting Prompt</h4>
<div class="form-group">
<label for="analyze_split_media_prompt">Prompt for video scene splitting operations:</label>
<textarea name="split_media_prompt" id="analyze_split_media_prompt" placeholder="Enter video splitting prompt...">{{ analyze_prompts.split_media_prompt or '' }}</textarea>
</div>
</div>
<button type="submit" class="btn btn-success">Save Analyze Prompts</button>
</form>
</div>
<!-- Training Section -->
<div class="prompts-section">
<h3>🎯 Training Jobs</h3>
<form method="post" action="/admin/prompts/save">
<input type="hidden" name="functionality" value="training">
<div class="prompt-group">
<h4>System Prompt</h4>
<div class="form-group">
<label for="training_system_prompt">Base system prompt for all training operations:</label>
<textarea name="system_prompt" id="training_system_prompt" placeholder="Enter system prompt...">{{ training_prompts.system_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Video Training Prompt</h4>
<div class="form-group">
<label for="training_video_prompt">Prompt for video training operations:</label>
<textarea name="video_prompt" id="training_video_prompt" placeholder="Enter video training prompt...">{{ training_prompts.video_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Audio Training Prompt</h4>
<div class="form-group">
<label for="training_audio_prompt">Prompt for audio training operations:</label>
<textarea name="audio_prompt" id="training_audio_prompt" placeholder="Enter audio training prompt...">{{ training_prompts.audio_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Image Training Prompt</h4>
<div class="form-group">
<label for="training_image_prompt">Prompt for image training operations:</label>
<textarea name="image_prompt" id="training_image_prompt" placeholder="Enter image training prompt...">{{ training_prompts.image_prompt or '' }}</textarea>
</div>
</div>
<div class="prompt-group">
<h4>Video Splitting Prompt</h4>
<div class="form-group">
<label for="training_split_media_prompt">Prompt for video scene splitting in training:</label>
<textarea name="split_media_prompt" id="training_split_media_prompt" placeholder="Enter video splitting prompt...">{{ training_prompts.split_media_prompt or '' }}</textarea>
</div>
</div>
<button type="submit" class="btn btn-success">Save Training Prompts</button>
</form>
</div>
</div>
{% endblock %}
\ No newline at end of file
......@@ -69,61 +69,58 @@
fetch('/api/stats')
.then(response => response.json())
.then(data => {
let html = '';
// Show current job status if we have one
if (currentJobId) {
html += '<h3>Job Progress</h3>';
html += '<div id="job-status">Loading job status...</div>';
html += '<div id="job-progress" style="margin-top: 1rem;"></div>';
html += '<hr>';
}
html += '<h3>System Stats</h3>';
// GPU Information
if (data.gpu_info) {
html += '<h4>GPU Information</h4>';
html += `<p>CUDA: ${data.gpu_info.cuda_available ? 'Available' : 'Not available'} (${data.gpu_info.cuda_devices} devices)</p>`;
html += `<p>ROCm: ${data.gpu_info.rocm_available ? 'Available' : 'Not available'} (${data.gpu_info.rocm_devices} devices)</p>`;
html += `<p>Available backends: ${data.gpu_info.available_backends.join(', ') || 'None'}</p>`;
// Update only the changing parts to prevent blinking
// GPU Information (static, only update once)
if (!document.getElementById('gpu-info-section')) {
let gpuInfoHtml = '';
if (data.gpu_info) {
gpuInfoHtml += '<h4>GPU Information</h4>';
gpuInfoHtml += `<p>CUDA: ${data.gpu_info.cuda_available ? 'Available' : 'Not available'} (${data.gpu_info.cuda_devices} devices)</p>`;
gpuInfoHtml += `<p>ROCm: ${data.gpu_info.rocm_available ? 'Available' : 'Not available'} (${data.gpu_info.rocm_devices} devices)</p>`;
gpuInfoHtml += `<p>Available backends: ${data.gpu_info.available_backends.join(', ') || 'None'}</p>`;
}
document.getElementById('gpu-info-section').innerHTML = gpuInfoHtml;
}
// Local GPU stats
// Local GPU stats (update dynamically)
let gpuStatsHtml = '';
if (data.gpu_count > 0) {
html += '<h4>Local GPU Usage</h4>';
gpuStatsHtml += '<h4>Local GPU Usage</h4>';
data.gpus.forEach((gpu, i) => {
let memPercent = (gpu.memory_used / gpu.memory_total * 100).toFixed(1);
html += `<p>GPU ${i}: ${gpu.name}<br>Memory: <progress value="${gpu.memory_used}" max="${gpu.memory_total}"></progress> ${gpu.memory_used.toFixed(2)} / ${gpu.memory_total.toFixed(2)} GB (${memPercent}%)<br>Utilization: ${gpu.utilization}%</p>`;
gpuStatsHtml += `<p>GPU ${i}: ${gpu.name}<br>Memory: <progress value="${gpu.memory_used}" max="${gpu.memory_total}"></progress> ${gpu.memory_used.toFixed(2)} / ${gpu.memory_total.toFixed(2)} GB (${memPercent}%)<br>Utilization: ${gpu.utilization}%</p>`;
});
}
document.getElementById('gpu-stats-section').innerHTML = gpuStatsHtml;
// CPU and RAM
html += '<h4>Local Resources</h4>';
html += `<p>CPU: ${data.cpu_percent.toFixed(1)}%</p>`;
html += `<p>RAM: ${data.ram_used.toFixed(2)} / ${data.ram_total.toFixed(2)} GB</p>`;
// CPU and RAM (update dynamically)
let resourcesHtml = '<h4>Local Resources</h4>';
resourcesHtml += `<p>CPU: ${data.cpu_percent.toFixed(1)}%</p>`;
resourcesHtml += `<p>RAM: ${data.ram_used.toFixed(2)} / ${data.ram_total.toFixed(2)} GB</p>`;
document.getElementById('resources-section').innerHTML = resourcesHtml;
// Cluster stats (admin only)
// Cluster stats (admin only, update dynamically)
let clusterHtml = '';
if (data.cluster_clients !== undefined) {
html += '<h4>Cluster Status</h4>';
html += `<p>Connected clients: ${data.cluster_clients}</p>`;
html += `<p>Active processes: ${data.active_processes || 0}</p>`;
html += `<p>GPU-enabled clients: ${data.gpu_clients || 0}</p>`;
clusterHtml += '<h4>Cluster Status</h4>';
clusterHtml += `<p>Connected clients: ${data.cluster_clients}</p>`;
clusterHtml += `<p>Active processes: ${data.active_processes || 0}</p>`;
clusterHtml += `<p>GPU-enabled clients: ${data.gpu_clients || 0}</p>`;
// Connected nodes
if (data.connected_nodes && data.connected_nodes.length > 0) {
html += '<h5>Connected Nodes:</h5>';
html += '<ul>';
clusterHtml += '<h5>Connected Nodes:</h5>';
clusterHtml += '<ul>';
data.connected_nodes.forEach(node => {
const gpuStatus = node.gpu_available ? 'GPU' : 'CPU';
const lastSeen = new Date(node.last_seen * 1000).toLocaleString();
html += `<li><strong>${node.hostname}</strong> (${node.ip_address}) - ${gpuStatus} - Weight: ${node.weight} - Last seen: ${lastSeen}</li>`;
clusterHtml += `<li><strong>${node.hostname}</strong> (${node.ip_address}) - ${gpuStatus} - Weight: ${node.weight} - Last seen: ${lastSeen}</li>`;
});
html += '</ul>';
clusterHtml += '</ul>';
}
}
document.getElementById('stats').innerHTML = html;
document.getElementById('cluster-section').innerHTML = clusterHtml;
// Start job status updates if we have a job
if (currentJobId) {
......@@ -131,7 +128,8 @@
}
})
.catch(e => {
document.getElementById('stats').innerHTML = '<p>Error loading stats</p>';
console.error('Error loading stats:', e);
// Don't replace entire content on error to prevent blinking
});
}
......@@ -404,6 +402,20 @@
<label>Prompt: <textarea name="prompt" rows="5" cols="80">Describe this image.</textarea></label>
</div>
<div class="form-group">
<label style="display: flex; align-items: center; gap: 0.5rem;">
<input type="checkbox" name="analyze_audio" checked>
Analyze audio content
</label>
</div>
<div class="form-group">
<label style="display: flex; align-items: center; gap: 0.5rem;">
<input type="checkbox" name="split_video">
Split video files into scenes
</label>
</div>
<input type="submit" value="Analyze" class="btn">
</form>
......@@ -418,7 +430,13 @@
</div>
<div class="sidebar">
<div id="stats" class="stats">Loading stats...</div>
<div id="stats" class="stats">
<h3>System Stats</h3>
<div id="gpu-info-section">Loading GPU info...</div>
<div id="gpu-stats-section">Loading GPU stats...</div>
<div id="resources-section">Loading resources...</div>
<div id="cluster-section"></div>
</div>
</div>
</div>
......
......@@ -107,6 +107,7 @@
<div id="adminDropdown" class="admin-dropdown">
<a href="/admin/train" {% if active_page == 'train' %}class="active"{% endif %}>Train</a>
<a href="/admin/models" {% if active_page == 'models' %}class="active"{% endif %}>Models</a>
<a href="/admin/prompts" {% if active_page == 'prompts' %}class="active"{% endif %}>Prompts</a>
<a href="/admin/cluster_tokens" {% if active_page == 'cluster_tokens' %}class="active"{% endif %}>Cluster Tokens</a>
<a href="/admin/cluster_nodes" {% if active_page == 'cluster_nodes' %}class="active"{% endif %}>Cluster Nodes</a>
<a href="/admin/config" {% if active_page == 'config' %}class="active"{% endif %}>Configurations</a>
......
......@@ -21,7 +21,7 @@ Provides web interface for administrative functions.
from flask import Blueprint, request, render_template, redirect, url_for, flash
from .auth import require_auth
from .database import get_user_tokens, update_user_tokens, get_user_queue_items, get_default_user_tokens, create_remember_token, validate_remember_token, delete_remember_token, extend_remember_token, get_all_users, update_user_status, update_user_info, delete_user, get_worker_tokens, deactivate_worker_token, activate_worker_token, delete_worker_token, create_user, get_all_models, create_model, update_model, delete_model, get_model_by_id
from .database import get_user_tokens, update_user_tokens, get_user_queue_items, get_default_user_tokens, create_remember_token, validate_remember_token, delete_remember_token, extend_remember_token, get_all_users, update_user_status, update_user_info, delete_user, get_worker_tokens, deactivate_worker_token, activate_worker_token, delete_worker_token, create_user, get_all_models, create_model, update_model, delete_model, get_model_by_id, get_all_prompts_by_functionality, save_multiple_prompts
from .comm import SocketCommunicator, Message
from .utils import get_current_user_session, login_required, admin_required
from .logging_utils import log_message
......@@ -570,4 +570,48 @@ def delete_model_route(model_id):
flash('Model deleted successfully!', 'success')
else:
flash('Failed to delete model', 'error')
return redirect(url_for('admin.models'))
\ No newline at end of file
return redirect(url_for('admin.models'))
@admin_bp.route('/prompts')
@admin_required
def prompts():
    """Render the admin prompts management page.

    Loads the stored prompt sets for the 'analyze' and 'training'
    functionalities and hands them to the template.
    """
    current_user = get_current_user_session()
    context = {
        'user': current_user,
        'analyze_prompts': get_all_prompts_by_functionality('analyze'),
        'training_prompts': get_all_prompts_by_functionality('training'),
        'active_page': 'prompts',
    }
    return render_template('admin/prompts.html', **context)
@admin_bp.route('/prompts/save', methods=['POST'])
@admin_required
def save_prompts():
    """Persist the posted prompt set for one functionality.

    Reads the hidden 'functionality' discriminator plus the five prompt
    fields from the form, validates the target, and stores everything in
    one call to save_multiple_prompts().
    """
    functionality = request.form.get('functionality')
    # Reject anything other than the two known prompt sets.
    if functionality not in ('analyze', 'training'):
        flash('Invalid functionality', 'error')
        return redirect(url_for('admin.prompts'))

    field_names = (
        'system_prompt',
        'video_prompt',
        'audio_prompt',
        'image_prompt',
        'split_media_prompt',
    )
    prompts_data = {
        functionality: {name: request.form.get(name, '') for name in field_names}
    }

    saved = save_multiple_prompts(prompts_data)
    if saved:
        flash(f'{functionality.title()} prompts saved successfully!', 'success')
    else:
        flash(f'Failed to save {functionality} prompts', 'error')
    return redirect(url_for('admin.prompts'))
\ No newline at end of file
......@@ -533,6 +533,32 @@ def init_db(conn) -> None:
except sqlite3.OperationalError:
pass # Column already exists
# Prompts table for system prompts management
if config['type'] == 'mysql':
cursor.execute('''
CREATE TABLE IF NOT EXISTS prompts (
id INT AUTO_INCREMENT PRIMARY KEY,
prompt_functionality VARCHAR(50) NOT NULL,
prompt_name VARCHAR(50) NOT NULL,
prompt_value TEXT NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
UNIQUE KEY unique_functionality_name (prompt_functionality, prompt_name)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
''')
else:
cursor.execute('''
CREATE TABLE IF NOT EXISTS prompts (
id INTEGER PRIMARY KEY,
prompt_functionality TEXT NOT NULL,
prompt_name TEXT NOT NULL,
prompt_value TEXT NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
UNIQUE(prompt_functionality, prompt_name)
)
''')
# Add avatar column to users table if it doesn't exist
try:
if config['type'] == 'mysql':
......@@ -2229,7 +2255,7 @@ def ensure_model_exists(name: str, model_type: str, path: str, vram_estimate: in
# Create the model
cursor.execute('INSERT INTO models (name, type, path, vram_estimate, vram_overhead_gb, available, capabilities) VALUES (?, ?, ?, ?, ?, ?, ?)',
(name, model_type, path, vram_estimate, 6, 1 if available else 0, capabilities))
(name, model_type, path, vram_estimate, 6, 1 if available else 0, capabilities))
conn.commit()
else:
# Update availability if it's not already available
......@@ -2237,4 +2263,83 @@ def ensure_model_exists(name: str, model_type: str, path: str, vram_estimate: in
cursor.execute('UPDATE models SET available = 1 WHERE path = ?', (path,))
conn.commit()
conn.close()
\ No newline at end of file
conn.close()
# Prompt management functions
def get_prompt(prompt_functionality: str, prompt_name: str) -> Optional[str]:
    """Return the stored prompt text for (functionality, name), or None if absent."""
    connection = get_db_connection()
    cur = connection.cursor()
    cur.execute(
        'SELECT prompt_value FROM prompts WHERE prompt_functionality = ? AND prompt_name = ?',
        (prompt_functionality, prompt_name),
    )
    record = cur.fetchone()
    connection.close()
    if not record:
        return None
    return record['prompt_value']
def save_prompt(prompt_functionality: str, prompt_name: str, prompt_value: str) -> bool:
    """Save or update a single prompt value.

    Performs an upsert keyed on the (prompt_functionality, prompt_name)
    unique constraint, so an existing row is overwritten in place.

    Args:
        prompt_functionality: Prompt set the value belongs to (e.g. 'analyze').
        prompt_name: Name of the prompt within the set (e.g. 'video_prompt').
        prompt_value: The prompt text to store.

    Returns:
        True if the statement executed and committed, False on any error.

    Note: success is judged by the statement completing without raising,
    NOT by cursor.rowcount — MySQL's ON DUPLICATE KEY UPDATE reports 0
    affected rows when the stored value is already identical, which made
    a no-op re-save look like a failure. The try/finally also guarantees
    the connection is closed on error, matching save_multiple_prompts().
    """
    conn = get_db_connection()
    cursor = conn.cursor()
    config = get_db_config()
    try:
        if config['type'] == 'mysql':
            cursor.execute('''
                INSERT INTO prompts (prompt_functionality, prompt_name, prompt_value, updated_at)
                VALUES (?, ?, ?, CURRENT_TIMESTAMP)
                ON DUPLICATE KEY UPDATE prompt_value = VALUES(prompt_value), updated_at = CURRENT_TIMESTAMP
            ''', (prompt_functionality, prompt_name, prompt_value))
        else:
            cursor.execute('''
                INSERT OR REPLACE INTO prompts (prompt_functionality, prompt_name, prompt_value, updated_at)
                VALUES (?, ?, ?, CURRENT_TIMESTAMP)
            ''', (prompt_functionality, prompt_name, prompt_value))
        conn.commit()
        success = True
    except Exception as e:
        log_message(f"Error saving prompt: {e}")
        success = False
    finally:
        conn.close()
    return success
def get_all_prompts_by_functionality(prompt_functionality: str) -> Dict[str, str]:
    """Return every stored prompt for one functionality as a {name: value} dict."""
    connection = get_db_connection()
    cur = connection.cursor()
    cur.execute(
        'SELECT prompt_name, prompt_value FROM prompts WHERE prompt_functionality = ?',
        (prompt_functionality,),
    )
    result = {}
    for record in cur.fetchall():
        result[record['prompt_name']] = record['prompt_value']
    connection.close()
    return result
def save_multiple_prompts(prompts_data: Dict[str, Dict[str, str]]) -> bool:
    """Save multiple prompts at once. Format: {functionality: {name: value}}

    All rows are written inside a single commit, so either every prompt in
    prompts_data is persisted or (on error) none of the writes are committed.

    Returns:
        True if all prompts were written and committed, False if any
        statement raised (the error is logged, not re-raised).
    """
    conn = get_db_connection()
    cursor = conn.cursor()
    config = get_db_config()
    try:
        for functionality, prompts in prompts_data.items():
            for name, value in prompts.items():
                if config['type'] == 'mysql':
                    # Upsert keyed on the (prompt_functionality, prompt_name) unique index.
                    # NOTE(review): '?' placeholders are used for the MySQL branch too —
                    # presumably a connection wrapper translates paramstyle; confirm
                    # against get_db_connection().
                    cursor.execute('''
                    INSERT INTO prompts (prompt_functionality, prompt_name, prompt_value, updated_at)
                    VALUES (?, ?, ?, CURRENT_TIMESTAMP)
                    ON DUPLICATE KEY UPDATE prompt_value = VALUES(prompt_value), updated_at = CURRENT_TIMESTAMP
                    ''', (functionality, name, value))
                else:
                    # SQLite path: INSERT OR REPLACE relies on the same UNIQUE constraint.
                    cursor.execute('''
                    INSERT OR REPLACE INTO prompts (prompt_functionality, prompt_name, prompt_value, updated_at)
                    VALUES (?, ?, ?, CURRENT_TIMESTAMP)
                    ''', (functionality, name, value))
        # Single commit after the loop keeps the batch atomic.
        conn.commit()
        success = True
    except Exception as e:
        log_message(f"Error saving multiple prompts: {e}")
        success = False
    finally:
        conn.close()
    return success
\ No newline at end of file
......@@ -305,10 +305,13 @@ def analyze():
model_path = 'Qwen/Qwen2.5-VL-7B-Instruct'
prompt = request.form.get('prompt', 'Describe this image.')
interval = int(request.form.get('interval', 10))
analyze_audio = request.form.get('analyze_audio') == 'on'
split_video = request.form.get('split_video') == 'on'
uploaded_file = request.files.get('file')
local_path = request.form.get('local_path')
media_path = None
is_video = False
if uploaded_file and uploaded_file.filename:
# Save uploaded file temporarily
import tempfile
......@@ -316,16 +319,63 @@ def analyze():
with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.filename)[1]) as tmp:
tmp.write(uploaded_file.read())
media_path = tmp.name
# Check if it's a video file
video_extensions = ['.mp4', '.avi', '.mov', '.mkv', '.webm', '.flv', '.wmv']
is_video = any(media_path.lower().endswith(ext) for ext in video_extensions)
elif local_path:
media_path = local_path
# Check if it's a video file
video_extensions = ['.mp4', '.avi', '.mov', '.mkv', '.webm', '.flv', '.wmv']
is_video = any(local_path.lower().endswith(ext) for ext in video_extensions)
if media_path:
# Get system prompts from database
from .database import get_prompt
# Compose prompts based on options
prompts = []
# Video analysis prompt (always included)
video_system_prompt = get_prompt('analyze', 'system_prompt') or ''
video_prompt = get_prompt('analyze', 'video_prompt') or ''
video_full_prompt = f"{video_system_prompt}\n{video_prompt}\n{prompt}".strip()
# Add split_media prompt if video and split_video is checked
if is_video and split_video:
split_media_prompt = get_prompt('analyze', 'split_media_prompt') or ''
if split_media_prompt:
video_full_prompt += f"\n{split_media_prompt}"
prompts.append({
'step': 'video',
'prompt': video_full_prompt
})
# Audio analysis prompt (if audio analysis is enabled)
if analyze_audio:
audio_system_prompt = get_prompt('analyze', 'system_prompt') or ''
audio_prompt = get_prompt('analyze', 'audio_prompt') or ''
audio_full_prompt = f"{audio_system_prompt}\n{audio_prompt}\n{prompt}".strip()
# Add split_media prompt if video and split_video is checked
if is_video and split_video:
split_media_prompt = get_prompt('analyze', 'split_media_prompt') or ''
if split_media_prompt:
audio_full_prompt += f"\n{split_media_prompt}"
prompts.append({
'step': 'audio',
'prompt': audio_full_prompt
})
data = {
'model_path': model_path,
'prompt': prompt,
'prompts': prompts, # Changed from single prompt to list of prompts
'local_path': media_path,
'interval': interval,
'user_id': user['id']
'user_id': user['id'],
'analyze_audio': analyze_audio,
'split_video': split_video
}
# Submit job to queue system
......
......@@ -487,7 +487,13 @@ def worker_process(backend_type: str):
if get_debug():
log_message(f"DEBUG: No media path provided for job {message.msg_id}")
else:
prompt = data.get('prompt', 'Describe this image.')
# Handle new multi-step prompts format
prompts = data.get('prompts', [])
if not prompts:
# Fallback to old single prompt format for backward compatibility
prompt = data.get('prompt', 'Describe this image.')
prompts = [{'step': 'video', 'prompt': prompt}]
model_path = data.get('model_path', 'Qwen/Qwen2.5-VL-7B-Instruct')
interval = data.get('interval', 10)
job_id = message.msg_id # Use message ID for job identification
......@@ -495,15 +501,45 @@ def worker_process(backend_type: str):
log_message(f"PROGRESS: Job {job_id_int} accepted - Starting analysis")
if get_debug():
log_message(f"DEBUG: Starting analysis of {media_path} with model {model_path} for job {job_id}")
result, tokens_used = analyze_media(media_path, prompt, model_path, interval, job_id, comm)
results = {}
total_tokens = 0
# Process each step
for prompt_data in prompts:
step = prompt_data.get('step', 'video')
prompt = prompt_data.get('prompt', 'Describe this image.')
if step == 'video':
# Process video/image analysis
if get_debug():
log_message(f"DEBUG: Processing video step for job {job_id}")
result, tokens_used = analyze_media(media_path, prompt, model_path, interval, job_id, comm)
results['video'] = result
total_tokens += tokens_used
elif step == 'audio':
# Placeholder for future audio analysis
if get_debug():
log_message(f"DEBUG: Audio step placeholder for job {job_id} - storing prompt for future implementation")
results['audio'] = f"Audio analysis prompt stored: {prompt[:100]}..."
# For now, no tokens used for audio step
# Combine results
if len(results) == 1:
result = list(results.values())[0]
else:
result = "Analysis Results:\n\n"
for step, step_result in results.items():
result += f"{step.upper()} ANALYSIS:\n{step_result}\n\n"
if get_debug():
log_message(f"DEBUG: Analysis completed for job {message.msg_id}, used {tokens_used} tokens")
log_message(f"DEBUG: Analysis completed for job {message.msg_id}, used {total_tokens} tokens")
# Release model reference (don't unload yet, per requirements)
release_model(model_path)
# Send result back
response = Message('analyze_response', message.msg_id, {'result': result, 'tokens_used': tokens_used})
response = Message('analyze_response', message.msg_id, {'result': result, 'tokens_used': total_tokens})
if get_debug():
log_message(f"DEBUG: Sending analyze_response for job {message.msg_id}")
comm.send_message(response)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment