Make PROGRESS output messages visible without --debug flag

parent be188699
......@@ -194,8 +194,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': 'Initializing analysis job'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 5% - Initializing analysis job")
print(f"PROGRESS: Job {job_id_int} - 5% - Initializing analysis job")
torch.cuda.empty_cache()
total_tokens = 0
......@@ -214,8 +213,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': f'Model {model_path.split("/")[-1]} loaded successfully'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 8% - Model loaded successfully")
print(f"PROGRESS: Job {job_id_int} - 8% - Model loaded successfully")
# Get system prompt
print(f"DEBUG: Retrieving system prompt for job {job_id_int}")
......@@ -241,8 +239,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': f'Extracted {total_frames} frames'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 10% - Extracted {total_frames} frames")
print(f"PROGRESS: Job {job_id_int} - 10% - Extracted {total_frames} frames")
descriptions = []
......@@ -259,8 +256,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': f'Processing frame {i+1}/{total_frames} at {ts:.1f}s'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Processing frame {i+1}/{total_frames}")
print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Processing frame {i+1}/{total_frames}")
# Check for cancellation
if job_id_int and check_job_cancelled(job_id_int):
......@@ -295,8 +291,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': f'Completed frame {i+1}/{total_frames} ({progress_percent}%)'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Completed frame {i+1}/{total_frames}")
print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Completed frame {i+1}/{total_frames}")
if output_dir:
import shutil
......@@ -313,8 +308,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': 'Generating video summary'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 85% - Generating video summary")
print(f"PROGRESS: Job {job_id_int} - 85% - Generating video summary")
# Check for cancellation before summary
if job_id_int and check_job_cancelled(job_id_int):
......@@ -367,8 +361,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': 'Analysis completed'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 100% - Analysis completed")
print(f"PROGRESS: Job {job_id_int} - 100% - Analysis completed")
result = f"Frame Descriptions:\n" + "\n".join(descriptions) + f"\n\nSummary:\n{summary}"
return result, total_tokens
......@@ -384,8 +377,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': 'Starting image analysis'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 20% - Starting image analysis")
print(f"PROGRESS: Job {job_id_int} - 20% - Starting image analysis")
# Check for cancellation before processing image
if job_id_int and check_job_cancelled(job_id_int):
......@@ -401,8 +393,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': 'Processing image with AI model'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 50% - Processing image with AI model")
print(f"PROGRESS: Job {job_id_int} - 50% - Processing image with AI model")
result, tokens = analyze_single_image(media_path, full_prompt, model)
total_tokens += tokens
......@@ -417,8 +408,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': 'Finalizing analysis results'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 90% - Finalizing analysis results")
print(f"PROGRESS: Job {job_id_int} - 90% - Finalizing analysis results")
# Send final progress update
if comm:
......@@ -429,8 +419,7 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
'message': 'Image analysis completed successfully'
})
comm.send_message(progress_msg)
if get_debug():
print(f"PROGRESS: Job {job_id_int} - 100% - Image analysis completed successfully")
print(f"PROGRESS: Job {job_id_int} - 100% - Image analysis completed successfully")
torch.cuda.empty_cache()
return result, total_tokens
......@@ -468,8 +457,7 @@ def worker_process(backend_type: str):
interval = data.get('interval', 10)
job_id = message.msg_id # Use message ID for job identification
job_id_int = int(message.msg_id.split('_')[1]) # Extract integer job ID
if get_debug():
print(f"PROGRESS: Job {job_id_int} accepted - Starting analysis")
print(f"PROGRESS: Job {job_id_int} accepted - Starting analysis")
print(f"DEBUG: Starting analysis of {media_path} with model {model_path} for job {job_id}")
result, tokens_used = analyze_media(media_path, prompt, model_path, interval, job_id_int, comm)
print(f"DEBUG: Analysis completed for job {message.msg_id}, used {tokens_used} tokens")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment