Make PROGRESS output messages visible without --debug flag

parent be188699
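
This commit drops the `get_debug()` guard in front of every `PROGRESS:` print in `analyze_media` and `worker_process`, so the lines now always reach stdout; the structured `comm.send_message(progress_msg)` call beside each print is untouched. A minimal before/after sketch of the repeated pattern:

```python
# Before: the PROGRESS line only appeared when the --debug flag was set
if get_debug():
    print(f"PROGRESS: Job {job_id_int} - 5% - Initializing analysis job")

# After: the PROGRESS line is always emitted; the structured progress_msg
# sent via comm.send_message() continues to carry the same information
print(f"PROGRESS: Job {job_id_int} - 5% - Initializing analysis job")
```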
@@ -194,7 +194,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': 'Initializing analysis job'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 5% - Initializing analysis job")
+    print(f"PROGRESS: Job {job_id_int} - 5% - Initializing analysis job")
     torch.cuda.empty_cache()
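
Every hunk below repeats the same send-then-print pair. As a possible follow-up (not part of this commit), a small helper could collapse the duplication. This sketch assumes a plain dict is an acceptable message; the real `progress_msg` is built by a constructor that is not visible in this diff:

```python
def report_progress(comm, job_id_int, percent, message):
    """Send a structured progress update and mirror it to stdout.

    Hypothetical helper consolidating the repeated pattern in this diff.
    """
    progress_msg = {
        'job_id': job_id_int,  # assumed field name
        'progress': percent,   # assumed field name
        'message': message,    # matches the 'message' key visible in the hunks
    }
    comm.send_message(progress_msg)
    print(f"PROGRESS: Job {job_id_int} - {percent}% - {message}", flush=True)
```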
@@ -214,7 +213,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': f'Model {model_path.split("/")[-1]} loaded successfully'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 8% - Model loaded successfully")
+    print(f"PROGRESS: Job {job_id_int} - 8% - Model loaded successfully")
     # Get system prompt
@@ -241,7 +239,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': f'Extracted {total_frames} frames'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 10% - Extracted {total_frames} frames")
+    print(f"PROGRESS: Job {job_id_int} - 10% - Extracted {total_frames} frames")
     descriptions = []
@@ -259,7 +256,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
             'message': f'Processing frame {i+1}/{total_frames} at {ts:.1f}s'
         })
         comm.send_message(progress_msg)
-        if get_debug():
-            print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Processing frame {i+1}/{total_frames}")
+        print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Processing frame {i+1}/{total_frames}")
         # Check for cancellation
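
The frame-loop messages interpolate a `progress_percent` whose computation sits outside these hunks. Given that frame extraction reports 10% and the summary step reports 85%, one plausible (assumed, not confirmed) mapping spreads the loop across that band:

```python
def frame_progress(i, total_frames, start=10, end=85):
    # Assumed formula: map the 0-based frame index i onto the start..end
    # band bounded by the fixed 10% (extraction) and 85% (summary) steps.
    return start + int((i + 1) / total_frames * (end - start))

# e.g. frame_progress(0, 10) -> 17, frame_progress(9, 10) -> 85
```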
@@ -295,7 +291,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
             'message': f'Completed frame {i+1}/{total_frames} ({progress_percent}%)'
         })
         comm.send_message(progress_msg)
-        if get_debug():
-            print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Completed frame {i+1}/{total_frames}")
+        print(f"PROGRESS: Job {job_id_int} - {progress_percent}% - Completed frame {i+1}/{total_frames}")
         if output_dir:
@@ -313,7 +308,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': 'Generating video summary'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 85% - Generating video summary")
+    print(f"PROGRESS: Job {job_id_int} - 85% - Generating video summary")
     # Check for cancellation before summary
@@ -367,7 +361,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': 'Analysis completed'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 100% - Analysis completed")
+    print(f"PROGRESS: Job {job_id_int} - 100% - Analysis completed")
     result = f"Frame Descriptions:\n" + "\n".join(descriptions) + f"\n\nSummary:\n{summary}"
@@ -384,7 +377,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': 'Starting image analysis'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 20% - Starting image analysis")
+    print(f"PROGRESS: Job {job_id_int} - 20% - Starting image analysis")
     # Check for cancellation before processing image
@@ -401,7 +393,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': 'Processing image with AI model'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 50% - Processing image with AI model")
+    print(f"PROGRESS: Job {job_id_int} - 50% - Processing image with AI model")
     result, tokens = analyze_single_image(media_path, full_prompt, model)
@@ -417,7 +408,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': 'Finalizing analysis results'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 90% - Finalizing analysis results")
+    print(f"PROGRESS: Job {job_id_int} - 90% - Finalizing analysis results")
     # Send final progress update
@@ -429,7 +419,6 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
         'message': 'Image analysis completed successfully'
     })
     comm.send_message(progress_msg)
-    if get_debug():
-        print(f"PROGRESS: Job {job_id_int} - 100% - Image analysis completed successfully")
+    print(f"PROGRESS: Job {job_id_int} - 100% - Image analysis completed successfully")
     torch.cuda.empty_cache()
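
One caveat with relying on bare prints: when stdout is a pipe rather than a TTY, Python block-buffers it, so PROGRESS lines can arrive late or batched. Neither mitigation below is in this commit; both are standard Python:

```python
import sys

# Option 1: flush each progress line as it is printed
print("PROGRESS: Job 7 - 100% - Image analysis completed successfully", flush=True)

# Option 2: switch stdout to line buffering once at startup (Python 3.7+)
sys.stdout.reconfigure(line_buffering=True)
```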
@@ -468,7 +457,6 @@ def worker_process(backend_type: str):
         interval = data.get('interval', 10)
         job_id = message.msg_id  # Use message ID for job identification
         job_id_int = int(message.msg_id.split('_')[1])  # Extract integer job ID
-        if get_debug():
-            print(f"PROGRESS: Job {job_id_int} accepted - Starting analysis")
+        print(f"PROGRESS: Job {job_id_int} accepted - Starting analysis")
         print(f"DEBUG: Starting analysis of {media_path} with model {model_path} for job {job_id}")
         result, tokens_used = analyze_media(media_path, prompt, model_path, interval, job_id_int, comm)
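
`worker_process` derives the integer job id by splitting the message id, which implies `msg_id` looks like `<prefix>_<n>...`; the exact format is not shown in this diff. A hypothetical example:

```python
msg_id = "job_42_analyze"               # hypothetical value; format assumed
job_id_int = int(msg_id.split('_')[1])  # -> 42, as in worker_process above
```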